code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import plotly.plotly as py
"""Get data from csv and split it"""
data = open('Real_Final_database_02.csv')
alldata = data.readlines()
listdata = []
for ix in alldata:
listdata.append(ix.strip().split(','))
"""Seperate data in each type of disaster."""
all_disaster = {'Drought':0, 'Flood':0, 'Storm':0, 'Epidemic':0, 'Earthquake':0}
for iy in listdata:
if iy[0] == 'Indonesia' and iy[2] in all_disaster:
all_disaster[iy[2]] += 1
"""Calculate each type for make an average."""
total = sum(all_disaster.values())
average = []
for iz in all_disaster:
all_disaster[iz] = float("%.2f" % ((all_disaster[iz]/total)*100))
label = [i for i in all_disaster]
value = [all_disaster[j] for j in label]
"""Apprerance"""
make_circle = {"data": [{"values":value,"labels":label,
"name": "Average", "hoverinfo":"label+percent+name", "hole": 0.39, "type": "pie"}],
"layout": {"title":"Indonesia's Average Disaster from 2000 to 2014", "annotations": [{"font": {"size": 20},
"showarrow": False, "text": ""}]}}
url = py.plot(make_circle, filename='Indonesia\'s Average Disaster from 200o to 2014')
| [
"plotly.plotly.plot"
] | [((1028, 1107), 'plotly.plotly.plot', 'py.plot', (['make_circle'], {'filename': '"""Indonesia\'s Average Disaster from 200o to 2014"""'}), '(make_circle, filename="Indonesia\'s Average Disaster from 200o to 2014")\n', (1035, 1107), True, 'import plotly.plotly as py\n')] |
import numpy as np
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import cm
from scipy import signal
import matplotlib.image as mpimg
# matplotlib.use('Agg')
# define normalized 2D gaussian
def gaus2d(x, y, mx, my, sx, sy):
return 1. / (2. * np.pi * sx * sy) * np.exp(-((x - mx)**2. / (2. * sx**2.) + (y - my)**2. / (2. * sy**2.)))
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
ellipse = Ellipse(xy=(0,0), width=3.6, height=1.8, edgecolor='r', lw=2, facecolor='none')
x = np.linspace(0, 10, 101)
y = np.linspace(0, 10, 101)
x1, y1 = np.meshgrid(x, y) # get 2D variables instead of 1D
z1 = gaus2d(x1, y1, 5, 5, 2.7, 1.35)
z1_copy = z1.copy()
z1 = z1/z1.max()
x2, y2 = np.meshgrid(x, y) # get 2D variables instead of 1D
z2 = gaus2d(x2, y2, 5, 5, 0.9, 0.45)
z2_copy = z2.copy()
z2 = z2/z2.max()
dog_not_norm = z1 - z2
dog = (z1 - z2)/np.max(z1-z2)
dog[dog<0] = 0
# path
# path1 = 'image_puck.png'
# img1 = mpimg.imread(path1)
# gray1 = rgb2gray(img1)
# img1 = (np.array(gray1))[0:84, 0:84]
# path2 = 'circle.png'
# img2 = mpimg.imread(path2)
# gray2 = rgb2gray(img2)
# img2 = (np.array(gray1))[0:84, 0:84]
# img_conv = signal.convolve2d(img1, z1)
# # img_product = img1 * img2
#
# # Displaying the image
# fig1 = plt.figure()
#
# plt.imshow(img_conv)
# plt.show()
# fig2 = plt.figure()
# plt.imshow(img)
# plt.show()
fig = plt.figure()
ax1 = fig.add_subplot(3,2,5)
ax1.add_artist(ellipse)
im = ax1.imshow(dog, cmap="viridis", extent=(-5, 5, -5, 5))
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.title.set_text('dog 2D')
cbar = fig.colorbar(im, ax=ax1)
ax2 = fig.add_subplot(3,2,6,projection='3d')
ax2.contour3D(x, y, dog, 100, cmap=cm.viridis)
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.set_zlabel('z')
ax2.title.set_text('dog 3D')
ax3 = fig.add_subplot(3,2,1)
im1 = ax3.imshow(z1, cmap="viridis", extent=(-5, 5, -5, 5))
ax3.set_xlabel('x')
ax3.set_ylabel('y')
ax3.title.set_text('g1 2D')
ax4 = fig.add_subplot(3,2,2,projection='3d')
ax4.contour3D(x, y, z1, 50, cmap=cm.viridis)
ax4.set_xlabel('x')
ax4.set_ylabel('y')
ax4.set_zlabel('z')
ax4.title.set_text('g1 3D')
ax5 = fig.add_subplot(3,2,3)
im2 = ax5.imshow(z2, cmap="viridis", extent=(-5, 5, -5, 5))
ax5.set_xlabel('x')
ax5.set_ylabel('y')
ax5.title.set_text('g2 2D')
ax6 = fig.add_subplot(3,2,4,projection='3d')
ax6.contour3D(x, y, z2, 50, cmap=cm.viridis)
ax6.set_xlabel('x')
ax6.set_ylabel('y')
ax6.set_zlabel('z')
ax6.title.set_text('g2 3D')
plt.show()
| [
"numpy.max",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.dot",
"numpy.meshgrid",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.show"
] | [((487, 572), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': '(0, 0)', 'width': '(3.6)', 'height': '(1.8)', 'edgecolor': '"""r"""', 'lw': '(2)', 'facecolor': '"""none"""'}), "(xy=(0, 0), width=3.6, height=1.8, edgecolor='r', lw=2, facecolor='none'\n )\n", (494, 572), False, 'from matplotlib.patches import Ellipse\n'), ((571, 594), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(101)'], {}), '(0, 10, 101)\n', (582, 594), True, 'import numpy as np\n'), ((599, 622), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(101)'], {}), '(0, 10, 101)\n', (610, 622), True, 'import numpy as np\n'), ((632, 649), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (643, 649), True, 'import numpy as np\n'), ((767, 784), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (778, 784), True, 'import numpy as np\n'), ((1424, 1436), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1434, 1436), True, 'import matplotlib.pyplot as plt\n'), ((2504, 2514), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2512, 2514), True, 'import matplotlib.pyplot as plt\n'), ((429, 473), 'numpy.dot', 'np.dot', (['rgb[..., :3]', '[0.2989, 0.587, 0.114]'], {}), '(rgb[..., :3], [0.2989, 0.587, 0.114])\n', (435, 473), True, 'import numpy as np\n'), ((932, 947), 'numpy.max', 'np.max', (['(z1 - z2)'], {}), '(z1 - z2)\n', (938, 947), True, 'import numpy as np\n'), ((327, 415), 'numpy.exp', 'np.exp', (['(-((x - mx) ** 2.0 / (2.0 * sx ** 2.0) + (y - my) ** 2.0 / (2.0 * sy ** 2.0)))'], {}), '(-((x - mx) ** 2.0 / (2.0 * sx ** 2.0) + (y - my) ** 2.0 / (2.0 * sy **\n 2.0)))\n', (333, 415), True, 'import numpy as np\n')] |
import requests
class BuscaEndereco:
def __init__(self,cep):
if self.valida_cep(str(cep)):
self.cep = str(cep)
else:
raise ValueError("CEP Inválido !!!")
def __str__(self):
return self.formata_cep()
def valida_cep(self,cep):
if len(self.cep == 8):
return True
else:
return False
def formata_cep(self,cep):
return "{}-{}".format(self.cep[:5],self.cep[5:])
def busca_dados (self):
url = "https://viacep.com.br/ws/{}/json/".format(self.cep)
r = requests.get(url)
return r
| [
"requests.get"
] | [((585, 602), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (597, 602), False, 'import requests\n')] |
import helpers
import sys
from representations.sequentialembedding import SequentialEmbedding
"""
Let's examine the closest neighbors for a word over time
"""
import collections
from sklearn.manifold import TSNE
import numpy as np
import matplotlib.pyplot as plt
WORDS = helpers.get_words()
if __name__ == "__main__":
embeddings = helpers.load_embeddings()
for word1 in WORDS:
time_sims, lookups, nearests, sims = helpers.get_time_sims(embeddings, word1)
helpers.clear_figure()
# we remove word1 from our words because we just want to plot the different
# related words
words = filter(lambda word: word.split("|")[0] != word1, lookups.keys())
words = list(words)
values = [lookups[word] for word in words]
fitted = helpers.fit_tsne(values)
if not len(fitted):
print("Couldn't model word", word1)
continue
cmap = helpers.get_cmap(len(time_sims))
annotations = helpers.plot_words(word1, words, fitted, cmap, sims)
helpers.savefig("%s_shaded" % word1)
for year, sim in time_sims.items():
print(year, sim)
| [
"helpers.savefig",
"helpers.get_words",
"helpers.fit_tsne",
"helpers.clear_figure",
"helpers.get_time_sims",
"helpers.plot_words",
"helpers.load_embeddings"
] | [((275, 294), 'helpers.get_words', 'helpers.get_words', ([], {}), '()\n', (292, 294), False, 'import helpers\n'), ((339, 364), 'helpers.load_embeddings', 'helpers.load_embeddings', ([], {}), '()\n', (362, 364), False, 'import helpers\n'), ((435, 475), 'helpers.get_time_sims', 'helpers.get_time_sims', (['embeddings', 'word1'], {}), '(embeddings, word1)\n', (456, 475), False, 'import helpers\n'), ((485, 507), 'helpers.clear_figure', 'helpers.clear_figure', ([], {}), '()\n', (505, 507), False, 'import helpers\n'), ((795, 819), 'helpers.fit_tsne', 'helpers.fit_tsne', (['values'], {}), '(values)\n', (811, 819), False, 'import helpers\n'), ((988, 1040), 'helpers.plot_words', 'helpers.plot_words', (['word1', 'words', 'fitted', 'cmap', 'sims'], {}), '(word1, words, fitted, cmap, sims)\n', (1006, 1040), False, 'import helpers\n'), ((1050, 1086), 'helpers.savefig', 'helpers.savefig', (["('%s_shaded' % word1)"], {}), "('%s_shaded' % word1)\n", (1065, 1086), False, 'import helpers\n')] |
from __future__ import absolute_import
import six
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.serializers import serialize
from sentry.models import Project, Team
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario('ListOrganizationProjects')
def list_organization_projects_scenario(runner):
runner.request(
method='GET',
path='/organizations/%s/projects/' % runner.org.slug
)
class OrganizationProjectsEndpoint(OrganizationEndpoint):
doc_section = DocSection.ORGANIZATIONS
@attach_scenarios([list_organization_projects_scenario])
def get(self, request, organization):
"""
List an Organization's Projects
```````````````````````````````
Return a list of projects bound to a organization.
:pparam string organization_slug: the slug of the organization for
which the projects should be listed.
:auth: required
"""
if request.auth and not request.user.is_authenticated():
# TODO: remove this, no longer supported probably
if hasattr(request.auth, 'project'):
team_list = [request.auth.project.team]
project_list = [request.auth.project]
elif request.auth.organization is not None:
org = request.auth.organization
team_list = list(Team.objects.filter(
organization=org,
))
project_list = list(Project.objects.filter(
team__in=team_list,
).order_by('name'))
else:
return Response({'detail': 'Current access does not point to '
'organization.'}, status=400)
else:
team_list = list(request.access.teams)
project_list = list(Project.objects.filter(
team__in=team_list,
).order_by('name'))
team_map = {
d['id']: d
for d in serialize(team_list, request.user)
}
context = []
for project, pdata in zip(project_list, serialize(project_list, request.user)):
assert six.text_type(project.id) == pdata['id']
pdata['team'] = team_map[six.text_type(project.team_id)]
context.append(pdata)
return Response(context)
| [
"sentry.models.Team.objects.filter",
"six.text_type",
"rest_framework.response.Response",
"sentry.api.serializers.serialize",
"sentry.utils.apidocs.scenario",
"sentry.utils.apidocs.attach_scenarios",
"sentry.models.Project.objects.filter"
] | [((348, 384), 'sentry.utils.apidocs.scenario', 'scenario', (['"""ListOrganizationProjects"""'], {}), "('ListOrganizationProjects')\n", (356, 384), False, 'from sentry.utils.apidocs import scenario, attach_scenarios\n'), ((652, 707), 'sentry.utils.apidocs.attach_scenarios', 'attach_scenarios', (['[list_organization_projects_scenario]'], {}), '([list_organization_projects_scenario])\n', (668, 707), False, 'from sentry.utils.apidocs import scenario, attach_scenarios\n'), ((2479, 2496), 'rest_framework.response.Response', 'Response', (['context'], {}), '(context)\n', (2487, 2496), False, 'from rest_framework.response import Response\n'), ((2260, 2297), 'sentry.api.serializers.serialize', 'serialize', (['project_list', 'request.user'], {}), '(project_list, request.user)\n', (2269, 2297), False, 'from sentry.api.serializers import serialize\n'), ((2145, 2179), 'sentry.api.serializers.serialize', 'serialize', (['team_list', 'request.user'], {}), '(team_list, request.user)\n', (2154, 2179), False, 'from sentry.api.serializers import serialize\n'), ((2319, 2344), 'six.text_type', 'six.text_type', (['project.id'], {}), '(project.id)\n', (2332, 2344), False, 'import six\n'), ((2397, 2427), 'six.text_type', 'six.text_type', (['project.team_id'], {}), '(project.team_id)\n', (2410, 2427), False, 'import six\n'), ((1771, 1857), 'rest_framework.response.Response', 'Response', (["{'detail': 'Current access does not point to organization.'}"], {'status': '(400)'}), "({'detail': 'Current access does not point to organization.'},\n status=400)\n", (1779, 1857), False, 'from rest_framework.response import Response\n'), ((1516, 1553), 'sentry.models.Team.objects.filter', 'Team.objects.filter', ([], {'organization': 'org'}), '(organization=org)\n', (1535, 1553), False, 'from sentry.models import Project, Team\n'), ((1987, 2029), 'sentry.models.Project.objects.filter', 'Project.objects.filter', ([], {'team__in': 'team_list'}), '(team__in=team_list)\n', (2009, 2029), False, 'from 
sentry.models import Project, Team\n'), ((1630, 1672), 'sentry.models.Project.objects.filter', 'Project.objects.filter', ([], {'team__in': 'team_list'}), '(team__in=team_list)\n', (1652, 1672), False, 'from sentry.models import Project, Team\n')] |
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.shortcuts import render
from cloudinary.models import CloudinaryField
from .utils import get_random_code
from django.template.defaultfilters import slugify
from django.contrib.auth import get_user_model
from kwikposts.models import KwikPost, Comment, Like
from django.db.models import Q
# Create your models here.
class ProfileManager(models.Manager):
def get_all_profiles_to_invite(self, sender):
profiles = Profile.objects.all().exclude(user=sender)
profile = Profile.objects.get(user=sender)
friend_relation = Relationship.objects.filter(Q(sender=profile) | Q(receiver=profile))
print(friend_relation)
accepted = set([])
for rel in friend_relation:
if rel.status == 'accepted':
accepted.add(rel.receiver)
accepted.add(rel.sender)
print(accepted)
available = [profile for profile in profiles if profile not in accepted]
print(available)
return available
class Profile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
phone_number = models.CharField(max_length=11, blank=True, null=True)
date_of_birth = models.DateField(blank=True, null=True)
display_picture = CloudinaryField('users/%Y/%m/%d/', default='default_avatar.png')
bio = models.CharField(max_length=140, blank=True, null=True)
gender = models.CharField(max_length=20, blank=True, null=True)
friends = models.ManyToManyField(User, blank=True, related_name='friends')
slug = models.SlugField(unique=True, blank=True)
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
def get_friends(self):
return self.friends.all()
def get_friends_number(self):
return self.friends.all().count()
def get_likes_given(self):
likes = self.Like.all().count()
total_liked = 0
for item in likes:
if item.values == 'Like':
total_liked += 1
return total_liked
def __str__(self):
return f"Profile for user {self.user.username}-{self.created_at.strftime('%d-%m-%Y')}"
def save(self, *args, **kwargs):
ex = False
if self.user.first_name and self.user.last_name:
to_slug = slugify(str(self.user.first_name) + " " + str(self.user.last_name))
ex = Profile.objects.filter(slug=to_slug).exists()
while ex:
to_slug = slugify(to_slug + " " + str(get_random_code()))
ex = Profile.objects.filter(slug=to_slug).exists()
else:
to_slug = str(self.user)
self.slug = to_slug
super().save(*args, **kwargs)
STATUS_CHOICES = (
('send', 'send'),
('accepted', 'accepted')
)
class RelationshipManager(models.Manager):
def invitations_received(self, receiver):
new_invitation = Relationship.objects.filter(receiver=receiver, status='send')
return new_invitation
class Relationship(models.Model):
sender = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='sender')
receiver = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='receiver')
status = models.CharField(max_length=8, choices=STATUS_CHOICES)
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
objects = RelationshipManager()
class Meta:
ordering = ('-created_at',)
def __str__(self):
return f"{self.sender} follows {self.receiver}-{self.status}"
| [
"django.db.models.OneToOneField",
"django.db.models.DateField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.DateTimeField",
"django.db.models.SlugField",
"cloudinary.models.CloudinaryField",
"django.db.models.Q",
"django.db.models.CharField"
] | [((1150, 1222), 'django.db.models.OneToOneField', 'models.OneToOneField', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE'}), '(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n', (1170, 1222), False, 'from django.db import models\n'), ((1242, 1296), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(11)', 'blank': '(True)', 'null': '(True)'}), '(max_length=11, blank=True, null=True)\n', (1258, 1296), False, 'from django.db import models\n'), ((1317, 1356), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1333, 1356), False, 'from django.db import models\n'), ((1379, 1443), 'cloudinary.models.CloudinaryField', 'CloudinaryField', (['"""users/%Y/%m/%d/"""'], {'default': '"""default_avatar.png"""'}), "('users/%Y/%m/%d/', default='default_avatar.png')\n", (1394, 1443), False, 'from cloudinary.models import CloudinaryField\n'), ((1454, 1509), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(140)', 'blank': '(True)', 'null': '(True)'}), '(max_length=140, blank=True, null=True)\n', (1470, 1509), False, 'from django.db import models\n'), ((1523, 1577), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)', 'null': '(True)'}), '(max_length=20, blank=True, null=True)\n', (1539, 1577), False, 'from django.db import models\n'), ((1592, 1656), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {'blank': '(True)', 'related_name': '"""friends"""'}), "(User, blank=True, related_name='friends')\n", (1614, 1656), False, 'from django.db import models\n'), ((1668, 1709), 'django.db.models.SlugField', 'models.SlugField', ([], {'unique': '(True)', 'blank': '(True)'}), '(unique=True, blank=True)\n', (1684, 1709), False, 'from django.db import models\n'), ((1727, 1762), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1747, 
1762), False, 'from django.db import models\n'), ((1780, 1819), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1800, 1819), False, 'from django.db import models\n'), ((3177, 3252), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'on_delete': 'models.CASCADE', 'related_name': '"""sender"""'}), "(Profile, on_delete=models.CASCADE, related_name='sender')\n", (3194, 3252), False, 'from django.db import models\n'), ((3268, 3345), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'on_delete': 'models.CASCADE', 'related_name': '"""receiver"""'}), "(Profile, on_delete=models.CASCADE, related_name='receiver')\n", (3285, 3345), False, 'from django.db import models\n'), ((3359, 3413), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(8)', 'choices': 'STATUS_CHOICES'}), '(max_length=8, choices=STATUS_CHOICES)\n', (3375, 3413), False, 'from django.db import models\n'), ((3431, 3466), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (3451, 3466), False, 'from django.db import models\n'), ((3484, 3523), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (3504, 3523), False, 'from django.db import models\n'), ((691, 708), 'django.db.models.Q', 'Q', ([], {'sender': 'profile'}), '(sender=profile)\n', (692, 708), False, 'from django.db.models import Q\n'), ((711, 730), 'django.db.models.Q', 'Q', ([], {'receiver': 'profile'}), '(receiver=profile)\n', (712, 730), False, 'from django.db.models import Q\n')] |
# -*- coding: utf-8 -*
"""
:py:class:`GenerateLabelFieldReader`
"""
import numpy as np
from senta.common.register import RegisterSet
from senta.common.rule import DataShape, FieldLength, InstanceName
from senta.data.field_reader.base_field_reader import BaseFieldReader
from senta.data.util_helper import generate_pad_batch_data
from senta.modules.token_embedding.custom_fluid_embedding import CustomFluidTokenEmbedding
@RegisterSet.field_reader.register
class GenerateLabelFieldReader(BaseFieldReader):
"""seq2seq label的专用field_reader
"""
def __init__(self, field_config):
"""
:param field_config:
"""
BaseFieldReader.__init__(self, field_config=field_config)
self.paddle_version_code = 1.6
if self.field_config.tokenizer_info:
tokenizer_class = RegisterSet.tokenizer.__getitem__(self.field_config.tokenizer_info["type"])
params = None
if self.field_config.tokenizer_info.__contains__("params"):
params = self.field_config.tokenizer_info["params"]
self.tokenizer = tokenizer_class(vocab_file=self.field_config.vocab_path,
split_char=self.field_config.tokenizer_info["split_char"],
unk_token=self.field_config.tokenizer_info["unk_token"],
params=params)
if self.field_config.embedding_info and self.field_config.embedding_info["use_reader_emb"]:
self.token_embedding = CustomFluidTokenEmbedding(emb_dim=self.field_config.embedding_info["emb_dim"],
vocab_size=self.tokenizer.vocabulary.get_vocab_size())
def init_reader(self):
""" 初始化reader格式
:return: reader的shape[]、type[]、level[]
"""
shape = []
types = []
levels = []
"""train_tar_ids"""
if self.field_config.data_type == DataShape.STRING:
"""src_ids"""
shape.append([-1, self.field_config.max_seq_len])
levels.append(0)
types.append('int64')
else:
raise TypeError("GenerateLabelFieldReader's data_type must be string")
"""mask_ids"""
shape.append([-1, self.field_config.max_seq_len])
levels.append(0)
types.append('float32')
"""seq_lens"""
shape.append([-1])
levels.append(0)
types.append('int64')
"""infer_tar_ids"""
shape.append([-1, self.field_config.max_seq_len, 1])
levels.append(0)
types.append('int64')
"""mask_ids"""
shape.append([-1, self.field_config.max_seq_len])
levels.append(0)
types.append('float32')
"""seq_lens"""
shape.append([-1])
levels.append(0)
types.append('int64')
return shape, types, levels
def convert_texts_to_ids(self, batch_text):
"""将一个batch的明文text转成id
:param batch_text:
:return:
"""
train_src_ids = []
infer_src_ids = []
for text in batch_text:
if self.field_config.need_convert:
tokens = self.tokenizer.tokenize(text)
src_id = self.tokenizer.convert_tokens_to_ids(tokens)
else:
src_id = text.split(" ")
# 加上截断策略
if len(src_id) > self.field_config.max_seq_len - 1:
src_id = src_id[0:self.field_config.max_seq_len - 1]
train_src_id = [self.field_config.label_start_id] + src_id
infer_src_id = src_id + [self.field_config.label_end_id]
train_src_ids.append(train_src_id)
infer_src_ids.append(infer_src_id)
return_list = []
train_label_ids, train_label_mask, label_lens = generate_pad_batch_data(train_src_ids,
pad_idx=self.field_config.padding_id,
return_input_mask=True,
return_seq_lens=True,
paddle_version_code=self.paddle_version_code)
infer_label_ids, infer_label_mask, label_lens = generate_pad_batch_data(infer_src_ids,
pad_idx=self.field_config.padding_id,
return_input_mask=True,
return_seq_lens=True,
paddle_version_code=self.paddle_version_code)
infer_label_ids = np.reshape(infer_label_ids, (infer_label_ids.shape[0], infer_label_ids.shape[1], 1))
return_list.append(train_label_ids)
return_list.append(train_label_mask)
return_list.append(label_lens)
return_list.append(infer_label_ids)
return_list.append(infer_label_mask)
return_list.append(label_lens)
return return_list
def structure_fields_dict(self, slots_id, start_index, need_emb=True):
"""静态图调用的方法,生成一个dict, dict有两个key:id , emb. id对应的是pyreader读出来的各个field产出的id,emb对应的是各个
field对应的embedding
:param slots_id: pyreader输出的完整的id序列
:param start_index:当前需要处理的field在slot_id_list中的起始位置
:param need_emb:是否需要embedding(预测过程中是不需要embedding的)
:return:
"""
record_id_dict = {}
record_id_dict[InstanceName.TRAIN_LABEL_SRC_IDS] = slots_id[start_index]
record_id_dict[InstanceName.TRAIN_LABEL_MASK_IDS] = slots_id[start_index + 1]
record_id_dict[InstanceName.TRAIN_LABEL_SEQ_LENS] = slots_id[start_index + 2]
record_id_dict[InstanceName.INFER_LABEL_SRC_IDS] = slots_id[start_index + 3]
record_id_dict[InstanceName.INFER_LABEL_MASK_IDS] = slots_id[start_index + 4]
record_id_dict[InstanceName.INFER_LABEL_SEQ_LENS] = slots_id[start_index + 5]
record_emb_dict = None
if need_emb and self.token_embedding:
record_emb_dict = self.token_embedding.get_token_embedding(record_id_dict)
record_dict = {}
record_dict[InstanceName.RECORD_ID] = record_id_dict
record_dict[InstanceName.RECORD_EMB] = record_emb_dict
return record_dict
def get_field_length(self):
"""获取当前这个field在进行了序列化之后,在slot_id_list中占多少长度
:return:
"""
return FieldLength.GENERATE_LABEL_FIELD
| [
"senta.data.field_reader.base_field_reader.BaseFieldReader.__init__",
"senta.data.util_helper.generate_pad_batch_data",
"numpy.reshape",
"senta.common.register.RegisterSet.tokenizer.__getitem__"
] | [((652, 709), 'senta.data.field_reader.base_field_reader.BaseFieldReader.__init__', 'BaseFieldReader.__init__', (['self'], {'field_config': 'field_config'}), '(self, field_config=field_config)\n', (676, 709), False, 'from senta.data.field_reader.base_field_reader import BaseFieldReader\n'), ((3848, 4025), 'senta.data.util_helper.generate_pad_batch_data', 'generate_pad_batch_data', (['train_src_ids'], {'pad_idx': 'self.field_config.padding_id', 'return_input_mask': '(True)', 'return_seq_lens': '(True)', 'paddle_version_code': 'self.paddle_version_code'}), '(train_src_ids, pad_idx=self.field_config.padding_id,\n return_input_mask=True, return_seq_lens=True, paddle_version_code=self.\n paddle_version_code)\n', (3871, 4025), False, 'from senta.data.util_helper import generate_pad_batch_data\n'), ((4394, 4571), 'senta.data.util_helper.generate_pad_batch_data', 'generate_pad_batch_data', (['infer_src_ids'], {'pad_idx': 'self.field_config.padding_id', 'return_input_mask': '(True)', 'return_seq_lens': '(True)', 'paddle_version_code': 'self.paddle_version_code'}), '(infer_src_ids, pad_idx=self.field_config.padding_id,\n return_input_mask=True, return_seq_lens=True, paddle_version_code=self.\n paddle_version_code)\n', (4417, 4571), False, 'from senta.data.util_helper import generate_pad_batch_data\n'), ((4910, 4999), 'numpy.reshape', 'np.reshape', (['infer_label_ids', '(infer_label_ids.shape[0], infer_label_ids.shape[1], 1)'], {}), '(infer_label_ids, (infer_label_ids.shape[0], infer_label_ids.\n shape[1], 1))\n', (4920, 4999), True, 'import numpy as np\n'), ((825, 900), 'senta.common.register.RegisterSet.tokenizer.__getitem__', 'RegisterSet.tokenizer.__getitem__', (["self.field_config.tokenizer_info['type']"], {}), "(self.field_config.tokenizer_info['type'])\n", (858, 900), False, 'from senta.common.register import RegisterSet\n')] |
class Solution:
def alphabetBoardPath(self, target):
"""
Time Complexity: O(N)
Space Complexity: O(N)
"""
m = {c: [i // 5, i % 5] for i, c in enumerate("abcdefghijklmnopqrstuvwxyz")}
x0, y0 = 0, 0
res = []
for c in target:
x, y = m[c]
if y < y0:
res.append("L" * (y0 - y))
if x < x0:
res.append("U" * (x0 - x))
if x > x0:
res.append("D" * (x - x0))
if y > y0:
res.append("R" * (y - y0))
res.append("!")
x0, y0 = x, y
return "".join(res)
def stringToString(input):
import json
return json.loads(input)
def main():
import sys
import io
def readlines():
for line in io.TextIOWrapper(sys.stdin.buffer, encoding="utf-8"):
yield line.strip("\n")
lines = readlines()
while True:
try:
line = next(lines)
#target = stringToString(line)
target = line
ret = Solution().alphabetBoardPath(target)
out = ret
print(out)
break
except StopIteration:
break
if __name__ == "__main__":
main()
| [
"json.loads",
"io.TextIOWrapper"
] | [((718, 735), 'json.loads', 'json.loads', (['input'], {}), '(input)\n', (728, 735), False, 'import json\n'), ((821, 873), 'io.TextIOWrapper', 'io.TextIOWrapper', (['sys.stdin.buffer'], {'encoding': '"""utf-8"""'}), "(sys.stdin.buffer, encoding='utf-8')\n", (837, 873), False, 'import io\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from knack.log import get_logger
from azure.cli.core.util import sdk_no_wait
from ._client_factory import network_client_factory, network_client_route_table_factory
from ._util import _get_property
logger = get_logger(__name__)
class UpdateContext(object):
def __init__(self, instance):
self.instance = instance
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def update_param(self, prop, value, allow_clear):
if value == '' and allow_clear:
setattr(self.instance, prop, None)
elif value is not None:
setattr(self.instance, prop, value)
def _generic_list(cli_ctx, operation_name, resource_group_name):
ncf = network_client_factory(cli_ctx)
operation_group = getattr(ncf, operation_name)
if resource_group_name:
return operation_group.list_by_resource_group(resource_group_name)
return operation_group.list()
def _get_property(items, name):
result = next((x for x in items if x.name.lower() == name.lower()), None)
if not result:
raise CLIError("Property '{}' does not exist".format(name))
return result
def _upsert(parent, collection_name, obj_to_add, key_name, warn=True):
if not getattr(parent, collection_name, None):
setattr(parent, collection_name, [])
collection = getattr(parent, collection_name, None)
value = getattr(obj_to_add, key_name)
if value is None:
raise CLIError(
"Unable to resolve a value for key '{}' with which to match.".format(key_name))
match = next((x for x in collection if getattr(x, key_name, None) == value), None)
if match:
if warn:
logger.warning("Item '%s' already exists. Replacing with new values.", value)
collection.remove(match)
collection.append(obj_to_add)
def _find_item_at_path(instance, path):
# path accepts the pattern property/name/property/name
curr_item = instance
path_comps = path.split('.')
for i, comp in enumerate(path_comps):
if i % 2:
# name
curr_item = next((x for x in curr_item if x.name == comp), None)
else:
# property
curr_item = getattr(curr_item, comp, None)
if not curr_item:
raise CLIError("not found: '{}' not found at path '{}'".format(comp, '.'.join(path_comps[:i])))
return curr_item
# region VirtualWAN
def create_virtual_wan(cmd, resource_group_name, virtual_wan_name, tags=None, location=None,
security_provider_name=None, branch_to_branch_traffic=None,
vnet_to_vnet_traffic=None, office365_category=None, disable_vpn_encryption=None,
vwan_type=None):
client = network_client_factory(cmd.cli_ctx).virtual_wans
VirtualWAN = cmd.get_models('VirtualWAN')
wan = VirtualWAN(
tags=tags,
location=location,
disable_vpn_encryption=disable_vpn_encryption,
security_provider_name=security_provider_name,
allow_branch_to_branch_traffic=branch_to_branch_traffic,
allow_vnet_to_vnet_traffic=vnet_to_vnet_traffic,
office365_local_breakout_category=office365_category,
type=vwan_type
)
return client.create_or_update(resource_group_name, virtual_wan_name, wan)
def update_virtual_wan(instance, tags=None, security_provider_name=None, branch_to_branch_traffic=None,
vnet_to_vnet_traffic=None, office365_category=None, disable_vpn_encryption=None,
vwan_type=None):
with UpdateContext(instance) as c:
c.update_param('tags', tags, True)
c.update_param('security_provider_name', security_provider_name, False)
c.update_param('allow_branch_to_branch_traffic', branch_to_branch_traffic, False)
c.update_param('allow_vnet_to_vnet_traffic', vnet_to_vnet_traffic, False)
c.update_param('office365_local_breakout_category', office365_category, False)
c.update_param('disable_vpn_encryption', disable_vpn_encryption, False)
c.update_param('type', vwan_type, False)
return instance
def list_virtual_wans(cmd, resource_group_name=None):
    """List virtual WANs in the subscription, or in one resource group if given."""
    return _generic_list(cmd.cli_ctx, 'virtual_wans', resource_group_name)
# endregion
# region VirtualHubs
def create_virtual_hub(cmd, resource_group_name, virtual_hub_name, address_prefix, virtual_wan,
                       location=None, tags=None, no_wait=False, sku=None):
    """Create a virtual hub inside *virtual_wan* (passed as a resource ID).

    Honors ``--no-wait`` via ``sdk_no_wait``.
    """
    client = network_client_factory(cmd.cli_ctx).virtual_hubs
    VirtualHub, SubResource = cmd.get_models('VirtualHub', 'SubResource')
    hub = VirtualHub(
        tags=tags,
        location=location,
        address_prefix=address_prefix,
        virtual_wan=SubResource(id=virtual_wan),
        sku=sku
    )
    return sdk_no_wait(no_wait, client.create_or_update,
                       resource_group_name, virtual_hub_name, hub)
def update_virtual_hub(instance, cmd, address_prefix=None, virtual_wan=None, tags=None, sku=None):
    """Generic-update handler for a virtual hub; only supplied values are written."""
    SubResource = cmd.get_models('SubResource')
    with UpdateContext(instance) as c:
        c.update_param('tags', tags, True)
        c.update_param('address_prefix', address_prefix, False)
        # virtual_wan arrives as a resource ID string and is wrapped in SubResource.
        c.update_param('virtual_wan', SubResource(id=virtual_wan) if virtual_wan else None, False)
        c.update_param('sku', sku, False)
    return instance
def list_virtual_hubs(cmd, resource_group_name=None):
    """List virtual hubs in the subscription, or in one resource group if given."""
    return _generic_list(cmd.cli_ctx, 'virtual_hubs', resource_group_name)
def create_hub_vnet_connection(cmd, resource_group_name, virtual_hub_name, connection_name, remote_virtual_network,
                               allow_hub_to_remote_vnet_transit=None, allow_remote_vnet_to_use_hub_vnet_gateways=None,
                               enable_internet_security=None, no_wait=False):
    """Attach a VNet (by resource ID) to a virtual hub.

    The connection is upserted into the hub's ``virtual_network_connections``
    (warning and replacing if the name already exists), then the whole hub is
    PUT back.  NOTE(review): ``poller.result()`` is always called, which blocks
    even when ``no_wait`` is set — confirm this is intended.
    """
    HubVirtualNetworkConnection, SubResource = cmd.get_models(
        'HubVirtualNetworkConnection', 'SubResource')
    client = network_client_factory(cmd.cli_ctx).virtual_hubs
    hub = client.get(resource_group_name, virtual_hub_name)
    connection = HubVirtualNetworkConnection(
        name=connection_name,
        remote_virtual_network=SubResource(id=remote_virtual_network),
        allow_hub_to_remote_vnet_transit=allow_hub_to_remote_vnet_transit,
        allow_remote_vnet_to_use_hub_vnet_gateway=allow_remote_vnet_to_use_hub_vnet_gateways,
        enable_internet_security=enable_internet_security
    )
    _upsert(hub, 'virtual_network_connections', connection, 'name', warn=True)
    poller = sdk_no_wait(no_wait, client.create_or_update, resource_group_name, virtual_hub_name, hub)
    return _get_property(poller.result().virtual_network_connections, connection_name)
# pylint: disable=inconsistent-return-statements
def add_hub_route(cmd, resource_group_name, virtual_hub_name, address_prefixes, next_hop_ip_address, no_wait=False):
    """Append a static route to the virtual hub's route table.

    Returns the updated route list, or None when the poller exposes no result
    (e.g. with ``--no-wait``).
    """
    VirtualHubRoute = cmd.get_models('VirtualHubRoute')
    client = network_client_factory(cmd.cli_ctx).virtual_hubs
    hub = client.get(resource_group_name, virtual_hub_name)
    route = VirtualHubRoute(address_prefixes=address_prefixes, next_hop_ip_address=next_hop_ip_address)
    hub.route_table.routes.append(route)
    poller = sdk_no_wait(no_wait, client.create_or_update,
                         resource_group_name, virtual_hub_name, hub)
    try:
        return poller.result().route_table.routes
    except AttributeError:
        # no_wait path: the returned object has no populated result
        return
def list_hub_routes(cmd, resource_group_name, virtual_hub_name):
    """Return the static routes of the virtual hub's route table."""
    client = network_client_factory(cmd.cli_ctx).virtual_hubs
    hub = client.get(resource_group_name, virtual_hub_name)
    return hub.route_table.routes
# pylint: disable=inconsistent-return-statements
def remove_hub_route(cmd, resource_group_name, virtual_hub_name, index, no_wait=False):
    """Remove a route (1-based *index*) from a virtual hub's route table.

    Raises CLIError when *index* is outside 1..len(routes).  Previously a
    value <= 0 fell through to ``list.pop(index - 1)``, which via Python's
    negative indexing silently removed a route from the END of the list
    (e.g. index=0 deleted the last route).
    Returns the updated route list, or None when the poller has no result
    (e.g. with ``--no-wait``).
    """
    client = network_client_factory(cmd.cli_ctx).virtual_hubs
    hub = client.get(resource_group_name, virtual_hub_name)
    # list.pop accepts negative indices, so validate explicitly instead of
    # relying on IndexError.
    if not 1 <= index <= len(hub.route_table.routes):
        raise CLIError('invalid index: {}. Index can range from 1 to {}'.format(index, len(hub.route_table.routes)))
    hub.route_table.routes.pop(index - 1)
    poller = sdk_no_wait(no_wait, client.create_or_update,
                         resource_group_name, virtual_hub_name, hub)
    try:
        return poller.result().route_table.routes
    except AttributeError:
        return
# pylint: disable=inconsistent-return-statements
def create_vhub_route_table(cmd, resource_group_name, virtual_hub_name, route_table_name,
                            attached_connections, destination_type, destinations,
                            next_hop_type, next_hops,
                            tags=None, no_wait=False, location=None):
    """Create a v2 hub route table containing a single route built from the args.

    Returns the created table, or None when the poller has no result
    (e.g. with ``--no-wait``).
    """
    VirtualHubRouteTableV2, VirtualHubRouteV2 = cmd.get_models('VirtualHubRouteTableV2', 'VirtualHubRouteV2')
    client = network_client_route_table_factory(cmd.cli_ctx).virtual_hub_route_table_v2s
    route = VirtualHubRouteV2(destination_type=destination_type,
                              destinations=destinations,
                              next_hop_type=next_hop_type,
                              next_hops=next_hops)
    route_table = VirtualHubRouteTableV2(location=location,
                                           tags=tags,
                                           attached_connections=attached_connections,
                                           routes=[route])
    poller = sdk_no_wait(no_wait, client.create_or_update,
                         resource_group_name, virtual_hub_name, route_table_name, route_table)
    try:
        return poller.result()
    except AttributeError:
        return
def update_vhub_route_table(instance, attached_connections=None, tags=None):
    """Generic-update handler for a v2 hub route table; only supplied values are written."""
    with UpdateContext(instance) as c:
        c.update_param('tags', tags, True)
        c.update_param('attached_connections', attached_connections, False)
    return instance
# pylint: disable=inconsistent-return-statements
def add_hub_routetable_route(cmd, resource_group_name, virtual_hub_name, route_table_name,
                             destination_type, destinations,
                             next_hop_type, next_hops, no_wait=False):
    """Append a route to an existing v2 hub route table.

    Returns the updated route list, or None when the poller has no result
    (e.g. with ``--no-wait``).
    """
    VirtualHubRouteV2 = cmd.get_models('VirtualHubRouteV2')
    client = network_client_route_table_factory(cmd.cli_ctx).virtual_hub_route_table_v2s
    route_table = client.get(resource_group_name, virtual_hub_name, route_table_name)
    route = VirtualHubRouteV2(destination_type=destination_type,
                              destinations=destinations,
                              next_hop_type=next_hop_type,
                              next_hops=next_hops)
    route_table.routes.append(route)
    poller = sdk_no_wait(no_wait, client.create_or_update,
                         resource_group_name, virtual_hub_name, route_table_name, route_table)
    try:
        return poller.result().routes
    except AttributeError:
        return
def list_hub_routetable_route(cmd, resource_group_name, virtual_hub_name, route_table_name):
    """Return the routes of a v2 hub route table."""
    client = network_client_route_table_factory(cmd.cli_ctx).virtual_hub_route_table_v2s
    route_table = client.get(resource_group_name, virtual_hub_name, route_table_name)
    return route_table.routes
# pylint: disable=inconsistent-return-statements
def remove_hub_routetable_route(cmd, resource_group_name, virtual_hub_name, route_table_name, index, no_wait=False):
    """Remove a route (1-based *index*) from a v2 hub route table.

    Raises CLIError when *index* is outside 1..len(routes).  Previously a
    value <= 0 fell through to ``list.pop(index - 1)``, silently removing a
    route from the END of the list via Python's negative indexing.
    Returns the updated route list, or None when the poller has no result
    (e.g. with ``--no-wait``).
    """
    client = network_client_route_table_factory(cmd.cli_ctx).virtual_hub_route_table_v2s
    route_table = client.get(resource_group_name, virtual_hub_name, route_table_name)
    # list.pop accepts negative indices, so validate explicitly instead of
    # relying on IndexError.
    if not 1 <= index <= len(route_table.routes):
        raise CLIError('invalid index: {}. Index can range from 1 to {}'.format(index, len(route_table.routes)))
    route_table.routes.pop(index - 1)
    poller = sdk_no_wait(no_wait, client.create_or_update,
                         resource_group_name, virtual_hub_name, route_table_name, route_table)
    try:
        return poller.result().routes
    except AttributeError:
        return
# endregion
# region VpnGateways
def create_vpn_gateway(cmd, resource_group_name, gateway_name, virtual_hub,
                       location=None, tags=None, scale_unit=None,
                       asn=None, bgp_peering_address=None, peer_weight=None, no_wait=False):
    """Create a VPN gateway attached to *virtual_hub* (passed as a resource ID).

    BGP settings are passed as a raw dict accepted by the SDK model.
    Honors ``--no-wait`` via ``sdk_no_wait``.
    """
    client = network_client_factory(cmd.cli_ctx).vpn_gateways
    VpnGateway, SubResource = cmd.get_models('VpnGateway', 'SubResource')
    gateway = VpnGateway(
        location=location,
        tags=tags,
        virtual_hub=SubResource(id=virtual_hub) if virtual_hub else None,
        vpn_gateway_scale_unit=scale_unit,
        bgp_settings={
            'asn': asn,
            'bgpPeeringAddress': bgp_peering_address,
            'peerWeight': peer_weight
        }
    )
    return sdk_no_wait(no_wait, client.create_or_update,
                       resource_group_name, gateway_name, gateway)
def update_vpn_gateway(instance, cmd, virtual_hub=None, tags=None, scale_unit=None,
                       asn=None, bgp_peering_address=None, peer_weight=None):
    """Generic-update handler for a VPN gateway.

    Top-level properties and the nested ``bgp_settings`` object are updated
    through separate UpdateContext blocks; only supplied values are written.
    """
    SubResource = cmd.get_models('SubResource')
    with UpdateContext(instance) as c:
        c.update_param('virtual_hub', SubResource(id=virtual_hub) if virtual_hub else None, True)
        c.update_param('tags', tags, True)
        c.update_param('vpn_gateway_scale_unit', scale_unit, False)
    bgp_settings = instance.bgp_settings
    with UpdateContext(bgp_settings) as c:
        c.update_param('asn', asn, False)
        c.update_param('bgp_peering_address', bgp_peering_address, False)
        c.update_param('peer_weight', peer_weight, False)
    return instance
def create_vpn_gateway_connection(cmd, resource_group_name, gateway_name, connection_name,
                                 remote_vpn_site, routing_weight=None, protocol_type=None,
                                 connection_bandwidth=None, shared_key=None, enable_bgp=None,
                                 enable_rate_limiting=None, enable_internet_security=None, no_wait=False):
    """Create a site-to-site connection on an existing VPN gateway.

    The connection is upserted into the gateway's ``connections`` list and the
    whole gateway is PUT back.  Honors ``--no-wait`` via ``sdk_no_wait``.
    """
    client = network_client_factory(cmd.cli_ctx).vpn_gateways
    VpnConnection, SubResource = cmd.get_models('VpnConnection', 'SubResource')
    gateway = client.get(resource_group_name, gateway_name)
    conn = VpnConnection(
        name=connection_name,
        remote_vpn_site=SubResource(id=remote_vpn_site),
        routing_weight=routing_weight,
        protocol_type=protocol_type,
        connection_bandwidth=connection_bandwidth,
        shared_key=shared_key,
        enable_bgp=enable_bgp,
        enable_rate_limiting=enable_rate_limiting,
        enable_internet_security=enable_internet_security
    )
    _upsert(gateway, 'connections', conn, 'name')
    return sdk_no_wait(no_wait, client.create_or_update,
                       resource_group_name, gateway_name, gateway)
def list_vpn_gateways(cmd, resource_group_name=None):
    """List VPN gateways in the subscription, or in one resource group if given."""
    return _generic_list(cmd.cli_ctx, 'vpn_gateways', resource_group_name)
# pylint: disable=inconsistent-return-statements
def add_vpn_gateway_connection_ipsec_policy(cmd, resource_group_name, gateway_name, connection_name,
                                            sa_life_time_seconds, sa_data_size_kilobytes, ipsec_encryption,
                                            ipsec_integrity, ike_encryption, ike_integrity, dh_group, pfs_group,
                                            no_wait=False):
    """Append an IPsec policy to one connection of a VPN gateway.

    The connection is located via the ``connections.<name>`` path, mutated in
    place, re-upserted into the gateway and the gateway PUT back.  Returns the
    updated connection, or None when the poller has no result (``--no-wait``).
    """
    IpsecPolicy = cmd.get_models('IpsecPolicy')
    client = network_client_factory(cmd.cli_ctx).vpn_gateways
    gateway = client.get(resource_group_name, gateway_name)
    conn = _find_item_at_path(gateway, 'connections.{}'.format(connection_name))
    conn.ipsec_policies.append(
        IpsecPolicy(
            sa_life_time_seconds=sa_life_time_seconds,
            sa_data_size_kilobytes=sa_data_size_kilobytes,
            ipsec_encryption=ipsec_encryption,
            ipsec_integrity=ipsec_integrity,
            ike_encryption=ike_encryption,
            ike_integrity=ike_integrity,
            dh_group=dh_group,
            pfs_group=pfs_group
        )
    )
    _upsert(gateway, 'connections', conn, 'name', warn=False)
    poller = sdk_no_wait(no_wait, client.create_or_update,
                         resource_group_name, gateway_name, gateway)
    try:
        return _get_property(poller.result().connections, connection_name)
    except AttributeError:
        return
def list_vpn_conn_ipsec_policies(cmd, resource_group_name, gateway_name, connection_name):
    """Return the IPsec policies of one VPN gateway connection."""
    client = network_client_factory(cmd.cli_ctx).vpn_gateways
    gateway = client.get(resource_group_name, gateway_name)
    conn = _find_item_at_path(gateway, 'connections.{}'.format(connection_name))
    return conn.ipsec_policies
# pylint: disable=inconsistent-return-statements
def remove_vpn_conn_ipsec_policy(cmd, resource_group_name, gateway_name, connection_name, index, no_wait=False):
    """Remove an IPsec policy (1-based *index*) from a VPN gateway connection.

    Raises CLIError when *index* is outside 1..len(ipsec_policies).  Previously
    a value <= 0 fell through to ``list.pop(index - 1)``, silently removing a
    policy from the END of the list via Python's negative indexing.
    Returns the updated connection, or None when the poller has no result
    (e.g. with ``--no-wait``).
    """
    client = network_client_factory(cmd.cli_ctx).vpn_gateways
    gateway = client.get(resource_group_name, gateway_name)
    conn = _find_item_at_path(gateway, 'connections.{}'.format(connection_name))
    # list.pop accepts negative indices, so validate explicitly instead of
    # relying on IndexError.
    if not 1 <= index <= len(conn.ipsec_policies):
        raise CLIError('invalid index: {}. Index can range from 1 to {}'.format(index, len(conn.ipsec_policies)))
    conn.ipsec_policies.pop(index - 1)
    _upsert(gateway, 'connections', conn, 'name', warn=False)
    poller = sdk_no_wait(no_wait, client.create_or_update,
                         resource_group_name, gateway_name, gateway)
    try:
        return _get_property(poller.result().connections, connection_name)
    except AttributeError:
        return
# endregion
# region VpnSites
def create_vpn_site(cmd, resource_group_name, vpn_site_name, ip_address,
                    asn=None, bgp_peering_address=None,
                    virtual_wan=None, location=None, tags=None,
                    site_key=None, address_prefixes=None, is_security_site=None,
                    device_vendor=None, device_model=None, link_speed=None,
                    peer_weight=None, no_wait=False):
    """Create a VPN site (on-premises endpoint description) in *virtual_wan*.

    Device/address/BGP properties are passed as raw dicts accepted by the SDK
    model; ``bgp_properties`` is dropped entirely when no BGP argument was
    supplied.  Honors ``--no-wait`` via ``sdk_no_wait``.
    """
    client = network_client_factory(cmd.cli_ctx).vpn_sites
    VpnSite, SubResource = cmd.get_models('VpnSite', 'SubResource')
    site = VpnSite(
        location=location,
        tags=tags,
        is_security_site=is_security_site,
        ip_address=ip_address,
        site_key=site_key,
        virtual_wan=SubResource(id=virtual_wan) if virtual_wan else None,
        address_space={'addressPrefixes': address_prefixes},
        device_properties={
            'deviceVendor': device_vendor,
            'deviceModel': device_model,
            'linkSpeedInMbps': link_speed
        },
        bgp_properties={
            'asn': asn,
            'bgpPeeringAddress': bgp_peering_address,
            'peerWeight': peer_weight
        }
    )
    # Avoid sending an all-None BGP payload when the user gave no BGP options.
    if not any([asn, bgp_peering_address, peer_weight]):
        site.bgp_properties = None
    return sdk_no_wait(no_wait, client.create_or_update,
                       resource_group_name, vpn_site_name, site)
def update_vpn_site(instance, cmd, ip_address=None, virtual_wan=None, tags=None,
                    site_key=None, address_prefixes=None, is_security_site=None,
                    device_vendor=None, device_model=None, link_speed=None,
                    asn=None, bgp_peering_address=None, peer_weight=None):
    """Generic-update handler for a VPN site.

    Top-level properties and the nested device/address/BGP objects are updated
    through separate UpdateContext blocks; only supplied values are written.
    """
    SubResource = cmd.get_models('SubResource')
    with UpdateContext(instance) as c:
        c.update_param('tags', tags, True)
        c.update_param('ip_address', ip_address, False)
        c.update_param('virtual_wan', SubResource(id=virtual_wan) if virtual_wan else None, False)
        c.update_param('is_security_site', is_security_site, False)
        c.update_param('site_key', site_key, True)
    device_properties = instance.device_properties
    with UpdateContext(device_properties) as c:
        c.update_param('device_vendor', device_vendor, True)
        c.update_param('device_model', device_model, True)
        c.update_param('link_speed_in_mbps', link_speed, False)
    address_space = instance.address_space
    with UpdateContext(address_space) as c:
        c.update_param('address_prefixes', address_prefixes, False)
    bgp_properties = instance.bgp_properties
    with UpdateContext(bgp_properties) as c:
        c.update_param('asn', asn, False)
        c.update_param('bgp_peering_address', bgp_peering_address, False)
        c.update_param('peer_weight', peer_weight, False)
    return instance
def list_vpn_sites(cmd, resource_group_name=None):
    """List VPN sites in the subscription, or in one resource group if given."""
    return _generic_list(cmd.cli_ctx, 'vpn_sites', resource_group_name)
# endregion
| [
"knack.log.get_logger",
"azure.cli.core.util.sdk_no_wait"
] | [((589, 609), 'knack.log.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (599, 609), False, 'from knack.log import get_logger\n'), ((5202, 5295), 'azure.cli.core.util.sdk_no_wait', 'sdk_no_wait', (['no_wait', 'client.create_or_update', 'resource_group_name', 'virtual_hub_name', 'hub'], {}), '(no_wait, client.create_or_update, resource_group_name,\n virtual_hub_name, hub)\n', (5213, 5295), False, 'from azure.cli.core.util import sdk_no_wait\n'), ((6928, 7021), 'azure.cli.core.util.sdk_no_wait', 'sdk_no_wait', (['no_wait', 'client.create_or_update', 'resource_group_name', 'virtual_hub_name', 'hub'], {}), '(no_wait, client.create_or_update, resource_group_name,\n virtual_hub_name, hub)\n', (6939, 7021), False, 'from azure.cli.core.util import sdk_no_wait\n'), ((7609, 7702), 'azure.cli.core.util.sdk_no_wait', 'sdk_no_wait', (['no_wait', 'client.create_or_update', 'resource_group_name', 'virtual_hub_name', 'hub'], {}), '(no_wait, client.create_or_update, resource_group_name,\n virtual_hub_name, hub)\n', (7620, 7702), False, 'from azure.cli.core.util import sdk_no_wait\n'), ((8517, 8610), 'azure.cli.core.util.sdk_no_wait', 'sdk_no_wait', (['no_wait', 'client.create_or_update', 'resource_group_name', 'virtual_hub_name', 'hub'], {}), '(no_wait, client.create_or_update, resource_group_name,\n virtual_hub_name, hub)\n', (8528, 8610), False, 'from azure.cli.core.util import sdk_no_wait\n'), ((9777, 9896), 'azure.cli.core.util.sdk_no_wait', 'sdk_no_wait', (['no_wait', 'client.create_or_update', 'resource_group_name', 'virtual_hub_name', 'route_table_name', 'route_table'], {}), '(no_wait, client.create_or_update, resource_group_name,\n virtual_hub_name, route_table_name, route_table)\n', (9788, 9896), False, 'from azure.cli.core.util import sdk_no_wait\n'), ((11048, 11167), 'azure.cli.core.util.sdk_no_wait', 'sdk_no_wait', (['no_wait', 'client.create_or_update', 'resource_group_name', 'virtual_hub_name', 'route_table_name', 'route_table'], {}), '(no_wait, 
client.create_or_update, resource_group_name,\n virtual_hub_name, route_table_name, route_table)\n', (11059, 11167), False, 'from azure.cli.core.util import sdk_no_wait\n'), ((12121, 12240), 'azure.cli.core.util.sdk_no_wait', 'sdk_no_wait', (['no_wait', 'client.create_or_update', 'resource_group_name', 'virtual_hub_name', 'route_table_name', 'route_table'], {}), '(no_wait, client.create_or_update, resource_group_name,\n virtual_hub_name, route_table_name, route_table)\n', (12132, 12240), False, 'from azure.cli.core.util import sdk_no_wait\n'), ((13112, 13205), 'azure.cli.core.util.sdk_no_wait', 'sdk_no_wait', (['no_wait', 'client.create_or_update', 'resource_group_name', 'gateway_name', 'gateway'], {}), '(no_wait, client.create_or_update, resource_group_name,\n gateway_name, gateway)\n', (13123, 13205), False, 'from azure.cli.core.util import sdk_no_wait\n'), ((15033, 15126), 'azure.cli.core.util.sdk_no_wait', 'sdk_no_wait', (['no_wait', 'client.create_or_update', 'resource_group_name', 'gateway_name', 'gateway'], {}), '(no_wait, client.create_or_update, resource_group_name,\n gateway_name, gateway)\n', (15044, 15126), False, 'from azure.cli.core.util import sdk_no_wait\n'), ((16458, 16551), 'azure.cli.core.util.sdk_no_wait', 'sdk_no_wait', (['no_wait', 'client.create_or_update', 'resource_group_name', 'gateway_name', 'gateway'], {}), '(no_wait, client.create_or_update, resource_group_name,\n gateway_name, gateway)\n', (16469, 16551), False, 'from azure.cli.core.util import sdk_no_wait\n'), ((17657, 17750), 'azure.cli.core.util.sdk_no_wait', 'sdk_no_wait', (['no_wait', 'client.create_or_update', 'resource_group_name', 'gateway_name', 'gateway'], {}), '(no_wait, client.create_or_update, resource_group_name,\n gateway_name, gateway)\n', (17668, 17750), False, 'from azure.cli.core.util import sdk_no_wait\n'), ((19188, 19279), 'azure.cli.core.util.sdk_no_wait', 'sdk_no_wait', (['no_wait', 'client.create_or_update', 'resource_group_name', 'vpn_site_name', 'site'], {}), 
'(no_wait, client.create_or_update, resource_group_name,\n vpn_site_name, site)\n', (19199, 19279), False, 'from azure.cli.core.util import sdk_no_wait\n')] |
import asyncio
from aiogram.dispatcher.middlewares import BaseMiddleware
class EnvironmentMiddleware(BaseMiddleware):
    """aiogram middleware that injects shared objects into handler data.

    On every ``pre_process_*`` stage, merges the bot, the dispatcher, its
    event loop and an arbitrary user-supplied *context* dict into the
    mutable ``data`` mapping that aiogram passes to handlers.
    """
    def __init__(self, context=None):
        super(EnvironmentMiddleware, self).__init__()
        # Avoid a mutable default argument; each instance gets its own dict.
        if context is None:
            context = {}
        self.context = context
    def update_data(self, data):
        # ``self.manager`` is set by aiogram when the middleware is installed —
        # TODO confirm against the aiogram middleware-manager docs.
        dp = self.manager.dispatcher
        data.update(bot=dp.bot, dispatcher=dp,
                    loop=dp.loop or asyncio.get_event_loop())
        if self.context:
            data.update(self.context)
    async def trigger(self, action, args):
        # Only act on non-error pre-processing stages; args[-1] is the
        # handler's ``data`` dict.
        if "error" not in action and action.startswith("pre_process_"):
            self.update_data(args[-1])
            return True
| [
"asyncio.get_event_loop"
] | [((452, 476), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (474, 476), False, 'import asyncio\n')] |
from Dao.squad_dao import SquadDao
from Model.squad import *
from Controller.backend_controller import BackendController
from Controller.frontend_controller import FrontendController
from Controller.sgbd_controller import SgbdController
class SquadController:
    """CRUD facade delegating squad persistence to the DAO layer.

    Note: the DAO/controller helpers are class attributes, i.e. shared by
    every SquadController instance.
    """
    dao = SquadDao()
    be = BackendController()
    fro = FrontendController()
    bd = SgbdController()
    def listar_todos(self):
        # Return every squad.
        return self.dao.listar_todos()
    def buscar_por_id(self, id):
        # Fetch a single squad by primary key.
        return self.dao.buscar_por_id(id)
    def salvar(self, squad:Squad):
        # Persist the three child entities first, writing their new ids back
        # onto *squad*, then persist the squad itself.
        squad.backend.idbackend = self.be.salvar(squad.backend)
        squad.frontend.idfrontend = self.fro.salvar(squad.frontend)
        squad.sgbd.idsgbd = self.bd.salvar(squad.sgbd)
        return self.dao.salvar(squad)
    def alterar(self, squad:Squad):
        # Update an existing squad (children are NOT re-saved here).
        self.dao.alterar(squad)
    def deletar(self, id):
        self.dao.deletar(id)
| [
"Controller.sgbd_controller.SgbdController",
"Controller.frontend_controller.FrontendController",
"Controller.backend_controller.BackendController",
"Dao.squad_dao.SquadDao"
] | [((271, 281), 'Dao.squad_dao.SquadDao', 'SquadDao', ([], {}), '()\n', (279, 281), False, 'from Dao.squad_dao import SquadDao\n'), ((291, 310), 'Controller.backend_controller.BackendController', 'BackendController', ([], {}), '()\n', (308, 310), False, 'from Controller.backend_controller import BackendController\n'), ((321, 341), 'Controller.frontend_controller.FrontendController', 'FrontendController', ([], {}), '()\n', (339, 341), False, 'from Controller.frontend_controller import FrontendController\n'), ((351, 367), 'Controller.sgbd_controller.SgbdController', 'SgbdController', ([], {}), '()\n', (365, 367), False, 'from Controller.sgbd_controller import SgbdController\n')] |
from django import forms
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm, PasswordChangeForm, UserChangeForm
from authapp.models import UserProfile, Status
class LoginForm(AuthenticationForm):
    """Authentication form whose widgets carry Bootstrap-style CSS classes."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tag every widget with 'form-control <field name>' for styling.
        for field_name, field in self.fields.items():
            field.widget.attrs['class'] = f'form-control {field_name}'
class RegisterForm(UserCreationForm, forms.ModelForm):
    """Sign-up form adding a role ('status') selector and styled widgets."""
    # A plain CharField rendered as a <select>; values come from Status.choices.
    status = forms.CharField(label='Кто вы?', widget=forms.Select(choices=Status.choices))
    class Meta:
        model = UserProfile
        fields = (
            'username',
            'first_name',
            'last_name',
            'status',
            'email',
        )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Style every field (incl. password1/password2 from UserCreationForm)
        # and suppress Django's default help text.
        for name, item in self.fields.items():
            item.widget.attrs['class'] = f'form-control {name}'
            item.help_text = ''
class ChangeForm(UserChangeForm):
    """Profile-edit form; styles widgets and hides the password hash field."""
    class Meta:
        model = UserProfile
        fields = ('username', 'first_name', 'last_name', 'email', 'address', 'phone_number')
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for name, item in self.fields.items():
            item.widget.attrs['class'] = 'form-control'
            item.help_text = ''
            # UserChangeForm adds a read-only 'password' field; hide it.
            if name == 'password':
                item.widget = forms.HiddenInput()
class ChangePassword(PasswordChangeForm):
    """Password-change form with styled widgets.

    NOTE(review): ``PasswordChangeForm`` is not a ModelForm, so this ``Meta``
    is presumably ignored by Django — confirm before relying on it.
    """
    class Meta:
        model = UserProfile
        fields = 'password'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for name, item in self.fields.items():
            item.widget.attrs['class'] = 'form-control'
    def get_form(self, form_class):
        # NOTE(review): this looks like a class-based-view hook pasted onto a
        # form; forms have no ``self.request``/``get_form_kwargs`` — verify
        # whether any caller actually uses this.
        return form_class(self.request.user, **self.get_form_kwargs())
| [
"django.forms.HiddenInput",
"django.forms.Select"
] | [((532, 568), 'django.forms.Select', 'forms.Select', ([], {'choices': 'Status.choices'}), '(choices=Status.choices)\n', (544, 568), False, 'from django import forms\n'), ((1441, 1460), 'django.forms.HiddenInput', 'forms.HiddenInput', ([], {}), '()\n', (1458, 1460), False, 'from django import forms\n')] |
# Generated by Django 2.2.6 on 2019-11-02 17:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add optional ``category`` and ``label`` choice fields to ``core.Item``."""
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='item',
            name='category',
            field=models.CharField(blank=True, choices=[('S', 'Shirt'), ('Sw', 'Sport wear'), ('Ow', 'Outwear')], max_length=10, null=True),
        ),
        migrations.AddField(
            model_name='item',
            name='label',
            field=models.CharField(blank=True, choices=[('P', 'primary'), ('S', 'secondary'), ('D', 'danger')], max_length=10, null=True),
        ),
    ]
| [
"django.db.models.CharField"
] | [((320, 445), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('S', 'Shirt'), ('Sw', 'Sport wear'), ('Ow', 'Outwear')]", 'max_length': '(10)', 'null': '(True)'}), "(blank=True, choices=[('S', 'Shirt'), ('Sw', 'Sport wear'),\n ('Ow', 'Outwear')], max_length=10, null=True)\n", (336, 445), False, 'from django.db import migrations, models\n'), ((558, 681), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('P', 'primary'), ('S', 'secondary'), ('D', 'danger')]", 'max_length': '(10)', 'null': '(True)'}), "(blank=True, choices=[('P', 'primary'), ('S', 'secondary'),\n ('D', 'danger')], max_length=10, null=True)\n", (574, 681), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-12-08 16:58
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: add ``original_file_url`` to both upload-message models.

    The hard-coded datetimes were one-off defaults captured at generation time
    to populate existing rows; ``preserve_default=False`` drops them afterwards.
    """
    dependencies = [
        ('stamper', '0003_auto_20161122_1253'),
    ]
    operations = [
        migrations.AddField(
            model_name='fileuploadmessage',
            name='original_file_url',
            field=models.CharField(default=datetime.datetime(2016, 12, 8, 16, 57, 50, 623808, tzinfo=utc), max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='imageuploadmessage',
            name='original_file_url',
            field=models.CharField(default=datetime.datetime(2016, 12, 8, 16, 58, 2, 437475, tzinfo=utc), max_length=255),
            preserve_default=False,
        ),
    ]
| [
"datetime.datetime"
] | [((499, 561), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(12)', '(8)', '(16)', '(57)', '(50)', '(623808)'], {'tzinfo': 'utc'}), '(2016, 12, 8, 16, 57, 50, 623808, tzinfo=utc)\n', (516, 561), False, 'import datetime\n'), ((782, 843), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(12)', '(8)', '(16)', '(58)', '(2)', '(437475)'], {'tzinfo': 'utc'}), '(2016, 12, 8, 16, 58, 2, 437475, tzinfo=utc)\n', (799, 843), False, 'import datetime\n')] |
import argparse,time,os,pickle
import matplotlib.pyplot as plt
import numpy as np
from player import *
# Headless backend so figures can be saved on machines without a display.
plt.switch_backend('agg')
np.set_printoptions(precision=2)
class lemon:
def __init__(self, std, num_sellers, num_actions, unit, minx):
self.std = std
self.unit = unit
self.num_sellers = num_sellers
self.num_players = num_sellers + 1
self.quality = self.transform(np.arange(num_sellers) )
self.num_actions = num_actions
self.welfare_factor = 1.5
self.listing_cost = 3
def __str__(self):
return f"Lemon({self.num_sellers}) with noise std. {self.std},\nquality: {self.quality}\n"
def transform(self, x):
return x*unit + minx
def feedback(self, actions):
rewards = np.zeros(self.num_players)
seller_actions = actions[1:]
price = self.transform( actions[0] ) - 1
sold = seller_actions * (self.quality < price) ### quality below price and is selling
supply = np.sum(sold)
if supply > 0:
avg_quality = np.sum(sold * self.quality) / supply
q_noise = np.random.randn(self.num_sellers) * 5
rewards[1:] = seller_actions * [ (self.quality + q_noise < price) * (price - self.quality) - self.listing_cost ]
rewards[0] = ( self.welfare_factor * avg_quality - price )
noise = np.random.randn(self.num_players) * self.std
rewards += noise
else:
avg_quality = 0
rewards = np.zeros(self.num_players)
rewards[1:] = - seller_actions * self.listing_cost
rewards /= self.num_players
return rewards, supply, price, avg_quality
class logger:
    """Collects per-round market statistics and renders/saves them.

    At most ``samples`` evenly spaced rounds out of ``iterations`` are kept.
    Artifacts are written next to ``log_dir``: a text log, two PNG figures
    and a pickle of the sampled seller action probabilities.
    """
    def __init__(self, log_dir, env, iterations, samples=None):
        self.log_dir = log_dir
        self.env = env
        self.supply_history = []
        self.demand_history = []
        self.price_history = []
        self.avg_quality_history = []
        self.iterations = iterations
        self.samples = self.iterations if not samples else samples
        # Guard against samples > iterations: the integer division would
        # yield step_size == 0 and crash record_round with modulo-by-zero.
        self.step_size = max(1, self.iterations // self.samples)
        self.sampled_actions = []
    def write(self, text):
        """Append *text* to '<log_dir>.log'."""
        with open(self.log_dir + '.log', 'a') as f:
            f.write(text)
    def record_round(self, t, supply, price, avg_quality, actions):
        """Store this round's stats when round *t* falls on a sampling step."""
        if t % self.step_size == 0:
            self.supply_history.append(supply)
            self.price_history.append(price)
            self.avg_quality_history.append(avg_quality)
            # Keep only the sellers' entries (player 0 is the buyer); copy so
            # later in-place updates by the caller don't alias the history.
            self.sampled_actions.append(actions[1:].copy())
    def plot(self):
        """Write the price/supply curves, a seller-action heat map and a
        pickle of the sampled actions next to ``log_dir``."""
        time_axis = np.arange(0, self.iterations, step=self.step_size)
        fig, (ax1, ax2) = plt.subplots(2, 1)
        ax1.plot(time_axis, self.supply_history, label="supply")
        ax1.set_ylabel('#units')
        ax1.legend(loc="upper left")
        ax2.plot(time_axis, self.price_history, label="price")
        ax2.plot(time_axis, self.avg_quality_history, label="avg. quality")
        ax2.set_ylabel('$')
        ax2.set_xlabel('#round')
        ax2.legend(loc="upper left")
        fig.suptitle(f"Lemon({self.env.num_sellers}) with noise std. {self.env.std}")
        plt.savefig(self.log_dir + '_price.png')
        plt.clf()
        fig, ax3 = plt.subplots(1, 1)
        im = ax3.imshow(np.asarray(self.sampled_actions).T, aspect="auto")
        cbar = ax3.figure.colorbar(im, ax=ax3)
        cbar.ax.set_ylabel("prob. to sell", rotation=-90, va="bottom")
        ax3.set_yticks(np.arange(0, self.env.num_sellers, step=5))
        ax3.set_ylabel('#player')
        ax3.set_xlabel('#round')
        fig.suptitle(f"Lemon({self.env.num_sellers}) with noise std. {self.env.std}")
        plt.savefig(self.log_dir + '_trend.png')
        plt.clf()
        with open(self.log_dir + '_history.pickle', 'wb') as f:
            pickle.dump(self.sampled_actions, f)
def find_latest(prefix, suffix):
    """Return the smallest non-negative integer i such that the file
    '<prefix><i><suffix>' does not yet exist on disk."""
    index = 0
    while True:
        if not os.path.exists('{}{}{}'.format(prefix, index, suffix)):
            return index
        index += 1
if __name__ == '__main__':
    # CLI entry point: run `num_repeat` independent simulations of the lemon
    # market with the chosen player strategy, logging results under results/.
    parser = argparse.ArgumentParser()
    describe = lambda names : ''.join( [', {}: {}'.format(i, n) for i,n in enumerate(names)] )
    parser.add_argument('--std', type=float, default=0, help='noise std. in feedback')
    parser.add_argument('--iterations', type=int, default=100, help='number of rounds to play')
    parser.add_argument('--strategy', type=int, help='player strategy' + describe(strategy_choice_names))
    parser.add_argument('--num_sellers', type=int, help='number of sellers ' )
    parser.add_argument('--num_actions', type=int, help='number of buyers ')
    parser.add_argument('--unit', type=float, default=1, help='discretized unit')
    parser.add_argument('--minx', type=float, default=0, help='min action')
    parser.add_argument('--samples', type=int, default=100, help='number of samples to save' )
    parser.add_argument('--new', default=False, action='store_true', help='whether to generate a new env instance')
    parser.add_argument('--num_repeat', type=int, default=1, help='number of repeated simulation')
    parser.add_argument('--force_env', default=False, action='store_true', help='whether to use a specified env instance')
    args = parser.parse_args()
    std = args.std
    iterations = args.iterations
    strategy = args.strategy
    num_sellers = args.num_sellers
    num_buyers = 1
    num_actions = args.num_actions
    num_players = num_sellers+num_buyers
    unit = args.unit
    minx = args.minx
    samples = args.samples
    env_name = "lemon3"
    # strategy_choice_names comes from `from player import *`.
    strategy_name = strategy_choice_names[strategy]
    j = 0
    while j < args.num_repeat:
        log_dir = f'results/{env_name}/{strategy_name}'
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
            print("created directory")
        else:
            print("existing directory")
        prefix = f'results/{env_name}/{num_sellers}_{num_buyers}|{std}|{unit}|{minx}#'
        if not args.force_env:
            # Reuse the most recent pickled env unless --new was given.
            i = find_latest(prefix, '.pickle')
            if not args.new and i > 0:
                env_dir = prefix + str(i-1) + '.pickle'
                f = open(env_dir, 'rb')
                env = pickle.load(f)
                print("load env at " + env_dir)
                f.close()
            else:
                env = lemon(std, num_sellers, num_actions, unit, minx)
                env_dir = prefix + str(i) + '.pickle'
                f = open(env_dir, 'wb')
                pickle.dump(env, f )
                print("save env at "+ env_dir)
                f.close()
        else:
            # NOTE(review): `specified_env` is not defined in this file —
            # presumably exported by `player` via the star import; verify.
            i = specified_env[j]
            env_dir = prefix + str(i) + '.pickle'
            if not os.path.exists(log_dir):
                print("env path not found ", log_dir)
                exit()
            f = open(env_dir, 'rb')
            env = pickle.load(f)
            print("load env at " + env_dir)
            f.close()
        player_module = __import__('player')
        if strategy != 4:
            # One buyer over num_actions prices, plus binary sellers.
            players = [getattr(player_module, strategy_name)(num_actions, iterations) ]
            players.extend( [getattr(player_module, strategy_name)(2, iterations) for i in range(num_sellers) ] )
        else:
            # Strategy 4 takes extra hyper-parameters (a, b); values are
            # hard-coded here for buyer (a0, b0) and sellers (a1, b1).
            a0 = 50
            b0 = 0.5
            a1 = 50
            b1 = 0.5
            players = [getattr(player_module, strategy_name)(num_actions, iterations, a0, b0) ]
            players.extend( [getattr(player_module, strategy_name)(2, iterations, a1, b1) for i in range(num_sellers) ] )
            print(f'beta = {players[0].beta}, b = {players[0].b}, beta = {players[1].beta}, b = {players[1].b}' )
        i = find_latest(f'{log_dir}/', '.log')
        log_dir = f'{log_dir}/{i}'
        L = logger(log_dir, env, iterations, samples=samples)
        start = time.time()
        L.write("iterations: "+str(iterations) + "\n")
        L.write('Environment:\n\t'+str(env)+'\n')
        actions = np.zeros(num_players, dtype=int)
        action_probs = np.zeros(num_players, dtype=float)
        # Main simulation loop: every player acts, the market gives feedback,
        # every player learns from its own reward.
        for t in range(1, iterations+1):
            for i, p in enumerate(players):
                actions[i] = p.act()
                action_probs[i] = p.action_prob[1]
            rewards, supply, price, avg_quality = env.feedback( actions )
            for a, p, r in zip(actions, players, rewards ):
                p.feedback(a, r)
            L.record_round(t, supply, price, avg_quality, action_probs)
        for i, p in enumerate(players):
            L.write(f'Player{i}:\n\t{p}\n')
        L.plot()
        end = time.time()
        print(log_dir, end-start)
        j += 1
| [
"os.path.exists",
"matplotlib.pyplot.savefig",
"pickle.dump",
"argparse.ArgumentParser",
"os.makedirs",
"matplotlib.pyplot.clf",
"pickle.load",
"numpy.asarray",
"numpy.sum",
"numpy.zeros",
"numpy.random.randn",
"matplotlib.pyplot.switch_backend",
"time.time",
"matplotlib.pyplot.subplots",
... | [((103, 128), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (121, 128), True, 'import matplotlib.pyplot as plt\n'), ((131, 163), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (150, 163), True, 'import numpy as np\n'), ((3454, 3492), 'os.path.exists', 'os.path.exists', (['f"""{prefix}{i}{suffix}"""'], {}), "(f'{prefix}{i}{suffix}')\n", (3468, 3492), False, 'import argparse, time, os, pickle\n'), ((3553, 3578), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3576, 3578), False, 'import argparse, time, os, pickle\n'), ((697, 723), 'numpy.zeros', 'np.zeros', (['self.num_players'], {}), '(self.num_players)\n', (705, 723), True, 'import numpy as np\n'), ((900, 912), 'numpy.sum', 'np.sum', (['sold'], {}), '(sold)\n', (906, 912), True, 'import numpy as np\n'), ((2285, 2335), 'numpy.arange', 'np.arange', (['(0)', 'self.iterations'], {'step': 'self.step_size'}), '(0, self.iterations, step=self.step_size)\n', (2294, 2335), True, 'import numpy as np\n'), ((2358, 2376), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (2370, 2376), True, 'import matplotlib.pyplot as plt\n'), ((2789, 2829), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.log_dir + '_price.png')"], {}), "(self.log_dir + '_price.png')\n", (2800, 2829), True, 'import matplotlib.pyplot as plt\n'), ((2834, 2843), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2841, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2876), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2870, 2876), True, 'import matplotlib.pyplot as plt\n'), ((3254, 3294), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.log_dir + '_trend.png')"], {}), "(self.log_dir + '_trend.png')\n", (3265, 3294), True, 'import matplotlib.pyplot as plt\n'), ((3299, 3308), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3306, 3308), True, 'import 
matplotlib.pyplot as plt\n'), ((6807, 6818), 'time.time', 'time.time', ([], {}), '()\n', (6816, 6818), False, 'import argparse, time, os, pickle\n'), ((6925, 6957), 'numpy.zeros', 'np.zeros', (['num_players'], {'dtype': 'int'}), '(num_players, dtype=int)\n', (6933, 6957), True, 'import numpy as np\n'), ((6975, 7009), 'numpy.zeros', 'np.zeros', (['num_players'], {'dtype': 'float'}), '(num_players, dtype=float)\n', (6983, 7009), True, 'import numpy as np\n'), ((7443, 7454), 'time.time', 'time.time', ([], {}), '()\n', (7452, 7454), False, 'import argparse, time, os, pickle\n'), ((380, 402), 'numpy.arange', 'np.arange', (['num_sellers'], {}), '(num_sellers)\n', (389, 402), True, 'import numpy as np\n'), ((1334, 1360), 'numpy.zeros', 'np.zeros', (['self.num_players'], {}), '(self.num_players)\n', (1342, 1360), True, 'import numpy as np\n'), ((3071, 3113), 'numpy.arange', 'np.arange', (['(0)', 'self.env.num_sellers'], {'step': '(5)'}), '(0, self.env.num_sellers, step=5)\n', (3080, 3113), True, 'import numpy as np\n'), ((3369, 3405), 'pickle.dump', 'pickle.dump', (['self.sampled_actions', 'f'], {}), '(self.sampled_actions, f)\n', (3380, 3405), False, 'import argparse, time, os, pickle\n'), ((5126, 5149), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (5140, 5149), False, 'import argparse, time, os, pickle\n'), ((5157, 5177), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (5168, 5177), False, 'import argparse, time, os, pickle\n'), ((5991, 6005), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6002, 6005), False, 'import argparse, time, os, pickle\n'), ((948, 975), 'numpy.sum', 'np.sum', (['(sold * self.quality)'], {}), '(sold * self.quality)\n', (954, 975), True, 'import numpy as np\n'), ((998, 1031), 'numpy.random.randn', 'np.random.randn', (['self.num_sellers'], {}), '(self.num_sellers)\n', (1013, 1031), True, 'import numpy as np\n'), ((1227, 1260), 'numpy.random.randn', 'np.random.randn', (['self.num_players'], {}), 
'(self.num_players)\n', (1242, 1260), True, 'import numpy as np\n'), ((2895, 2927), 'numpy.asarray', 'np.asarray', (['self.sampled_actions'], {}), '(self.sampled_actions)\n', (2905, 2927), True, 'import numpy as np\n'), ((5508, 5522), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5519, 5522), False, 'import argparse, time, os, pickle\n'), ((5717, 5736), 'pickle.dump', 'pickle.dump', (['env', 'f'], {}), '(env, f)\n', (5728, 5736), False, 'import argparse, time, os, pickle\n'), ((5871, 5894), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (5885, 5894), False, 'import argparse, time, os, pickle\n')] |
# pylint: disable=too-many-arguments
"""
Observer implemtation doing OS command
"""
from base.iobserver import IObserver
import subprocess
import logging
import os
import sys
# Root logger configuration: timestamped records written to stderr.
# The level is taken from the LOGLEVEL environment variable (default INFO).
logging.basicConfig(
    format='%(asctime)s %(levelname)s:%(name)s: %(message)s',
    level=os.environ.get('LOGLEVEL', 'INFO').upper(),
    datefmt='%H:%M:%S',
    stream=sys.stderr,
)
# Module-level logger used by the OsAction observer below.
logger = logging.getLogger('osaction')
class OsAction(IObserver):
    """Observer that reacts to subscribed event types by running an OS command."""

    # Seconds to wait for the spawned process before killing it.
    _TIMEOUT = 20

    def __init__(self, scope: set, name: str, cmd: tuple, useShell=False, waitToComplete=False):
        self._scope = scope
        self._name = name
        self._cmd = cmd
        self._useShell = useShell
        self._waitToComplete = waitToComplete

    def update(self, correlationId: str, msg: object) -> None:
        """
        Executes predefined OS command
        """
        # Only react to message types this observer was registered for.
        if msg[0] not in self._scope:  # type: ignore
            return
        try:
            logger.info(f'{correlationId} - Execute {self._cmd}')
            child = subprocess.Popen(
                self._cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                shell=self._useShell, env=os.environ.copy())
            if not self._waitToComplete:
                # Fire-and-forget mode: do not wait for or collect output.
                return
            try:
                outs, errs = child.communicate(timeout=OsAction._TIMEOUT)
            except Exception as e:
                # Timeout (or communicate failure): kill and drain the process.
                child.kill()
                outs, errs = child.communicate()
                logger.error(f'{correlationId} - {str(e)} - {str(errs)}')
            logger.debug(f'{correlationId} - command output: {str(outs)}')
        except Exception as e:
            logger.error(f'{correlationId} - {str(e)}')

    @property
    def name(self) -> str:
        """Observer name supplied at construction time."""
        return self._name
| [
"logging.getLogger",
"os.environ.get",
"os.environ.copy"
] | [((373, 402), 'logging.getLogger', 'logging.getLogger', (['"""osaction"""'], {}), "('osaction')\n", (390, 402), False, 'import logging\n'), ((271, 305), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""INFO"""'], {}), "('LOGLEVEL', 'INFO')\n", (285, 305), False, 'import os\n'), ((1246, 1263), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (1261, 1263), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017-2022 Univertity of Bristol - High Performance Networks Group
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, ForeignKey, Integer, String
from datetime import datetime
from werkzeug.middleware.proxy_fix import ProxyFix
from flask import Flask, Response, jsonify, render_template, request
import logging
import os
import sys
import json
import uuid
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.adapters.ruckus import RuckusWiFi
from lib.adapters.i2cat import I2catController
from conf.config import CONTROLLERS, RUCKUS_ID_MAPPING, RUCKUS_INIT_TOPOLOGY
# Logger configuration
# File-based application log; the logs/ directory is created on demand.
log_filename = "logs/output.log"
os.makedirs(os.path.dirname(log_filename), exist_ok=True)
logging.basicConfig(
    format="%(asctime)s [%(levelname)s] %(funcName)s %(message)s",
    datefmt='%Y-%m-%d %H:%M:%S', filename=log_filename, level=logging.INFO)
# Silence per-request noise from the requests library.
logging.getLogger('requests').setLevel(logging.ERROR)
logger = logging.getLogger()
# Template for structured log lines: INTERFACE:REQ-or-RESP:content
# NOTE(review): the trailing comment listed four fields but the template has
# three placeholders; callers pass three values.
log_base = "{}:{}:{}"  # INTERFACE,endpoint,REQ/RESP,content
# Flask app
app = Flask(__name__)
app.config.from_object(__name__)
# Define database (SQLite file next to the app; echo disabled).
Base = declarative_base()
engine = create_engine('sqlite:///file.db', echo=False)
def generate_uuid():
    """Produce a random UUID4 rendered in its canonical string form."""
    return f"{uuid.uuid4()}"
class Chunk(Base):
    """DB model for a chunk: a named slice of physical interfaces and services
    that may span several managed controllers."""
    __tablename__ = 'chunks'
    id = Column(String, primary_key=True, default=generate_uuid)
    # controllers_chunk is a dictionary where the keys are the ids of the
    # controller and the value is a list of the chunk in that controller
    # in the form "{controllerid1:[chunkid,...],controllerid2:...}"
    controllers_chunk = Column(String)
    # controllers_phys is a dictionary where the keys are the ids of the
    # controller and the value is a list of the chunk in that controller
    # in the form "{controllerid1:[chunkid,...],controllerid2:...}"
    controllers_phys = Column(String)
    phyList = Column(String)            # repr of the list of proxy-side phy ids
    name = Column(String)
    assignedQuota = Column(String)
    serviceList = Column(String)        # repr of the list of proxy-side service ids
    linkList = Column(String)
    chunk_json = Column(String)         # full JSON snapshot of the chunk

    def __repr__(self):
        # BUG FIX: the original referenced self._controllers_chunk (leading
        # underscore), an attribute that does not exist on this model, so
        # repr() raised AttributeError.
        return "{}, {}, {}, {}, {}, {}, {}, {}, {}".format(
            self.id,
            self.controllers_chunk,
            self.controllers_phys,
            self.phyList,
            self.name,
            self.assignedQuota,
            self.serviceList,
            self.linkList,
            self.chunk_json
        )
class Box(Base):
    """DB model mirroring a controller-side box (physical node) and its phys."""
    __tablename__ = 'boxes'
    id = Column(String, primary_key=True, default=generate_uuid)
    controller_id = Column(Integer)     # index of the owning controller
    box_id_controller = Column(String)  # the box id as known by that controller
    name = Column(String)
    location = Column(String)
    phys = Column(String)
    box_json = Column(String)

    def __repr__(self):
        return (f"{self.id}, {self.controller_id}, {self.box_id_controller}, "
                f"{self.name}, {self.location}, {self.phys}, {self.box_json}")
class Phy(Base):
    """DB model mirroring a controller-side physical interface."""
    __tablename__ = 'phys'
    id = Column(String, primary_key=True, default=generate_uuid)
    controller_id = Column(Integer)     # index of the owning controller
    phy_id_controller = Column(String)  # the phy id as known by that controller
    type = Column(String)
    name = Column(String)
    config = Column(String)
    virtualInterfaceList = Column(String)
    phy_json = Column(String)

    def __repr__(self):
        return (f"{self.id}, {self.controller_id}, {self.phy_id_controller}, "
                f"{self.type}, {self.name}, {self.config}, "
                f"{self.virtualInterfaceList}, {self.phy_json}")
class Vif(Base):
    """DB model for a virtual interface created on top of a phy by a service."""
    __tablename__ = 'vifs'
    id = Column(String, primary_key=True, default=generate_uuid)
    service_id = Column(String)
    controller_id = Column(Integer)
    phy_id = Column(String)
    name = Column(String)
    vif_json = Column(String)

    def __repr__(self):
        return (f"{self.id}, {self.service_id}, {self.controller_id}, "
                f"{self.phy_id}, {self.name}, {self.vif_json}")
class Vlan(Base):
    """DB model for a VLAN tag allocated to a service."""
    __tablename__ = 'vlans'
    id = Column(String, primary_key=True, default=generate_uuid)
    service_id = Column(String)
    tag = Column(Integer)
    # Mapping of controller id -> VLAN id on that controller (stored as str).
    controllers_vlans_id = Column(String)

    def __repr__(self):
        # BUG FIX: the original used a 3-placeholder format string with four
        # arguments (the last was silently dropped) and referenced the
        # non-existent attribute self.controller_vlans_id, which raised
        # AttributeError before formatting even happened.
        return "{}, {}, {}, {}".format(
            self.id,
            self.service_id,
            self.tag,
            self.controllers_vlans_id,
        )
class Service(Base):
    """DB model for a SWAM service deployed inside a chunk."""
    __tablename__ = 'services'
    id = Column(String, primary_key=True, default=generate_uuid)
    # controllers_services is a dictionary where the keys are the ids of the
    # controller and the value is a list of the chunk in that controller
    # in the form "{controllerid1:[serviceid,...],controllerid2:...}"
    controllers_services = Column(String)
    # controllers_phys is a dictionary where the keys are the ids of the
    # controller and the value is a list of the chunk in that controller
    # in the form "{controllerid1:[chunkid,...],controllerid2:...}"
    controllers_phys = Column(String)
    lteConfigCellReserved = Column(String)
    lteConfigMMEAddress = Column(String)
    lteConfigMMEPort = Column(Integer)
    lteConfigPLMNId = Column(String)
    selectedPhys = Column(String)
    selectedVifs = Column(String)
    wirelessConfigEncryption = Column(String)
    wirelessConfigPassword = Column(String)
    wirelessConfigSSID = Column(String)
    vlanId = Column(String)
    service_json = Column(String)

    def __repr__(self):
        return (
            f"{self.id}, {self.controllers_services}, {self.controllers_phys}, "
            f"{self.lteConfigCellReserved}, {self.lteConfigMMEAddress}, "
            f"{self.lteConfigMMEPort}, {self.lteConfigPLMNId}, "
            f"{self.selectedPhys}, {self.selectedVifs}, "
            f"{self.wirelessConfigSSID}, {self.wirelessConfigEncryption}, "
            f"{self.wirelessConfigPassword}, {self.vlanId}, {self.service_json}"
        )
# helpers to translate dabatase type class objects into dictionaries
def _dictService(service):
    """Serialize a Service DB row into its REST (dict) representation.

    Looks up the service's VLAN row via the module-level session; the caller
    is responsible for closing the session.
    """
    vlan = session.query(Vlan).filter(Vlan.service_id == service.id).one()
    # A non-empty SSID marks the service as having a Wi-Fi configuration.
    if service.wirelessConfigSSID:
        wirelessConfig = {
            "ssid": service.wirelessConfigSSID,
            "encryption": service.wirelessConfigEncryption,
            "password": service.wirelessConfigPassword
        }
    else:
        wirelessConfig = None
    # A non-empty PLMN id marks the service as having an LTE configuration.
    if service.lteConfigPLMNId:
        lteConfig = {
            "plmnId": service.lteConfigPLMNId,
            "cellReserved": service.lteConfigCellReserved,
            "mmeAddress": service.lteConfigMMEAddress,
            "mmePort": service.lteConfigMMEPort
        }
    else:
        lteConfig = None
    response_data = {
        "id": service.id,
        "serviceType": "SWAM_SERVICE",
        "selectedRoot": 0,
        "vlanId": {
            "id": vlan.id,
            "vlanId": vlan.tag
        },
        # NOTE(review): selectedVifs is stored as a Python-literal string and
        # rehydrated with eval(); safe only if the DB content is trusted.
        "selectedVifs": [{"id": x} for x in eval(service.selectedVifs)],
        "wirelessConfig": wirelessConfig,
        "lteConfig": lteConfig
    }
    return response_data
def _dictChunk(chunk):
    """Serialize a Chunk DB row (plus its services and phys) into a REST dict.

    serviceList/phyList are stored as Python-literal strings of id lists and
    rehydrated with eval(); safe only if the DB content is trusted.
    """
    services = session.query(Service).filter(
        Service.id.in_(eval(chunk.serviceList))).all()
    phys = session.query(Phy).filter(Phy.id.in_(eval(chunk.phyList))).all()
    response_data = {
        "id": chunk.id,
        "name": chunk.name,
        # NOTE(review): assignedQuota is hard-coded to 0 here even though the
        # model stores a value -- confirm whether chunk.assignedQuota should
        # be reported instead.
        "assignedQuota": 0,
        "serviceList": [_dictService(service) for service in services],
        "physicalInterfaceList": [_dictPhy(phy) for phy in phys],
        "linkList": []
    }
    return response_data
def _dictPhy(phy):
    """Serialize a Phy DB row (plus its virtual interfaces) into a REST dict."""
    vifs = session.query(Vif).filter(
        Vif.id.in_(eval(phy.virtualInterfaceList))).all()
    # config is stored as the str() of a Python object; rehydrate it when
    # present.  NOTE(review): eval() on DB content -- trusted input only.
    if phy.config:
        config = eval(phy.config)
    else:
        config = phy.config
    response_data = {
        "id": phy.id,
        "name": phy.name,
        "type": phy.type,
        "virtualInterfaceList": [_dictVif(vif) for vif in vifs],
        "config": config
    }
    return response_data
def _dictVif(vif):
    """Serialize a Vif DB row into the REST representation of a virtual interface."""
    # Port/VLAN numbers are not tracked by the proxy and are reported as zero.
    return {
        "id": vif.id,
        "name": vif.name,
        "toRootVlan": 0,
        "toAccessVlan": 0,
        "toAccessPort": 0,
        "toRootPort": 0,
        "openFlowPortList": []
    }
# Create database session
Base.metadata.create_all(engine)
DBSession = sessionmaker(bind=engine)
# Single module-level session shared by every request handler.
session = DBSession()
# Initialize controller list
controllers = []
# controllers = {}
# formatter for the returned errors
# Canonical HTTP outcomes used by every endpoint: "content" is the error
# label placed in the JSON body, "code" the HTTP status returned.
API_RESPONSE = {
    "OK": {
        "content": '',
        "code": 200
    },
    "CREATED": {
        "content": '',
        "code": 201
    },
    "CONTROLLER": {
        "content": 'Controller Error',
        "code": 503
    },
    "NOTFOUND": {
        "content": 'Not Found',
        "code": 404
    },
    "DB_INTEGRITY": {
        "content": 'DB Integrity',
        "code": 401
    },
    "VERIFICATION_ERROR": {
        "content": 'Verification Error',
        "code": 401
    }
}
def errorResponder(error, message):
    """Build a JSON error body plus HTTP status code for an API_RESPONSE key.

    ``error`` is one of the API_RESPONSE keys; ``message`` is a free-text
    explanation included in the body.  Returns (json_string, status_code).
    """
    entry = API_RESPONSE[error]
    payload = {
        "timestamp": datetime.today().isoformat(sep='T'),
        "status": entry["code"],
        "error": entry["content"],
        "message": message,
        "path": request.path
    }
    return json.dumps(payload), entry["code"]
# Log-line tags: which interface a message crossed and in which direction.
NORTHBOUND = "NORTHBOUND"
SOUTHBOUND = "SOUTHBOUND"
INTERNAL = "INTERNAL"
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
REQRESP = "REQ/RESP"
ROLLBACK = "ROLLBACK"
# Load controllers info from config.py and register topologies
# Look for first phy_id free in database
# (proxy id, controller-side id) pairs already persisted for phys/boxes;
# used below to avoid re-inserting known items.
db_id_phy_id_list = session.query(Phy.id, Phy.phy_id_controller).all()
# db_id_list = [r for (r, a) in db_id_phy_id_list]
# db_id_list.sort()
# if len(db_id_list) == 0:
#     new_phy_id = 1
# else:
#     new_phy_id = db_id_list[len(db_id_list)-1]+1
# # Look for first box_id free in database
db_id_box_id_list = session.query(Box.id, Box.box_id_controller).all()
# db_id_list = [r for (r, a) in db_id_box_id_list]
# db_id_list.sort()
# if len(db_id_list) == 0:
#     new_box_id = 1
# else:
#     new_box_id = db_id_list[len(db_id_list)-1]+1
# NOTE(review): new_box_id appears unused after the switch to UUIDs.
new_box_id = str(uuid.uuid4())
# *******************************
# Initialize proxy runtime status
# *******************************
#
# INITIAL TOPOLOGY RECOVERY (Boxes, Phys):
# =========================
# -RUCKUS type controller initial topology recovered from config.py
# -I2CAT type controller initial topology recovered from live
# SOUTHBOUND REQUEST to controller
#
# CURRENT STATE (Chunks, Services, VirtualInterfaces):
# ==============
# -RUCKUS type controller current state recovered from database and
# controllers runtime status
# -I2CAT type controller current state kept on controller
#
# Instantiate one adapter per configured controller and seed the local DB
# with each controller's boxes/phys that are not yet persisted.
for item in CONTROLLERS:
    if item['type'] == 'ruckus':
        # Recover the list of chunks from the database
        db_chunks = session.query(Chunk).all()
        chunks = []
        for db_chunk in db_chunks:
            # len(controllers) is the index this controller will get once
            # appended; only replay chunks that exist on this controller.
            # NOTE(review): eval() on DB-stored dict strings -- trusted input
            # only; a missing key would raise KeyError here.
            if eval(db_chunk.controllers_chunk)[len(controllers)]:
                chunk = _dictChunk(db_chunk)
                phys_to_pop = []
                services_to_pop = []
                # Translate proxy-side service ids to controller-side ids,
                # dropping services unknown to this controller.
                for service in chunk["serviceList"]:
                    db_service = session.query(Service).filter(
                        Service.id == service["id"]).one()
                    if len(controllers) in \
                            eval(db_service.controllers_services).keys():
                        service["id"] = eval(db_service.controllers_services)[
                            len(controllers)]
                    else:
                        services_to_pop.append(service)
                [chunk["serviceList"].remove(service)
                    for service in services_to_pop]
                # Same translation for the phys attached to the chunk.
                for phy in chunk["physicalInterfaceList"]:
                    try:
                        db_phy = session.query(Phy).filter(
                            Phy.id == phy["id"],
                            Phy.controller_id == len(controllers)).one()
                        phy = db_phy.phy_id_controller
                    except NoResultFound:
                        phys_to_pop.append(phy)
                [chunk["physicalInterfaceList"].remove(
                    phy) for phy in phys_to_pop]
                chunk["id"] = eval(db_chunk.controllers_chunk)[
                    len(controllers)]
                chunks.append(chunk)
        phy_id_mapping = RUCKUS_ID_MAPPING
        # Ruckus adapters are rebuilt from config plus the replayed chunks.
        controller = RuckusWiFi(
            controller_id=item['id'],
            ip=item['ip'],
            port=item['port'],
            url=item['url'],
            topology=item['topology'],
            chunks=chunks,
            phy_id_mapping=phy_id_mapping,
            username=item['username'],
            password=item['password']
        )
        controllers.append(controller)
        # controllers[controller.controller_id] = controller
    elif item['type'] == 'i2cat':
        # i2cat adapters keep their state on the controller itself; the live
        # topology is fetched and mirrored into the local DB below.
        controller = I2catController(
            controller_id=item['id'],
            ip=item['ip'],
            port=item['port'],
            url=item['url']
        )
        controllers.append(controller)
        # controllers[controller.controller_id] = controller
        for box in controller.getChunketeTopology()[0]["boxes"]:
            # Persist only boxes we have not seen before.
            if box['id'] not in [r for (a, r) in db_id_box_id_list]:
                try:
                    # initial_topology["boxes"].append(box)
                    new_box = Box(
                        name=box["name"],
                        location=json.dumps(box["location"]),
                        controller_id=item['id'],
                        box_id_controller=box['id'],
                        phys=json.dumps(box["phys"]),
                        box_json=json.dumps(box))
                    session.add(new_box)
                    # count_phys = 0
                    for phy in box["phys"]:
                        if phy['id'] not in [r for (a, r) in db_id_phy_id_list]:
                            new_phy = Phy(
                                name=phy["name"], type=phy["type"],
                                controller_id=item['id'],
                                phy_id_controller=phy['id'],
                                config=str(phy["config"]),
                                virtualInterfaceList=json.dumps([]),
                                phy_json=json.dumps(phy))
                            session.add(new_phy)
                            # count_phys += 1
                    session.commit()
                    # new_phy_id += count_phys
                    # new_box_id += 1
                except IntegrityError as ex:
                    # Duplicate insert: undo this box and keep bootstrapping.
                    session.rollback()
session.close()
def root_dir():
    """Absolute path of the directory that contains this module."""
    here = os.path.dirname(__file__)
    return os.path.abspath(here)
def get_file(filename):
    """Read ``filename`` (relative to the app directory) and return its text.

    On any I/O failure the exception text is returned instead of raising,
    mirroring the original best-effort behaviour.
    """
    try:
        src = os.path.join(root_dir(), filename)
        # BUG FIX: the original used open(src).read() without closing the
        # file, leaking the handle; the context manager closes it reliably.
        with open(src) as handle:
            return handle.read()
    except IOError as exc:
        logger.error("Impossible to read file", exc_info=True)
        return str(exc)
@app.route('/')
def root_page():
    """Health-check style root endpoint; always answers 200 with an empty body."""
    ok = API_RESPONSE["OK"]
    return ok["content"], ok["code"]
@app.after_request
def flaskResponse(response):
    """After-request hook: set a JSON content type when a body exists and log
    every outgoing response on the northbound interface."""
    payload = ""
    data = response.get_data()
    if data:
        response.headers["Content-Type"] = "application/json;charset=UTF-8"
        payload = json.loads(data)
    log_content = " '{}' {} :code:{}:body:{}".format(
        request.method, request.path, response.status_code, payload)
    logger.info(log_base.format(NORTHBOUND, RESPONSE, log_content))
    return response
@app.before_request
def before():
    """Before-request hook; placeholder that currently does nothing.

    Kept registered so request-scoped setup (auth, headers) can be added here.
    """
    pass
# Topology API implementation
@app.route('/chunkete/topology', methods=['GET'])
def getChunketeTopology():
    """GET /chunkete/topology -- aggregate the topology of every controller.

    Each controller's boxes are fetched live; controller-side box/phy ids are
    rewritten to the proxy's own DB ids before being returned to the client.
    """
    resp = {
        "boxes": [],
        "links": []
    }
    log_content = ""
    logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
    for index_controller in range(len(controllers)):
        try:
            boxes = session.query(Box).filter(
                Box.controller_id == index_controller).all()
            (controller_resp,
                code) = controllers[index_controller].getChunketeTopology()
            log_content = "controller:{}:response:{}/{}".format(
                index_controller, code, controller_resp)
            logger.info(log_base.format(SOUTHBOUND, REQRESP, log_content))
            if code == API_RESPONSE["OK"]["code"]:
                for box in controller_resp["boxes"]:
                    # Rewrite each phy id from the controller's id space to
                    # the proxy's DB id space.
                    for index_phy in range(len(box["phys"])):
                        phy = session.query(Phy).filter(
                            Phy.controller_id == index_controller).filter(
                            Phy.phy_id_controller ==
                            box["phys"][index_phy]["id"]
                        ).one()
                        box["phys"][index_phy]["id"] = phy.id
                    # Same rewrite for the box id itself.
                    for db_box in boxes:
                        if db_box.box_id_controller == box["id"]:
                            box["id"] = db_box.id
                            break
                    resp["boxes"].append(box)
            else:
                # Propagate the controller's error verbatim.
                return controller_resp, code
        except NoResultFound:
            # NOTE(review): the timestamp below is hard-coded rather than the
            # current time -- errorResponder would produce a live one.
            return json.dumps({
                "timestamp": "2019-09-10T14:18:24.866+0000",
                "status": API_RESPONSE["NOTFOUND"]["code"],
                "error": API_RESPONSE["NOTFOUND"]["content"],
                "message": "No Result Found for the request",
                "path": request.path
            }), API_RESPONSE["NOTFOUND"]["code"]
        except IntegrityError:
            return errorResponder(
                "DB_INTEGRITY", "Database integrity error")
        finally:
            session.close()
    response = jsonify(resp)
    return response, API_RESPONSE["OK"]["code"]
@app.route(
    '/chunkete/topology/physicalInterface/<phy_id>/LTEConfig',
    methods=['PUT'])
def putInterfaceLTEConfig(phy_id):
    """PUT the LTE configuration of a physical interface.

    Expected JSON body, e.g.:
    {
        "cellIdentity": 256, "earfcndl": 41690, "phyCellId": 5,
        "prachrootseqindex": 100, "primaryMMEAddress": "192.168.100.25",
        "primaryMMEPort": 333, "primaryPlmnId": "00101",
        "refSignalPower": -40, "reservedForOperatorUse": "not-reserved",
        "trackingAreaCode": 67
    }
    Validates every field, then forwards the raw body to the controller that
    owns the phy.  Returns the controller's response, 401 on malformed input,
    404 when the phy is unknown.
    """
    try:
        content = request.data
        content_dict = json.loads(content)
        log_content = "phy_id:{}:content:{}".format(phy_id, content_dict)
        logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
        # BUG FIX: the original range checks used impossible chained
        # comparisons such as "0 > x > 256" (never true for any x), so
        # out-of-range values were silently accepted.  The checks below
        # reject anything outside the documented bounds.
        if not 0 <= content_dict["cellIdentity"] <= 256:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        # Valid EARFCN bands (same bounds as the original range() objects).
        earfcndl = content_dict["earfcndl"]
        if not (2750 <= earfcndl < 3449 or 41690 <= earfcndl < 43489
                or 37750 <= earfcndl < 38249):
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if not 0 <= content_dict["phyCellId"] <= 500:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if not 0 <= content_dict["prachrootseqindex"] <= 1023:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if "primaryMMEAddress" not in content_dict.keys():
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if "primaryMMEPort" not in content_dict.keys():
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if "primaryPlmnId" not in content_dict.keys():
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if not -40 <= content_dict["refSignalPower"] <= -10:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if content_dict["reservedForOperatorUse"] != "not-reserved":
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if not 0 <= content_dict["trackingAreaCode"] <= 65535:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if content:
            phy = session.query(Phy).filter(Phy.id == phy_id).one()
            response, code = controllers[phy.controller_id].\
                putInterfaceLTEConfig(
                    phy.phy_id_controller, content)
            # BUG FIX: format() was applied to the second string literal
            # only, leaving the first half's placeholders unfilled; format
            # the whole message instead.
            log_content = (
                "controller:{}:phy_id_controller:{}:phy_id:{}"
                ":content:{}:response:{}/{}").format(
                    phy.controller_id, phy.phy_id_controller,
                    phy_id, content, code, response)
            logger.info(log_base.format(SOUTHBOUND, REQRESP, log_content))
            return jsonify(response), code
        else:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
    except KeyError:
        return errorResponder(
            "VERIFICATION_ERROR", "Malformed request")
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route(
    '/chunkete/topology/physicalInterface/<phy_id>/type/<phy_type>',
    methods=['PUT'])
def putInterfaceType(phy_id, phy_type):
    """PUT the type of a physical interface, forwarding to its controller.

    Returns the controller's response, 404 when the phy is unknown.
    """
    try:
        log_content = "phy_id:{}:phy_type:{}".format(phy_id, phy_type)
        logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
        phy = session.query(Phy).filter(Phy.id == phy_id).one()
        response, code = controllers[phy.controller_id].putInterfaceType(
            phy.phy_id_controller, phy_type)
        # BUG FIX: the original read the non-existent attribute
        # phy.phyid_controller (AttributeError on every success path) and
        # only formatted the second half of the message; use the real
        # attribute name and format the whole string.
        log_content = (
            "controller:{}:phy_id_controller:{}:phy_id:{}"
            ":phy_type:{}:response:{}/{}").format(
                phy.controller_id, phy.phy_id_controller,
                phy_id, phy_type, code, response)
        logger.info(
            log_base.format(SOUTHBOUND, REQRESP, log_content))
        return response, code
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route(
    '/chunkete/topology/physicalInterface/<phy_id>/wiredConfig',
    methods=['PUT'])
def putInterfaceWiredConfig(phy_id):
    """PUT the wired configuration of a physical interface.

    Forwards the raw JSON body to the controller that owns the phy.  Returns
    the controller's response, 401 on an empty body, 404 on unknown phy.
    """
    try:
        content = request.data
        log_content = "phy_id:{}:content:{}".format(
            phy_id, json.loads(content))
        logger.info(
            log_base.format(NORTHBOUND, REQUEST, log_content))
        if content:
            phy = session.query(Phy).filter(Phy.id == phy_id).one()
            response, code = controllers[phy.controller_id].\
                putInterfaceWiredConfig(
                    phy.phy_id_controller, content)
            # BUG FIX: format() was applied to the second string literal
            # only, leaving the first half's placeholders unfilled; format
            # the whole message instead.
            log_content = (
                "controller:{}:phy_id_controller:{}:phy_id:{}"
                ":response:{}/{}").format(
                    phy.controller_id, phy.phy_id_controller,
                    phy_id, code, response)
            logger.info(
                log_base.format(SOUTHBOUND, REQRESP, log_content))
            return response, code
        # BUG FIX: an empty body previously fell through the function and
        # returned None (a 500 in Flask); reject it explicitly like the
        # sibling endpoints do.
        return errorResponder(
            "VERIFICATION_ERROR", "Malformed request")
    except KeyError:
        return errorResponder(
            "VERIFICATION_ERROR", "Malformed request")
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route(
    '/chunkete/topology/physicalInterface/<phy_id>/wirelessConfig',
    methods=['PUT'])
def putInterfaceWirelessConfig(phy_id):
    """PUT the Wi-Fi radio configuration of a physical interface.

    Expected JSON body:
    {
        "channelBandwidth": 20,   # MHz; 20, 40 and 80 are accepted
        "channelNumber": 36,      # 2.4 GHz or 5 GHz channel (DFS channels
                                  # included, depending on node support)
        "txPower": 2000           # mBm; values in (0, 3500] are accepted,
                                  # 2300 being typical
    }
    """
    try:
        content = request.data
        # Parse once (the original parsed the same body twice).
        content_dict = json.loads(content)
        log_content = "phy_id:{}:content:{}".format(
            phy_id, content_dict)
        logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
        if content_dict["channelBandwidth"] not in [20, 40, 80]:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        # NOTE(review): range(1, 11) only allows 2.4 GHz channels 1-10;
        # confirm whether channels 11-13 should also be accepted.
        if content_dict["channelNumber"] not in [i for j in (
                range(1, 11),
                range(36, 68, 4),
                range(100, 140, 4)) for i in j]:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        # BUG FIX: "0 >= txPower > 3500" can never be true, so any power
        # value was accepted; reject everything outside (0, 3500].
        if not 0 < content_dict["txPower"] <= 3500:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if content:
            phy = session.query(Phy).filter(Phy.id == phy_id).one()
            response, code = controllers[phy.controller_id].\
                putInterfaceWirelessConfig(phy.phy_id_controller, content)
            # BUG FIX: format() was applied to the second string literal
            # only, leaving the first half's placeholders unfilled; format
            # the whole message.  The unreachable CREATED return that
            # followed the if/else has been removed.
            log_content = (
                "controller:{}:phy_id_controller:{}:phy_id:{}"
                ":content:{}:response:{}/{}").format(
                    phy.controller_id, phy.phy_id_controller,
                    phy_id, content, code, response)
            logger.info(
                log_base.format(SOUTHBOUND, REQRESP, log_content))
            return jsonify(response), code
        else:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
    except KeyError:
        logger.error("Malformed request")
        return errorResponder(
            "VERIFICATION_ERROR", "Malformed request")
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
# Chunk API implementation
@app.route('/chunkete/chunk', methods=['GET'])
def getAllChunks():
    """GET /chunkete/chunk -- list every chunk known to the proxy."""
    logger.info(log_base.format(NORTHBOUND, REQUEST, ""))
    try:
        serialized = [_dictChunk(row) for row in session.query(Chunk).all()]
        return jsonify(serialized), API_RESPONSE["OK"]["code"]
    except NoResultFound:
        return errorResponder("NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder("DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route('/chunkete/chunk', methods=['POST'])
def registerNewChunk():
    """POST /chunkete/chunk -- create a chunk spanning one or more controllers.

    The request body lists the phys (by proxy id) to include; the handler
    splits them per owning controller, registers a per-controller chunk on
    each, and stores the aggregate in the local DB.  Returns {"id": ...} and
    201 on success.
    """
    try:
        content = request.data
        log_content = "content:{}".format(json.loads(content))
        logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
        # NOTE(review): an empty body skips this branch and the function
        # returns None (a 500 in Flask) instead of a 401.
        if content:
            chunk_dict = json.loads(content)
            controllers_phys = {}
            controllers_content = {}
            # Split the phys included in the chunk per controller
            for phy in chunk_dict["physicalInterfaceList"]:
                phy = session.query(Phy).filter(Phy.id == phy["id"]).one()
                phy_dict = json.loads(phy.phy_json)
                phy_id_dict = {"id": phy_dict["id"]}
                if phy.controller_id in controllers_phys.keys():
                    controllers_phys[phy.controller_id].append(
                        phy.phy_id_controller)
                    controllers_content[
                        phy.controller_id][
                        "physicalInterfaceList"].append(phy_id_dict)
                else:
                    # First phy seen for this controller: start its request.
                    controllers_phys[phy.controller_id] = [phy.phy_id_controller]
                    controllers_content[phy.controller_id] = {
                        "name": chunk_dict["name"],
                        "physicalInterfaceList": [phy_id_dict],
                    }
                # Optional fields default to empty values both in the
                # per-controller request and in the stored chunk.
                if "assignedQuota" in chunk_dict.keys():
                    controllers_content[phy.controller_id]["assignedQuota"] = \
                        chunk_dict["assignedQuota"]
                else:
                    chunk_dict["assignedQuota"] = 0
                    controllers_content[phy.controller_id]["assignedQuota"] = 0
                if "linkList" in chunk_dict.keys():
                    controllers_content[phy.controller_id]["linkList"] = \
                        chunk_dict["linkList"]
                else:
                    chunk_dict["linkList"] = []
                    controllers_content[phy.controller_id]["linkList"] = []
                if "serviceList" in chunk_dict.keys():
                    controllers_content[phy.controller_id]["serviceList"] = \
                        chunk_dict["serviceList"]
                else:
                    chunk_dict["serviceList"] = []
                    controllers_content[phy.controller_id]["serviceList"] = []
            # # Create a new chunk and add to database
            # # Get the next free ID in db
            # db_id_list = session.query(Chunk.id).all()
            # db_id_list = [r for (r, ) in db_id_list]
            # db_id_list.sort()
            # if len(db_id_list) == 0:
            #     new_chunk_id = 1
            # else:
            #     new_chunk_id = db_id_list[len(db_id_list)-1]+1
            # Add the chunk in the database
            chunk = Chunk(
                name=chunk_dict["name"],
                serviceList=json.dumps([]),
                assignedQuota=chunk_dict["assignedQuota"],
                controllers_phys=str(controllers_phys),
                phyList=str(
                    [phy["id"] for phy in chunk_dict["physicalInterfaceList"]]
                ),
                linkList=json.dumps([]), chunk_json=json.dumps(chunk_dict))
            session.add(chunk)
            # Register the chunk on each of the controllers
            controllers_chunk_dict = {}
            for controller_id in controllers_content.keys():
                response, code = controllers[controller_id].registerNewChunk(
                    json.dumps(controllers_content[controller_id]))
                # NOTE(review): format() only fills the second string
                # literal; the "controller:{}:content:{}" placeholders are
                # logged unfilled.
                log_content = "controller:{}:content:{}"
                log_content += ":response:{}/{}".\
                    format(
                        controller_id,
                        json.dumps(
                            controllers_content[controller_id]),
                        code, response)
                logger.info(log_base.format(SOUTHBOUND, REQRESP, log_content))
                if code == API_RESPONSE["CREATED"]["code"]:
                    # Remember the controller-side id of this chunk.
                    controllers_chunk_dict[controller_id] = response["id"]
                else:
                    return errorResponder(
                        "CONTROLLER", "Managed Controller returned an error")
            # Update Service in Database
            chunk_dict["id"] = chunk.id
            chunk.chunk_json = json.dumps(chunk_dict)
            chunk.controllers_chunk = str(controllers_chunk_dict)
            session.commit()
            return json.dumps(
                {'id': chunk.id}), API_RESPONSE["CREATED"]["code"]
    except KeyError:
        return errorResponder(
            "VERIFICATION_ERROR", "Malformed request")
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route('/chunkete/chunk/<chunk_id>', methods=['GET'])
def getChunkById(chunk_id):
    """Return the JSON representation of a single chunk.

    Looks the chunk up by primary key, serializes it with _dictChunk()
    and maps database failures onto the API error responses.
    """
    logger.info(log_base.format(
        NORTHBOUND, REQUEST, "chunk_id:{}".format(chunk_id)))
    try:
        db_chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
        return jsonify(_dictChunk(db_chunk)), API_RESPONSE["OK"]["code"]
    except NoResultFound:
        return errorResponder("NOTFOUND", "Object not found")
    except IntegrityError:
        return errorResponder("DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route('/chunkete/chunk/<chunk_id>', methods=['DELETE'])
def removeExistingChunk(chunk_id):
    """Delete a chunk: tear down its services, deregister it from every
    managed controller that hosts one of its phys, then drop the DB row.

    Returns the standard OK payload, or a NOTFOUND / DB_INTEGRITY error.
    """
    log_content = "chunk_id:{}".format(chunk_id)
    logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
    try:
        chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
        session.close()
        # NOTE(review): controllers_phys/serviceList are stored as Python
        # reprs and eval()'d back; consider JSON for DB-held structures.
        controllers_phys = eval(chunk.controllers_phys)
        serviceList = eval(chunk.serviceList)
        # Remove the Services from the chunk
        while serviceList:
            removeExistingSWAMService(
                chunk_id, serviceList[0], interface=INTERNAL)
            serviceList.pop(0)
        for controller_id in controllers_phys.keys():
            response, code = controllers[controller_id].removeExistingChunk(
                eval(chunk.controllers_chunk)[controller_id])
            # BUGFIX: format the whole log line in one call; the original
            # applied format() to the ":response:{}/{}" suffix only, leaving
            # the leading placeholders unfilled.
            log_content = "controller:{}:chunk_id:{}:response:{}/{}".format(
                controller_id, chunk_id, code, response)
            logger.info(log_base.format(SOUTHBOUND, REQRESP, log_content))
        # Remove the chunk from the database
        session.delete(chunk)
        session.commit()
        return API_RESPONSE["OK"]["content"], API_RESPONSE["OK"]["code"]
    except NoResultFound:
        return errorResponder("NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder("DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
# Service API implementation
@app.route('/chunkete/chunk/<chunk_id>/service/SWAM', methods=['GET'])
def getAllSWAMServices(chunk_id):
    """List every SWAM service registered on the given chunk."""
    logger.info(log_base.format(
        NORTHBOUND, REQUEST, "chunk_id:{}".format(chunk_id)))
    try:
        chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
        # Resolve each stored service id to its serialized representation.
        services = [
            _dictService(
                session.query(Service).filter(Service.id == sid).one())
            for sid in eval(chunk.serviceList)
        ]
        return jsonify(services), API_RESPONSE["OK"]["code"]
    except NoResultFound:
        return errorResponder("NOTFOUND", "Item not found")
    finally:
        session.close()
@app.route('/chunkete/chunk/<chunk_id>/service/SWAM', methods=['POST'])
def registerNewSWAMService(chunk_id):
    """Create a SWAM service on a chunk and push it to every controller
    owning one of the selected phys.

    Expected request body (JSON)::

        {
            "lteConfig": {  # see the Accelleran documentation for details
                "cellReserved": "not-reserved",
                "mmeAddress": "192.168.50.2",
                "mmePort": 333,
                "plmnId": "00101"
            },
            "selectedPhys": [
                # only interfaces of type SUB6_ACCESS, LTE_PRIMARY_PLMN
                # and WIRED_TUNNEL are accepted
                14, 23
            ],
            "vlanId": 201,  # 1-4095
            "wirelessConfig": {
                "encryption": "WPA",  # NONE, WPA, WPA2, WEP accepted
                "password": "<PASSWORD>",
                # no spaces allowed; minimum 8 characters, or empty when
                # encryption == "NONE"
                "ssid": "Test"  # no spaces allowed
            }
        }

    If a controller fails after some controllers already accepted the
    service, the ``finally`` clause rolls those registrations back.
    """
    PHY_TYPES = ["SUB6_ACCESS", "LTE_PRIMARY_PLMN", "WIRED_TUNNEL"]
    ENCRYPTION_TYPES = ["NONE", "WPA", "WPA2", "WEP"]
    # Action record for rollback in case something fails
    # {
    #     <controller>:{
    #         "chunk_id": <chunk_id_on_controller>
    #         "service_id": <service_id>
    #     }
    # }
    rollback_flag = True
    rollback = {}
    try:
        content = request.data
        log_content = "chunk_id:{}:content:{}".format(
            chunk_id, json.loads(content))
        logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
        if content:
            service_dict = json.loads(content)
        # Normalize an empty/absent lteConfig to an all-None structure.
        if "lteConfig" in service_dict.keys():
            if not service_dict["lteConfig"]:
                service_dict["lteConfig"] = {
                    "cellReserved": None,
                    "mmeAddress": None,
                    "mmePort": None,
                    "plmnId": None
                }
        if "wirelessConfig" in service_dict.keys():
            if service_dict["wirelessConfig"]:
                if service_dict["wirelessConfig"]["encryption"] not in \
                        ENCRYPTION_TYPES:
                    return errorResponder(
                        "VERIFICATION_ERROR", "Malformed request")
                elif len(service_dict["wirelessConfig"]["password"]) < 8:
                    # A short password is only acceptable with no encryption.
                    if service_dict[
                            "wirelessConfig"]["encryption"] != "NONE":
                        return errorResponder(
                            "VERIFICATION_ERROR", "Malformed request")
                elif ' ' in service_dict["wirelessConfig"]["ssid"]:
                    return errorResponder(
                        "VERIFICATION_ERROR", "Malformed request")
            else:
                service_dict["wirelessConfig"] = {
                    "encryption": None,
                    "password": None,
                    "ssid": None
                }
        # BUGFIX: the original chained comparison ``1 > vlanId > 4095`` is
        # unsatisfiable, so out-of-range VLAN ids were silently accepted.
        if not 1 <= service_dict["vlanId"] <= 4095:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        controllers_phys = {}
        controllers_content = {}
        controllers_xref = {}
        selected_vifs = []
        db_vifs = []
        chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
        for phy_id in service_dict["selectedPhys"]:
            # Every selected phy must belong to the chunk and be of an
            # accepted type.
            if phy_id not in eval(chunk.phyList):
                return errorResponder(
                    "VERIFICATION_ERROR", "Malformed request")
            phy = session.query(Phy).filter(Phy.id == phy_id).one()
            if phy.type not in PHY_TYPES:
                return errorResponder(
                    "VERIFICATION_ERROR", "Malformed request")
            # Group phys per owning controller and build the per-controller
            # request payload.
            if phy.controller_id in controllers_phys.keys():
                controllers_phys[phy.controller_id].append(phy.id)
                controllers_xref[phy.controller_id].append(
                    phy.phy_id_controller)
                controllers_content[phy.controller_id]["selectedPhys"].\
                    append(phy.phy_id_controller)
            else:
                controllers_phys[phy.controller_id] = [phy.id]
                controllers_xref[phy.controller_id] = [
                    phy.phy_id_controller]
                controllers_content[phy.controller_id] = {
                    "selectedPhys": [phy.phy_id_controller],
                    "vlanId": service_dict["vlanId"]
                }
                if "lteConfig" in service_dict.keys():
                    controllers_content[phy.controller_id]["lteConfig"] = \
                        service_dict["lteConfig"]
                if "wirelessConfig" in service_dict.keys():
                    controllers_content[phy.controller_id][
                        "wirelessConfig"] = service_dict["wirelessConfig"]
            # Create one virtual interface per selected phy; its name is the
            # phy name followed by the freshly generated vif uuid.
            new_vif_dict = {
                'id': str(uuid.uuid4()),
                'name': "",
                "toRootVlan": 0,
                "toAccessVlan": 0,
                "toAccessPort": 0,
                "toRootPort": 0,
                "openFlowPortList": []
            }
            new_vif_dict['name'] = "{}_{}".\
                format(phy.name, new_vif_dict['id'])
            vif = Vif(
                id=new_vif_dict['id'],
                service_id="",
                phy_id=phy.id,
                controller_id=phy.controller_id,
                vif_json=json.dumps(new_vif_dict))
            session.add(vif)
            db_vifs.append(vif)
            selected_vifs.append(new_vif_dict['id'])
            phy = session.query(Phy).filter(Phy.id == phy.id).one()
            virtualInterfaceList = json.loads(phy.virtualInterfaceList)
            virtualInterfaceList.append(vif.id)
            phy.virtualInterfaceList = json.dumps(virtualInterfaceList)
            phy_dict = json.loads(phy.phy_json)
            if "virtualInterfaceList" in phy_dict:
                phy_dict["virtualInterfaceList"].append(new_vif_dict)
            else:
                phy_dict["virtualInterfaceList"] = [new_vif_dict]
            phy.phy_json = json.dumps(phy_dict)
        # Add the service in the database
        service = Service(
            controllers_services=str({}),
            controllers_phys=str(controllers_xref),
            lteConfigCellReserved=service_dict[
                "lteConfig"]["cellReserved"],
            lteConfigMMEAddress=service_dict["lteConfig"]["mmeAddress"],
            lteConfigMMEPort=service_dict["lteConfig"]["mmePort"],
            lteConfigPLMNId=service_dict["lteConfig"]["plmnId"],
            selectedPhys=str(service_dict["selectedPhys"]),
            selectedVifs=str(selected_vifs),
            wirelessConfigEncryption=service_dict[
                "wirelessConfig"]["encryption"],
            wirelessConfigPassword=service_dict[
                "wirelessConfig"]["password"],
            wirelessConfigSSID=service_dict["wirelessConfig"]["ssid"],
            vlanId=service_dict["vlanId"],
            service_json=json.dumps(service_dict)
        )
        vlan = Vlan(
            tag=service_dict["vlanId"],
            service_id="",
            controllers_vlans_id="")
        session.add(vlan)
        session.add(service)
        session.flush()
        # Update Chunk in database
        # update serviceList
        serviceList = json.loads(chunk.serviceList)
        serviceList.append(service.id)
        chunk.serviceList = json.dumps(serviceList)
        # update chunk json
        service_dict["id"] = service.id
        vlan.service_id = service.id
        for db_vif in db_vifs:
            db_vif.service_id = service.id
        updated_chunk = json.loads(chunk.chunk_json)
        updated_chunk["serviceList"].append(service_dict)
        chunk.chunk_json = json.dumps(updated_chunk)
        service.service_json = json.dumps(service_dict)
        session.flush()
        # Register the service on each controller
        controllers_services_dict = {}
        for controller_id in controllers_phys.keys():
            data, code = controllers[controller_id].\
                registerNewSWAMService(
                    eval(chunk.controllers_chunk)[controller_id],
                    json.dumps(controllers_content[controller_id]))
            # BUGFIX: build the full log line in one format() call; the
            # original formatted only the ":response:{}/{}" suffix, leaving
            # the leading placeholders unfilled.
            log_content = (
                "controller:{}:chunk_id:{}:content:{}:response:{}/{}".format(
                    controller_id, chunk_id,
                    json.dumps(controllers_content[controller_id]),
                    code, data))
            logger.info(log_base.format(
                SOUTHBOUND, REQRESP, log_content))
            if code == API_RESPONSE["CREATED"]["code"]:
                # Remember what to undo if a later controller fails.
                rollback[controller_id] = {
                    'chunk_id': eval(
                        chunk.controllers_chunk)[controller_id],
                    'service_id': data["id"]
                }
                controllers_services_dict[controller_id] = data["id"]
            else:
                return errorResponder(
                    "CONTROLLER",
                    "Managed Controller returned an error")
        # Update Service in Database
        service.controllers_services = str(controllers_services_dict)
        session.commit()
        rollback_flag = False
        return json.dumps(
            {'id': service.id}), API_RESPONSE["CREATED"]["code"]
    except KeyError:
        return errorResponder(
            "VERIFICATION_ERROR", "Malformed request")
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        if rollback_flag:
            if rollback:
                # Undo the registrations that already succeeded.
                for controller_id in rollback.keys():
                    data, code = controllers[controller_id].\
                        removeExistingSWAMService(
                            rollback[controller_id]["chunk_id"],
                            rollback[controller_id]["service_id"])
                    # BUGFIX: same suffix-only format() bug as above.
                    log_content = (
                        "controller:{}:chunk_id:{}:service_id:{}"
                        ":response:{}/{}".format(
                            controller_id,
                            rollback[controller_id]["chunk_id"],
                            rollback[controller_id]["service_id"],
                            code, data))
                    logger.info(log_base.format(
                        SOUTHBOUND, ROLLBACK, log_content))
        session.close()
@app.route(
    '/chunkete/chunk/<chunk_id>/service/SWAM/<service_id>',
    methods=['GET'])
def getSWAMServiceById(chunk_id, service_id):
    """Return the JSON representation of one SWAM service."""
    logger.info(log_base.format(
        NORTHBOUND, REQUEST,
        "chunk_id:{}:service_id:{}".format(chunk_id, service_id)))
    try:
        db_service = session.query(Service).filter(
            Service.id == service_id).one()
        return jsonify(_dictService(db_service)), API_RESPONSE["OK"]["code"]
    except NoResultFound:
        return errorResponder("NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder("DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route(
    '/chunkete/chunk/<chunk_id>/service/SWAM/<service_id>',
    methods=['DELETE'])
def removeExistingSWAMService(chunk_id, service_id, interface=NORTHBOUND):
    """Delete a SWAM service from a chunk.

    Removes the service's virtual interfaces from their phys, deregisters
    the service on every involved controller, and deletes the service and
    its VLAN from the database.  ``interface`` only affects the log label
    (this function is also called internally by removeExistingChunk).
    """
    log_content = "chunk_id:{}:service_id:{}".format(chunk_id, service_id)
    logger.info(log_base.format(interface, REQUEST, log_content))
    controllers_phys = {}
    try:
        # Update Chunk in database
        chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
        vifs = session.query(Vif).filter(
            Vif.service_id == service_id).all()
        for vif in vifs:
            # Detach each vif from its phy and record which controller
            # owns that phy.
            phy = session.query(Phy).filter(Phy.id == vif.phy_id).one()
            if phy.controller_id in controllers_phys.keys():
                controllers_phys[phy.controller_id].append(phy.id)
            else:
                controllers_phys[phy.controller_id] = [phy.id]
            virtualInterfaceList = eval(phy.virtualInterfaceList)
            virtualInterfaceList.remove(vif.id)
            phy.virtualInterfaceList = json.dumps(virtualInterfaceList)
            session.delete(vif)
        chunk_dict = json.loads(chunk.chunk_json)
        serviceList = json.loads(chunk.serviceList)
        for index in range(len(serviceList)):
            if serviceList[index] == service_id:
                service = session.query(Service).filter(
                    Service.id == service_id).one()
                controllers_services_dict = eval(service.controllers_services)
                for controller_id in controllers_phys.keys():
                    response, code = controllers[controller_id].\
                        removeExistingSWAMService(
                            eval(chunk.controllers_chunk)[controller_id],
                            controllers_services_dict[controller_id])
                    # BUGFIX: format the full log line in one call; the
                    # original formatted only the trailing segment, leaving
                    # the leading placeholders unfilled.
                    log_content = (
                        "controller:{}:chunk_id:{}:service_id:{}"
                        ":service_id_controller:{}:response:{}/{}".format(
                            controller_id, chunk_id,
                            service_id,
                            controllers_services_dict[controller_id],
                            code, response))
                    logger.info(log_base.format(
                        SOUTHBOUND, REQRESP, log_content))
                # Drop the service from both chunk representations.
                chunk_dict["serviceList"].pop(index)
                serviceList.pop(serviceList.index(service_id))
                chunk.serviceList = json.dumps(serviceList)
                chunk.chunk_json = json.dumps(chunk_dict)
                vlan = session.query(Vlan).filter(
                    Vlan.service_id == service_id).one()
                session.delete(vlan)
                session.delete(service)
                session.commit()
                return (API_RESPONSE["OK"]["content"],
                        API_RESPONSE["OK"]["code"])
        # service_id was not registered on this chunk.
        return errorResponder(
            "NOTFOUND", "Item not found")
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
# Honour X-Forwarded-* headers when the app runs behind a reverse proxy.
app.wsgi_app = ProxyFix(app.wsgi_app)
if __name__ == '__main__':
    """main function
    Default host: 0.0.0.0
    Default port: 8008
    Default debug: False
    """
    try:
        app.run(
            host='0.0.0.0',
            port=8008,
            debug=False)
    except Exception:
        # Log the fatal condition, then re-raise so the process exits
        # with a traceback instead of dying silently.
        logging.critical(
            'server: CRASHED: Got exception on main handler')
        raise
| [
"logging.basicConfig",
"logging.getLogger",
"sqlalchemy.orm.sessionmaker",
"json.loads",
"lib.adapters.i2cat.I2catController",
"flask.Flask",
"sqlalchemy.create_engine",
"json.dumps",
"uuid.uuid4",
"os.path.dirname",
"werkzeug.middleware.proxy_fix.ProxyFix",
"lib.adapters.ruckus.RuckusWiFi",
... | [((1504, 1668), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s [%(levelname)s] %(funcName)s %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""', 'filename': 'log_filename', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s [%(levelname)s] %(funcName)s %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S', filename=log_filename, level=logging.INFO)\n", (1523, 1668), False, 'import logging\n'), ((1731, 1750), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1748, 1750), False, 'import logging\n'), ((1833, 1848), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1838, 1848), False, 'from flask import Flask, Response, jsonify, render_template, request\n'), ((1909, 1927), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (1925, 1927), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((1937, 1983), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///file.db"""'], {'echo': '(False)'}), "('sqlite:///file.db', echo=False)\n", (1950, 1983), False, 'from sqlalchemy import create_engine\n'), ((9354, 9379), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (9366, 9379), False, 'from sqlalchemy.orm import sessionmaker\n'), ((52594, 52616), 'werkzeug.middleware.proxy_fix.ProxyFix', 'ProxyFix', (['app.wsgi_app'], {}), '(app.wsgi_app)\n', (52602, 52616), False, 'from werkzeug.middleware.proxy_fix import ProxyFix\n'), ((1458, 1487), 'os.path.dirname', 'os.path.dirname', (['log_filename'], {}), '(log_filename)\n', (1473, 1487), False, 'import os\n'), ((2095, 2150), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)', 'default': 'generate_uuid'}), '(String, primary_key=True, default=generate_uuid)\n', (2101, 2150), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((2390, 2404), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2396, 2404), False, 'from sqlalchemy import 
Column, ForeignKey, Integer, String\n'), ((2642, 2656), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2648, 2656), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((2671, 2685), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2677, 2685), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((2697, 2711), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2703, 2711), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((2732, 2746), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2738, 2746), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((2765, 2779), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2771, 2779), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((2795, 2809), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2801, 2809), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((2827, 2841), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2833, 2841), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((3252, 3307), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)', 'default': 'generate_uuid'}), '(String, primary_key=True, default=generate_uuid)\n', (3258, 3307), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((3328, 3343), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (3334, 3343), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((3368, 3382), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (3374, 3382), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((3394, 3408), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (3400, 3408), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((3424, 3438), 'sqlalchemy.Column', 'Column', (['String'], 
{}), '(String)\n', (3430, 3438), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((3450, 3464), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (3456, 3464), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((3480, 3494), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (3486, 3494), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((3825, 3880), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)', 'default': 'generate_uuid'}), '(String, primary_key=True, default=generate_uuid)\n', (3831, 3880), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((3901, 3916), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (3907, 3916), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((3941, 3955), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (3947, 3955), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((3967, 3981), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (3973, 3981), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((3993, 4007), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (3999, 4007), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((4021, 4035), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (4027, 4035), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((4063, 4077), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (4069, 4077), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((4093, 4107), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (4099, 4107), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((4467, 4522), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)', 'default': 'generate_uuid'}), '(String, primary_key=True, 
default=generate_uuid)\n', (4473, 4522), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((4540, 4554), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (4546, 4554), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((4575, 4590), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (4581, 4590), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((4604, 4618), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (4610, 4618), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((4630, 4644), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (4636, 4644), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((4660, 4674), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (4666, 4674), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((4971, 5026), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)', 'default': 'generate_uuid'}), '(String, primary_key=True, default=generate_uuid)\n', (4977, 5026), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((5044, 5058), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (5050, 5058), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((5069, 5084), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (5075, 5084), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((5112, 5126), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (5118, 5126), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((5371, 5426), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)', 'default': 'generate_uuid'}), '(String, primary_key=True, default=generate_uuid)\n', (5377, 5426), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((5674, 5688), 'sqlalchemy.Column', 'Column', (['String'], 
{}), '(String)\n', (5680, 5688), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((5926, 5940), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (5932, 5940), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((5969, 5983), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (5975, 5983), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((6010, 6024), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (6016, 6024), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((6048, 6063), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (6054, 6063), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((6086, 6100), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (6092, 6100), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((6120, 6134), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (6126, 6134), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((6154, 6168), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (6160, 6168), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((6200, 6214), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (6206, 6214), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((6244, 6258), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (6250, 6258), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((6284, 6298), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (6290, 6298), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((6312, 6326), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (6318, 6326), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((6346, 6360), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (6352, 
6360), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((10074, 10090), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (10088, 10090), False, 'from datetime import datetime\n'), ((11181, 11193), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11191, 11193), False, 'import uuid\n'), ((18910, 18923), 'flask.jsonify', 'jsonify', (['resp'], {}), '(resp)\n', (18917, 18923), False, 'from flask import Flask, Response, jsonify, render_template, request\n'), ((1188, 1213), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1203, 1213), False, 'import os\n'), ((1668, 1697), 'logging.getLogger', 'logging.getLogger', (['"""requests"""'], {}), "('requests')\n", (1685, 1697), False, 'import logging\n'), ((2022, 2034), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2032, 2034), False, 'import uuid\n'), ((13496, 13720), 'lib.adapters.ruckus.RuckusWiFi', 'RuckusWiFi', ([], {'controller_id': "item['id']", 'ip': "item['ip']", 'port': "item['port']", 'url': "item['url']", 'topology': "item['topology']", 'chunks': 'chunks', 'phy_id_mapping': 'phy_id_mapping', 'username': "item['username']", 'password': "item['password']"}), "(controller_id=item['id'], ip=item['ip'], port=item['port'], url=\n item['url'], topology=item['topology'], chunks=chunks, phy_id_mapping=\n phy_id_mapping, username=item['username'], password=item['password'])\n", (13506, 13720), False, 'from lib.adapters.ruckus import RuckusWiFi\n'), ((15665, 15690), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (15680, 15690), False, 'import os\n'), ((19502, 19521), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (19512, 19521), False, 'import json\n'), ((25612, 25631), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (25622, 25631), False, 'import json\n'), ((32249, 32271), 'json.dumps', 'json.dumps', (['chunk_dict'], {}), '(chunk_dict)\n', (32259, 32271), False, 'import json\n'), ((50500, 50528), 'json.loads', 
'json.loads', (['chunk.chunk_json'], {}), '(chunk.chunk_json)\n', (50510, 50528), False, 'import json\n'), ((50551, 50580), 'json.loads', 'json.loads', (['chunk.serviceList'], {}), '(chunk.serviceList)\n', (50561, 50580), False, 'import json\n'), ((13985, 14081), 'lib.adapters.i2cat.I2catController', 'I2catController', ([], {'controller_id': "item['id']", 'ip': "item['ip']", 'port': "item['port']", 'url': "item['url']"}), "(controller_id=item['id'], ip=item['ip'], port=item['port'],\n url=item['url'])\n", (14000, 14081), False, 'from lib.adapters.i2cat import I2catController\n'), ((23733, 23752), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (23743, 23752), False, 'import json\n'), ((25496, 25515), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (25506, 25515), False, 'import json\n'), ((27854, 27871), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (27861, 27871), False, 'from flask import Flask, Response, jsonify, render_template, request\n'), ((28306, 28325), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (28316, 28325), False, 'import json\n'), ((28443, 28462), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (28453, 28462), False, 'import json\n'), ((28747, 28771), 'json.loads', 'json.loads', (['phy.phy_json'], {}), '(phy.phy_json)\n', (28757, 28771), False, 'import json\n'), ((32374, 32402), 'json.dumps', 'json.dumps', (["{'id': chunk.id}"], {}), "({'id': chunk.id})\n", (32384, 32402), False, 'import json\n'), ((33160, 33182), 'flask.jsonify', 'jsonify', (['response_data'], {}), '(response_data)\n', (33167, 33182), False, 'from flask import Flask, Response, jsonify, render_template, request\n'), ((35519, 35536), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (35526, 35536), False, 'from flask import Flask, Response, jsonify, render_template, request\n'), ((37038, 37057), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (37048, 37057), False, 'import 
json\n'), ((37177, 37196), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (37187, 37196), False, 'import json\n'), ((45203, 45232), 'json.loads', 'json.loads', (['chunk.serviceList'], {}), '(chunk.serviceList)\n', (45213, 45232), False, 'import json\n'), ((45308, 45331), 'json.dumps', 'json.dumps', (['serviceList'], {}), '(serviceList)\n', (45318, 45331), False, 'import json\n'), ((45559, 45587), 'json.loads', 'json.loads', (['chunk.chunk_json'], {}), '(chunk.chunk_json)\n', (45569, 45587), False, 'import json\n'), ((45681, 45706), 'json.dumps', 'json.dumps', (['updated_chunk'], {}), '(updated_chunk)\n', (45691, 45706), False, 'import json\n'), ((45742, 45766), 'json.dumps', 'json.dumps', (['service_dict'], {}), '(service_dict)\n', (45752, 45766), False, 'import json\n'), ((49105, 49127), 'flask.jsonify', 'jsonify', (['response_data'], {}), '(response_data)\n', (49112, 49127), False, 'from flask import Flask, Response, jsonify, render_template, request\n'), ((50413, 50445), 'json.dumps', 'json.dumps', (['virtualInterfaceList'], {}), '(virtualInterfaceList)\n', (50423, 50445), False, 'import json\n'), ((52882, 52948), 'logging.critical', 'logging.critical', (['"""server: CRASHED: Got exception on main handler"""'], {}), "('server: CRASHED: Got exception on main handler')\n", (52898, 52948), False, 'import logging\n'), ((21900, 21917), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (21907, 21917), False, 'from flask import Flask, Response, jsonify, render_template, request\n'), ((26810, 26827), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (26817, 26827), False, 'from flask import Flask, Response, jsonify, render_template, request\n'), ((30902, 30916), 'json.dumps', 'json.dumps', (['[]'], {}), '([])\n', (30912, 30916), False, 'import json\n'), ((31161, 31175), 'json.dumps', 'json.dumps', (['[]'], {}), '([])\n', (31171, 31175), False, 'import json\n'), ((31188, 31210), 'json.dumps', 'json.dumps', (['chunk_dict'], {}), 
'(chunk_dict)\n', (31198, 31210), False, 'import json\n'), ((31479, 31525), 'json.dumps', 'json.dumps', (['controllers_content[controller_id]'], {}), '(controllers_content[controller_id])\n', (31489, 31525), False, 'import json\n'), ((31706, 31752), 'json.dumps', 'json.dumps', (['controllers_content[controller_id]'], {}), '(controllers_content[controller_id])\n', (31716, 31752), False, 'import json\n'), ((43366, 43402), 'json.loads', 'json.loads', (['phy.virtualInterfaceList'], {}), '(phy.virtualInterfaceList)\n', (43376, 43402), False, 'import json\n'), ((43498, 43530), 'json.dumps', 'json.dumps', (['virtualInterfaceList'], {}), '(virtualInterfaceList)\n', (43508, 43530), False, 'import json\n'), ((43558, 43582), 'json.loads', 'json.loads', (['phy.phy_json'], {}), '(phy.phy_json)\n', (43568, 43582), False, 'import json\n'), ((43835, 43855), 'json.dumps', 'json.dumps', (['phy_dict'], {}), '(phy_dict)\n', (43845, 43855), False, 'import json\n'), ((47502, 47532), 'json.dumps', 'json.dumps', (["{'id': service.id}"], {}), "({'id': service.id})\n", (47512, 47532), False, 'import json\n'), ((51844, 51867), 'json.dumps', 'json.dumps', (['serviceList'], {}), '(serviceList)\n', (51854, 51867), False, 'import json\n'), ((51903, 51925), 'json.dumps', 'json.dumps', (['chunk_dict'], {}), '(chunk_dict)\n', (51913, 51925), False, 'import json\n'), ((18379, 18607), 'json.dumps', 'json.dumps', (["{'timestamp': '2019-09-10T14:18:24.866+0000', 'status': API_RESPONSE[\n 'NOTFOUND']['code'], 'error': API_RESPONSE['NOTFOUND']['content'],\n 'message': 'No Result Found for the request', 'path': request.path}"], {}), "({'timestamp': '2019-09-10T14:18:24.866+0000', 'status':\n API_RESPONSE['NOTFOUND']['code'], 'error': API_RESPONSE['NOTFOUND'][\n 'content'], 'message': 'No Result Found for the request', 'path':\n request.path})\n", (18389, 18607), False, 'import json\n'), ((44831, 44855), 'json.dumps', 'json.dumps', (['service_dict'], {}), '(service_dict)\n', (44841, 44855), False, 'import 
json\n'), ((46157, 46203), 'json.dumps', 'json.dumps', (['controllers_content[controller_id]'], {}), '(controllers_content[controller_id])\n', (46167, 46203), False, 'import json\n'), ((46426, 46472), 'json.dumps', 'json.dumps', (['controllers_content[controller_id]'], {}), '(controllers_content[controller_id])\n', (46436, 46472), False, 'import json\n'), ((14534, 14561), 'json.dumps', 'json.dumps', (["box['location']"], {}), "(box['location'])\n", (14544, 14561), False, 'import json\n'), ((14683, 14706), 'json.dumps', 'json.dumps', (["box['phys']"], {}), "(box['phys'])\n", (14693, 14706), False, 'import json\n'), ((14737, 14752), 'json.dumps', 'json.dumps', (['box'], {}), '(box)\n', (14747, 14752), False, 'import json\n'), ((42514, 42526), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (42524, 42526), False, 'import uuid\n'), ((43103, 43127), 'json.dumps', 'json.dumps', (['new_vif_dict'], {}), '(new_vif_dict)\n', (43113, 43127), False, 'import json\n'), ((15259, 15273), 'json.dumps', 'json.dumps', (['[]'], {}), '([])\n', (15269, 15273), False, 'import json\n'), ((15312, 15327), 'json.dumps', 'json.dumps', (['phy'], {}), '(phy)\n', (15322, 15327), False, 'import json\n')] |
import pytest
import gpmap
from epistasis import models
import numpy as np
import pandas as pd
import os
def test__genotypes_to_X(test_data):
    """_genotypes_to_X rejects bad genotype lists and builds a model matrix
    of the right shape and encoding for every order/model_type combo."""
    d = test_data[0]
    gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
                               phenotype=d["phenotype"])
    # Duplicated genotypes should be rejected.
    g = list(gpm.genotype)
    g.extend(g)
    # A genotype absent from the map should be rejected.
    b = list(gpm.genotype)
    b.append("stupid")
    bad_genotypes = [g, b]
    for bad in bad_genotypes:
        with pytest.raises(ValueError):
            models.base._genotypes_to_X(bad, gpm, order=1, model_type="local")
    # Sample through various model combos. "local" encodings may contain
    # only {0, 1}; "global" encodings only {-1, 1}.
    allowed = {"local": {0, 1},
               "global": {-1, 1}}
    for d in test_data:
        gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
                                   phenotype=d["phenotype"])
        for order in range(1, gpm.length + 1):
            for model_type in ["local", "global"]:
                X = models.base._genotypes_to_X(gpm.genotype,
                                                gpm,
                                                order=order,
                                                model_type=model_type)
                # One row per genotype; values restricted to the encoding.
                assert X.shape[0] == len(gpm.genotype)
                assert set(np.unique(X)).issubset(allowed[model_type])
def test_arghandler_decorator():
    """The test's expected contract: ``arghandler`` routes each keyword
    argument of the wrapped method through a same-named private handler
    (``a`` -> ``self._a``), and raises AttributeError when no matching
    handler exists."""
    class Yo:
        # Handler for keyword ``a``: echoes the value it is given.
        def _a(self,data=5,method=None):
            return data
        # Handler for keyword ``b``: always substitutes 6.
        def _b(self,data=None,method=None):
            return 6
        @models.base.arghandler
        def test_method(self,a=None,b=None,**kwargs):
            return a, b
        # ``c``/``d`` have no ``_c``/``_d`` handlers, so calling this is
        # expected to fail.
        @models.base.arghandler
        def bad_method(self,c=None,d=None,**kwargs):
            return c, d
    yo = Yo()
    # ``b`` is always forced to 6 by ``_b``; ``a`` passes through ``_a``.
    assert yo.test_method() == (None,6)
    assert yo.test_method(a=5) == (5,6)
    assert yo.test_method(a=10) == (10,6)
    assert yo.test_method(b=10) == (None,6)
    with pytest.raises(AttributeError):
        yo.bad_method()
### Tests for AbstractModel:
# AbstractModel cannot be instantiated on its own, as it is designed to be a
# mixin with sklearn classes. Many methods have to be defined in subclass
# (.fit, .predict, etc.) These will not be tested here, but instead in the
# subclass tests. For methods defined here that are never redefined in subclass
# (._X, .add_gpm, etc.) we test using the simplest mixed/subclass
# (EpistasisLinearRegression).
def test_abstractmodel_predict_to_df(test_data):
    """
    Test basic functionality of predict_to_df. Real test of values will be
    done on .predict for subclasses.
    """
    m = models.linear.EpistasisLinearRegression()
    d = test_data[0]
    gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
                               phenotype=d["phenotype"])
    m.add_gpm(gpm)
    # Should fail -- no fit has been run yet.
    with pytest.raises(Exception):
        df = m.predict_to_df()
    m.fit()
    # After fitting, prediction yields one row per genotype.
    df = m.predict_to_df()
    # isinstance is the idiomatic check (was: type(df) is type(pd.DataFrame())).
    assert isinstance(df, pd.DataFrame)
    assert len(df) == len(d["genotype"])
    # Create and fit a new model.
    m = models.linear.EpistasisLinearRegression()
    gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
                               phenotype=d["phenotype"])
    # No gpm added -- should fail.
    with pytest.raises(RuntimeError):
        m.predict_to_df()
    m.add_gpm(gpm)
    m.fit()
    # A single genotype restricts the output to one row.
    df = m.predict_to_df(genotypes=d["genotype"][0])
    assert len(df) == 1
    # Invalid genotype arguments are rejected.
    bad_stuff = [1, {}, [1, 2], "STUPID", ["STUPID", "IS", "REAL"]]
    for b in bad_stuff:
        print(f"Trying bad genotypes {b}")  # hoisted out of the raises block
        with pytest.raises(ValueError):
            m.predict_to_df(genotypes=b)
    df = m.predict_to_df(genotypes=d["genotype"][:3])
    assert len(df) == 3
def test_abstractmodel_predict_to_csv(test_data, tmp_path):
    """predict_to_csv writes one row per genotype, or per requested genotype."""
    entry = test_data[0]
    model = models.linear.EpistasisLinearRegression()
    model.add_gpm(gpmap.GenotypePhenotypeMap(genotype=entry["genotype"],
                                             phenotype=entry["phenotype"]))
    model.fit()
    out_file = os.path.join(tmp_path, "tmp.csv")
    # Full prediction: every genotype in the map gets a row.
    model.predict_to_csv(filename=out_file)
    assert os.path.exists(out_file)
    assert len(pd.read_csv(out_file)) == len(entry["genotype"])
    # Restricting to a single genotype yields a single row.
    model.predict_to_csv(filename=out_file, genotypes=entry["genotype"][0])
    assert os.path.exists(out_file)
    assert len(pd.read_csv(out_file)) == 1
def test_abstractmodel_predict_to_excel(test_data, tmp_path):
    """predict_to_excel writes one row per genotype, or per requested genotype."""
    entry = test_data[0]
    model = models.linear.EpistasisLinearRegression()
    model.add_gpm(gpmap.GenotypePhenotypeMap(genotype=entry["genotype"],
                                             phenotype=entry["phenotype"]))
    model.fit()
    out_file = os.path.join(tmp_path, "tmp.xlsx")
    # Full prediction: every genotype in the map gets a row.
    model.predict_to_excel(filename=out_file)
    assert os.path.exists(out_file)
    assert len(pd.read_excel(out_file)) == len(entry["genotype"])
    # Restricting to a single genotype yields a single row.
    model.predict_to_excel(filename=out_file, genotypes=entry["genotype"][0])
    assert os.path.exists(out_file)
    assert len(pd.read_excel(out_file)) == 1
def test_abstractmodel_add_gpm(test_data):
    """Exercise add_gpm validation: bad gpm objects, the genotype_column,
    phenotype_column and uncertainty_column arguments, defaulting rules,
    and the attributes populated on success."""
    d = test_data[0]
    gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
                               phenotype=d["phenotype"])
    m = models.linear.EpistasisLinearRegression()
    # Anything that is not a GenotypePhenotypeMap must be rejected.
    bad_gpm = [1,None,"test",[],{}]
    for b in bad_gpm:
        with pytest.raises(TypeError):
            m.add_gpm(b)
    m.add_gpm(gpm)
    # Test genotype_column arg
    d = test_data[0]
    gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
                               phenotype=d["phenotype"])
    m = models.linear.EpistasisLinearRegression()
    # Non-string column names are a TypeError; missing names a KeyError.
    bad_genotype_column = [1,None,[],{},(1,)]
    for b in bad_genotype_column:
        with pytest.raises(TypeError):
            print(f"trying {b}")
            m.add_gpm(gpm,genotype_column=b)
    with pytest.raises(KeyError):
        m.add_gpm(gpm,genotype_column="not_a_column")
    m.add_gpm(gpm,genotype_column="genotype")
    assert m.genotype_column == "genotype"
    # Test phenotype_column arg
    d = test_data[0]
    gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"])
    m = models.linear.EpistasisLinearRegression()
    # Shouldn't work b/c no float column
    with pytest.raises(ValueError):
        m.add_gpm(gpm)
    # Shouldn't work because there is no column with that name
    gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
                               phenotype=d["phenotype"])
    with pytest.raises(KeyError):
        m.add_gpm(gpm,phenotype_column="not_real")
    # Shouldn't work because column is not numeric
    gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
                               phenotype=d["genotype"])
    with pytest.raises(ValueError):
        m.add_gpm(gpm,phenotype_column="phenotype")
    # Make sure it gets right column (first float that is not reserved)
    gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
                               coolness=d["phenotype"],
                               something_else=d["phenotype"])
    m.add_gpm(gpm)
    assert m.phenotype_column == "coolness"
    # Test uncertainty_column arg.
    # Default (None): a synthetic "epi_zero_uncertainty" column is created
    # with a single tiny value, 1e-6 times the smallest phenotype.
    gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
                               phenotype=d["phenotype"])
    m.add_gpm(gpm)
    assert m.uncertainty_column == "epi_zero_uncertainty"
    unc = np.array(m.gpm.data.loc[:,"epi_zero_uncertainty"])
    assert len(np.unique(unc)) == 1
    assert np.isclose(unc[0],np.min(gpm.data.loc[:,m.phenotype_column])*1e-6)
    # pass missing column
    gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
                               phenotype=d["phenotype"],
                               coolness=d["phenotype"],
                               not_float=d["genotype"])
    # Send in same as phenotype
    with pytest.raises(ValueError):
        m.add_gpm(gpm,uncertainty_column="phenotype")
    # send in a column that is not there
    with pytest.raises(KeyError):
        m.add_gpm(gpm,uncertainty_column="not_there")
    # send in a column that is not float
    with pytest.raises(ValueError):
        m.add_gpm(gpm,uncertainty_column="not_float")
    # Should work
    m.add_gpm(gpm,uncertainty_column="coolness")
    assert m.uncertainty_column == "coolness"
    # Check final output: map attached and derived attributes populated.
    assert m.gpm is gpm
    assert m.Xcolumns is not None
    assert m.epistasis is not None
    assert m._previous_X is None
def test_gpm_getter(test_data):
    """.gpm is None until a map is attached, then returns that exact map."""
    entry = test_data[0]
    genotype_map = gpmap.GenotypePhenotypeMap(genotype=entry["genotype"],
                                              phenotype=entry["phenotype"])
    model = models.linear.EpistasisLinearRegression()
    assert model.gpm is None
    model.add_gpm(genotype_map)
    assert model.gpm is genotype_map
def test_results_getter(test_data):
    """.results is None before fitting and a DataFrame afterwards."""
    entry = test_data[0]
    model = models.linear.EpistasisLinearRegression()
    model.add_gpm(gpmap.GenotypePhenotypeMap(genotype=entry["genotype"],
                                             phenotype=entry["phenotype"]))
    assert model.results is None
    model.fit()
    assert isinstance(model.results, pd.DataFrame)
def test_column_getters(test_data):
    """Column-name getters are None before add_gpm and populated after."""
    entry = test_data[0]
    genotype_map = gpmap.GenotypePhenotypeMap(genotype=entry["genotype"],
                                              phenotype=entry["phenotype"],
                                              uncertainty=entry["phenotype"])
    model = models.linear.EpistasisLinearRegression()
    expected = {"genotype_column": "genotype",
                "phenotype_column": "phenotype",
                "uncertainty_column": "uncertainty"}
    for attr in expected:
        assert getattr(model, attr) is None
    model.add_gpm(genotype_map, uncertainty_column="uncertainty")
    for attr, value in expected.items():
        assert getattr(model, attr) == value
def test__X_arghandler(test_data):
    """Exercise _X caching and validation: a bare _X() call caches its
    matrix in _previous_X and returns the same object until the map is
    re-attached; genotype-subset and raw-array calls bypass the cache;
    bad inputs raise ValueError.  The identity assertions below depend on
    this exact call order."""
    m = models.linear.EpistasisLinearRegression()
    with pytest.raises(ValueError):
        m._X()
    d = test_data[0]
    gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
                               phenotype=d["phenotype"],
                               uncertainty=d["phenotype"])
    m.add_gpm(gpm)
    # Make sure calling _X() naked-ly populates previous_X
    assert m._previous_X is None
    X = m._X()
    assert m._previous_X is X
    # If we access after having run, make sure X is the same object
    assert X is m._X()
    # Re-attaching the map should wipe out previous_X and force recalculation.
    m.add_gpm(gpm)
    assert X is not m._X()
    # Get x for single genotype. should work. should not update _previous_X
    X = m._X(d["genotype"][0])
    assert len(X) == 1
    assert X is not m._previous_X
    # Get x for two genotypes. should work and not update _previous_X
    X = m._X(d["genotype"][0:2])
    assert len(X) == 2
    assert X is not m._previous_X
    # Same two genotypes as a numpy array. should not update _previous_X
    X = m._X(np.array(d["genotype"][0:2]))
    assert len(X) == 2
    assert X is not m._previous_X
    # A raw array passes straight through unchanged, without caching.
    hack = np.ones((1,1))
    X = m._X(data=hack)
    assert X is hack
    assert X is not m._previous_X
    # pass in bad genotypes
    with pytest.raises(ValueError):
        X = m._X("NOT_A_GENOTYPE")
    with pytest.raises(ValueError):
        X = m._X([d["genotype"][0],"NOT_A_GENOTYPE"])
    # pass in general badness
    bad_passes = [np.ones((1,1,1)),[],"stupid",1,1.1,()]
    for b in bad_passes:
        with pytest.raises(ValueError):
            m._X(b)
def test__y_arghandler(test_data):
    """_y returns the phenotype column by default and validates explicit input."""
    model = models.linear.EpistasisLinearRegression()
    # Without an attached map there is nothing to return.
    with pytest.raises(ValueError):
        model._y()
    entry = test_data[0]
    model.add_gpm(
        gpmap.GenotypePhenotypeMap(genotype=entry["genotype"],
                                   coolness=entry["phenotype"],
                                   uncertainty=entry["phenotype"]),
        phenotype_column="coolness")
    assert np.array_equal(model._y(), entry["phenotype"])
    # Invalid explicit inputs are rejected.
    for junk in (np.ones((1, 1, 1)), [], "stupid", 1, 1.1, ()):
        print(f"trying {junk}")
        with pytest.raises(TypeError):
            model._y(junk)
    assert np.array_equal(model._y([1.0]), [1.0])
def test__yerr_arghandler(test_data):
    """_yerr returns the uncertainty column by default and validates input."""
    model = models.linear.EpistasisLinearRegression()
    # Without an attached map there is nothing to return.
    with pytest.raises(ValueError):
        model._yerr()
    entry = test_data[0]
    model.add_gpm(
        gpmap.GenotypePhenotypeMap(genotype=entry["genotype"],
                                   coolness=entry["phenotype"],
                                   uncertainty=entry["phenotype"]),
        phenotype_column="coolness",
        uncertainty_column="uncertainty")
    assert np.array_equal(model._yerr(), entry["phenotype"])
    # Invalid explicit inputs are rejected.
    for junk in (np.ones((1, 1, 1)), [], "stupid", 1, 1.1, ()):
        print(f"trying {junk}")
        with pytest.raises(TypeError):
            model._yerr(junk)
    assert np.array_equal(model._yerr([1.0]), [1.0])
def test__thetas_arghandler(test_data):
    """_thetas requires a fit, then returns coefficients; validates input."""
    model = models.linear.EpistasisLinearRegression()
    entry = test_data[0]
    model.add_gpm(
        gpmap.GenotypePhenotypeMap(genotype=entry["genotype"],
                                   coolness=entry["phenotype"],
                                   uncertainty=entry["phenotype"]),
        phenotype_column="coolness",
        uncertainty_column="uncertainty")
    # Before fitting there are no coefficients to return.
    with pytest.raises(RuntimeError):
        model._thetas()
    model.fit()
    assert len(model._thetas()) == 4
    # Invalid explicit inputs are rejected.
    for junk in (np.ones((1, 1, 1)), [], "stupid", 1, 1.1, ()):
        print(f"trying {junk}")
        with pytest.raises(TypeError):
            model._thetas(junk)
    assert np.array_equal(model._thetas([1.0]), [1.0])
def test__lnprior(test_data):
    """_lnprior defaults to a zero vector (flat prior); validates input."""
    model = models.linear.EpistasisLinearRegression()
    # Without an attached map there is nothing to evaluate.
    with pytest.raises(ValueError):
        model._lnprior()
    entry = test_data[0]
    model.add_gpm(
        gpmap.GenotypePhenotypeMap(genotype=entry["genotype"],
                                   coolness=entry["phenotype"],
                                   uncertainty=entry["phenotype"]),
        phenotype_column="coolness",
        uncertainty_column="uncertainty")
    assert np.array_equal(model._lnprior(), np.zeros(len(entry["genotype"])))
    # Invalid explicit inputs are rejected.
    for junk in (np.ones((1, 1, 1)), [], "stupid", 1, 1.1, ()):
        print(f"trying {junk}")
        with pytest.raises(TypeError):
            model._lnprior(junk)
    assert np.array_equal(model._lnprior([1.0]), [1.0])
| [
"os.path.exists",
"gpmap.GenotypePhenotypeMap",
"numpy.ones",
"pandas.read_csv",
"numpy.unique",
"os.path.join",
"numpy.min",
"numpy.array",
"numpy.array_equal",
"pytest.raises",
"epistasis.models.base._genotypes_to_X",
"pandas.read_excel",
"pandas.DataFrame",
"epistasis.models.linear.Epis... | [((231, 307), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (257, 307), False, 'import gpmap\n'), ((2639, 2680), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (2678, 2680), False, 'from epistasis import models\n'), ((2712, 2788), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (2738, 2788), False, 'import gpmap\n'), ((3141, 3182), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (3180, 3182), False, 'from epistasis import models\n'), ((3193, 3269), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (3219, 3269), False, 'import gpmap\n'), ((3878, 3919), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (3917, 3919), False, 'from epistasis import models\n'), ((3951, 4027), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (3977, 4027), False, 'import gpmap\n'), ((4112, 4145), 'os.path.join', 'os.path.join', (['tmp_path', '"""tmp.csv"""'], {}), "(tmp_path, 'tmp.csv')\n", (4124, 4145), False, 'import os\n'), ((4197, 4221), 'os.path.exists', 'os.path.exists', (['csv_file'], {}), '(csv_file)\n', (4211, 4221), False, 'import os\n'), ((4231, 4252), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (4242, 4252), True, 'import pandas as pd\n'), ((4410, 4434), 'os.path.exists', 'os.path.exists', 
(['csv_file'], {}), '(csv_file)\n', (4424, 4434), False, 'import os\n'), ((4444, 4465), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (4455, 4465), True, 'import pandas as pd\n'), ((4561, 4602), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (4600, 4602), False, 'from epistasis import models\n'), ((4634, 4710), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (4660, 4710), False, 'import gpmap\n'), ((4797, 4831), 'os.path.join', 'os.path.join', (['tmp_path', '"""tmp.xlsx"""'], {}), "(tmp_path, 'tmp.xlsx')\n", (4809, 4831), False, 'import os\n'), ((4887, 4913), 'os.path.exists', 'os.path.exists', (['excel_file'], {}), '(excel_file)\n', (4901, 4913), False, 'import os\n'), ((4923, 4948), 'pandas.read_excel', 'pd.read_excel', (['excel_file'], {}), '(excel_file)\n', (4936, 4948), True, 'import pandas as pd\n'), ((5110, 5136), 'os.path.exists', 'os.path.exists', (['excel_file'], {}), '(excel_file)\n', (5124, 5136), False, 'import os\n'), ((5146, 5171), 'pandas.read_excel', 'pd.read_excel', (['excel_file'], {}), '(excel_file)\n', (5159, 5171), True, 'import pandas as pd\n'), ((5272, 5348), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (5298, 5348), False, 'import gpmap\n'), ((5395, 5436), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (5434, 5436), False, 'from epistasis import models\n'), ((5643, 5719), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (5669, 5719), False, 'import gpmap\n'), ((5766, 5807), 
'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (5805, 5807), False, 'from epistasis import models\n'), ((6249, 6299), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']"}), "(genotype=d['genotype'])\n", (6275, 6299), False, 'import gpmap\n'), ((6309, 6350), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (6348, 6350), False, 'from epistasis import models\n'), ((6526, 6602), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (6552, 6602), False, 'import gpmap\n'), ((6787, 6862), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['genotype']"}), "(genotype=d['genotype'], phenotype=d['genotype'])\n", (6813, 6862), False, 'import gpmap\n'), ((7071, 7181), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'coolness': "d['phenotype']", 'something_else': "d['phenotype']"}), "(genotype=d['genotype'], coolness=d['phenotype'],\n something_else=d['phenotype'])\n", (7097, 7181), False, 'import gpmap\n'), ((7386, 7462), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (7412, 7462), False, 'import gpmap\n'), ((7587, 7638), 'numpy.array', 'np.array', (["m.gpm.data.loc[:, 'epi_zero_uncertainty']"], {}), "(m.gpm.data.loc[:, 'epi_zero_uncertainty'])\n", (7595, 7638), True, 'import numpy as np\n'), ((7789, 7919), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']", 'coolness': "d['phenotype']", 'not_float': "d['genotype']"}), "(genotype=d['genotype'], 
phenotype=d['phenotype'],\n coolness=d['phenotype'], not_float=d['genotype'])\n", (7815, 7919), False, 'import gpmap\n'), ((8708, 8784), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (8734, 8784), False, 'import gpmap\n'), ((8831, 8872), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (8870, 8872), False, 'from epistasis import models\n'), ((9010, 9086), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (9036, 9086), False, 'import gpmap\n'), ((9133, 9174), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (9172, 9174), False, 'from epistasis import models\n'), ((9351, 9459), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']", 'uncertainty': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'],\n uncertainty=d['phenotype'])\n", (9377, 9459), False, 'import gpmap\n'), ((9539, 9580), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (9578, 9580), False, 'from epistasis import models\n'), ((9933, 9974), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (9972, 9974), False, 'from epistasis import models\n'), ((10058, 10166), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']", 'uncertainty': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'],\n uncertainty=d['phenotype'])\n", (10084, 10166), False, 'import gpmap\n'), ((11152, 11167), 'numpy.ones', 'np.ones', (['(1, 
1)'], {}), '((1, 1))\n', (11159, 11167), True, 'import numpy as np\n'), ((11654, 11695), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (11693, 11695), False, 'from epistasis import models\n'), ((11779, 11886), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'coolness': "d['phenotype']", 'uncertainty': "d['phenotype']"}), "(genotype=d['genotype'], coolness=d['phenotype'],\n uncertainty=d['phenotype'])\n", (11805, 11886), False, 'import gpmap\n'), ((12290, 12314), 'numpy.array_equal', 'np.array_equal', (['y', '[1.0]'], {}), '(y, [1.0])\n', (12304, 12314), True, 'import numpy as np\n'), ((12363, 12404), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (12402, 12404), False, 'from epistasis import models\n'), ((12491, 12598), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'coolness': "d['phenotype']", 'uncertainty': "d['phenotype']"}), "(genotype=d['genotype'], coolness=d['phenotype'],\n uncertainty=d['phenotype'])\n", (12517, 12598), False, 'import gpmap\n'), ((13044, 13068), 'numpy.array_equal', 'np.array_equal', (['y', '[1.0]'], {}), '(y, [1.0])\n', (13058, 13068), True, 'import numpy as np\n'), ((13118, 13159), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (13157, 13159), False, 'from epistasis import models\n'), ((13192, 13299), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'coolness': "d['phenotype']", 'uncertainty': "d['phenotype']"}), "(genotype=d['genotype'], coolness=d['phenotype'],\n uncertainty=d['phenotype'])\n", (13218, 13299), False, 'import gpmap\n'), ((13872, 13896), 'numpy.array_equal', 'np.array_equal', (['y', '[1.0]'], {}), '(y, [1.0])\n', (13886, 13896), True, 'import numpy as np\n'), ((13936, 13977), 
'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (13975, 13977), False, 'from epistasis import models\n'), ((14067, 14174), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'coolness': "d['phenotype']", 'uncertainty': "d['phenotype']"}), "(genotype=d['genotype'], coolness=d['phenotype'],\n uncertainty=d['phenotype'])\n", (14093, 14174), False, 'import gpmap\n'), ((14654, 14678), 'numpy.array_equal', 'np.array_equal', (['y', '[1.0]'], {}), '(y, [1.0])\n', (14668, 14678), True, 'import numpy as np\n'), ((804, 880), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (830, 880), False, 'import gpmap\n'), ((1980, 2009), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1993, 2009), False, 'import pytest\n'), ((2892, 2916), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2905, 2916), False, 'import pytest\n'), ((3351, 3378), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3364, 3378), False, 'import pytest\n'), ((6015, 6038), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (6028, 6038), False, 'import pytest\n'), ((6402, 6427), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6415, 6427), False, 'import pytest\n'), ((6649, 6672), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (6662, 6672), False, 'import pytest\n'), ((6909, 6934), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6922, 6934), False, 'import pytest\n'), ((8069, 8094), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8082, 8094), False, 'import pytest\n'), ((8184, 8207), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (8197, 8207), False, 
'import pytest\n'), ((8297, 8322), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8310, 8322), False, 'import pytest\n'), ((9984, 10009), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9997, 10009), False, 'import pytest\n'), ((11001, 11029), 'numpy.array', 'np.array', (["d['genotype'][0:2]"], {}), "(d['genotype'][0:2])\n", (11009, 11029), True, 'import numpy as np\n'), ((11284, 11309), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11297, 11309), False, 'import pytest\n'), ((11355, 11380), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11368, 11380), False, 'import pytest\n'), ((11485, 11503), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (11492, 11503), True, 'import numpy as np\n'), ((11705, 11730), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11718, 11730), False, 'import pytest\n'), ((12102, 12120), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (12109, 12120), True, 'import numpy as np\n'), ((12414, 12439), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12427, 12439), False, 'import pytest\n'), ((12850, 12868), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (12857, 12868), True, 'import numpy as np\n'), ((13491, 13518), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (13504, 13518), False, 'import pytest\n'), ((13674, 13692), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (13681, 13692), True, 'import numpy as np\n'), ((13987, 14012), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14000, 14012), False, 'import pytest\n'), ((14454, 14472), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (14461, 14472), True, 'import numpy as np\n'), ((545, 570), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (558, 570), False, 'import pytest\n'), 
((584, 650), 'epistasis.models.base._genotypes_to_X', 'models.base._genotypes_to_X', (['bad', 'gpm'], {'order': '(1)', 'model_type': '"""local"""'}), "(bad, gpm, order=1, model_type='local')\n", (611, 650), False, 'from epistasis import models\n'), ((3041, 3055), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3053, 3055), True, 'import pandas as pd\n'), ((3615, 3640), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3628, 3640), False, 'import pytest\n'), ((5509, 5533), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5522, 5533), False, 'import pytest\n'), ((5901, 5925), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5914, 5925), False, 'import pytest\n'), ((7653, 7667), 'numpy.unique', 'np.unique', (['unc'], {}), '(unc)\n', (7662, 7667), True, 'import numpy as np\n'), ((7703, 7746), 'numpy.min', 'np.min', (['gpm.data.loc[:, m.phenotype_column]'], {}), '(gpm.data.loc[:, m.phenotype_column])\n', (7709, 7746), True, 'import numpy as np\n'), ((11562, 11587), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11575, 11587), False, 'import pytest\n'), ((12179, 12203), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (12192, 12203), False, 'import pytest\n'), ((12927, 12951), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (12940, 12951), False, 'import pytest\n'), ((13751, 13775), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (13764, 13775), False, 'import pytest\n'), ((14531, 14555), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (14544, 14555), False, 'import pytest\n'), ((1035, 1113), 'epistasis.models.base._genotypes_to_X', 'models.base._genotypes_to_X', (['gpm.genotype', 'gpm'], {'order': 'i', 'model_type': 'model_type'}), '(gpm.genotype, gpm, order=i, model_type=model_type)\n', (1062, 1113), False, 'from epistasis import models\n'), ((1340, 1352), 
'numpy.unique', 'np.unique', (['X'], {}), '(X)\n', (1349, 1352), True, 'import numpy as np\n')] |
# File name: spyview.py
#
# This example should be run with "execfile('spyview.py')"
from numpy import pi, linspace, sinc, sqrt
from lib.file_support.spyview import SpyView
# NOTE(review): ``qt`` is not imported here -- this example appears to expect
# the QTLab measurement environment, where ``qt`` is an injected global.
# Sweep grids for the two coordinate axes: 100 points over [-2*pi, 2*pi].
x_vec = linspace(-2 * pi, 2 * pi, 100)
y_vec = linspace(-2 * pi, 2 * pi, 100)
qt.mstart()
data = qt.Data(name='testmeasurement')
# to make the spyview meta.txt file dimension info is required:
data.add_coordinate('X',
                    size=len(x_vec),
                    start=x_vec[0],
                    end=x_vec[-1])
data.add_coordinate('Y',
                    size=len(y_vec),
                    start=y_vec[0],
                    end=y_vec[-1])
data.add_value('Z')
data.create_file()
# Record a 2-D radial sinc surface, one row of x values per y value.
for y in y_vec:
    for x in x_vec:
        result = sinc(sqrt(x**2 + y**2))
        data.add_data_point(x, y, result)
        qt.msleep(0.001)  # brief sleep between points (keeps the GUI responsive)
    data.new_block()  # presumably ends the current sweep row in the data file
data.close_file()
qt.mend()
# create the spyview meta.txt file:
SpyView(data).write_meta_file()
| [
"numpy.sqrt",
"numpy.linspace",
"lib.file_support.spyview.SpyView"
] | [((183, 213), 'numpy.linspace', 'linspace', (['(-2 * pi)', '(2 * pi)', '(100)'], {}), '(-2 * pi, 2 * pi, 100)\n', (191, 213), False, 'from numpy import pi, linspace, sinc, sqrt\n'), ((222, 252), 'numpy.linspace', 'linspace', (['(-2 * pi)', '(2 * pi)', '(100)'], {}), '(-2 * pi, 2 * pi, 100)\n', (230, 252), False, 'from numpy import pi, linspace, sinc, sqrt\n'), ((839, 852), 'lib.file_support.spyview.SpyView', 'SpyView', (['data'], {}), '(data)\n', (846, 852), False, 'from lib.file_support.spyview import SpyView\n'), ((665, 686), 'numpy.sqrt', 'sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (669, 686), False, 'from numpy import pi, linspace, sinc, sqrt\n')] |
#! /usr/bin/env python3
"""
alibExp
=======================
Qt4 interface for alib explorer
To browse alib in a more user-friendly way than simple text
Item.data(1,-1) stores its data, i.e. a str or another alib
"""
# NOTE: the actual command documentation is collected from docstrings of the
# commands and is appended to __doc__ after the class has been defined.
"""
Revisions
=================
151125 completed reading functionality
151209 wordless gui, remove node
151210 edit text, add icon to tree, btAdd function
151214 added btRoot, explore root
151219 added *GetCurrent*, modified *RemoveDataSync* to suited with alib.Pop
151229 added *GetSelectedText*
160112 change data display to waText.Brief
160113 change non-editing to read only to allow scroll
160309 fixed save failure by explore root after lock.
171204 updated alibExp.GetSelectedText to return the path of selected node
       fixed bug in treeWidget.ItemToPath
180102 migrate to be compatible with PyQt5
"""
# Prefer PyQt4; fall back to PyQt5 when PyQt4 is unavailable.
try:
    from PyQt4 import QtCore
    from PyQt4.QtCore import QTimer
    from PyQt4.QtGui import QApplication, QWidget
except ImportError:
    # Bug fix: ``except ImportError or ModuleNotFoundError:`` short-circuits
    # to ``except ImportError:`` only (``or`` evaluates the first truthy
    # operand).  ModuleNotFoundError subclasses ImportError, so a single
    # ImportError clause covers both correctly.
    print('PyQt4 module not found, try using PyQt5')
    from PyQt5 import QtCore
    from PyQt5.QtWidgets import QApplication, QWidget
    from PyQt5.QtCore import QTimer
from WalArt.gui.QtGui4or5 import QtGuiFinder
QtGui=QtGuiFinder()
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'alibExp.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
# Compatibility shims emitted by pyuic: older PyQt4 (API v1) exposes
# QString/UnicodeUTF8; newer builds do not, so fall back to plain str
# handling when those attributes are missing.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # No QString available: strings are already unicode, pass through.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer PyQt: translate() takes no encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
from WalArt import waFile,waText
# Icon directory: the folder containing 'add.png', located via waFile.Find.
iconPath=waFile.GetFolderName(waFile.Find('add.png'))
class alibTree(QtGui.QTreeWidget):
    # Emitted from dropEvent with the list of dropped local file paths.
    dropped = QtCore.pyqtSignal(list)
    def __init__(self,parent=None):
        """Tree widget that displays an alib and accepts file drag-and-drop."""
        super(alibTree,self).__init__(parent)
        self.setAcceptDrops(True)
        self.data=None  # the alib currently shown; set by Load()
        # Icons marking node kinds: nested alib, non-empty value, empty value.
        self.imgList=QtGui.QIcon(waFile.Join(iconPath,'list.png'))
        self.imgData=QtGui.QIcon(waFile.Join(iconPath,'data.png'))
        self.imgBlank=QtGui.QIcon(waFile.Join(iconPath,'blank.png'))
def dragEnterEvent(self, event):
if event.mimeData().hasUrls:
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
filePaths = [
str(url.toLocalFile())
for url in event.mimeData().urls()
]
self.dropped.emit(filePaths)
else:
event.ignore()
def Load(self,a):
'''load the alib into the treeWidget
'''
self.clear()
self.setHeaderLabels(['Key','Value'])
self.data=a
for t in a:
if isinstance(a[t],alib):
ti=QtGui.QTreeWidgetItem([str(t)])
self.addTopLevelItem(ti)
self.LoadToNode(ti,a[t])
ti.setIcon(0,self.imgList)
ti.setExpanded(True)
else:
ti=QtGui.QTreeWidgetItem([str(t)])
if a[t]!='':
ti.setIcon(0,self.imgData)
else:
ti.setIcon(0,self.imgBlank)
self.addTopLevelItem(ti)
ti.setData(1,0,waText.Brief(a[t],20))
ti.setData(1,-1,a[t])
#help(ti)
def LoadToNode(self,node,a):
'''load the alib to node recursively
'''
#print(a)
for t in a:
if isinstance(a[t],alib):
ti=QtGui.QTreeWidgetItem([str(t)])
node.addChild(ti)
self.LoadToNode(ti,a[t])
ti.setIcon(0,self.imgList)
ti.setExpanded(True)
else:
ti=QtGui.QTreeWidgetItem([str(t)])
if a[t]!='':
ti.setIcon(0,self.imgData)
else:
ti.setIcon(0,self.imgBlank)
i=node.addChild(ti)
ti.setData(1,0,waText.Brief(a[t],20))
ti.setData(1,-1,a[t])
def ItemFromPath(self,path):
'''Get item from a path string of keys like: a|b|c
path can also be a list of strings
'''
if isinstance(path,str):
path=path.split('|')
item=None
for i in range(len(self.data.keys())):
#print(self.topLevelItem(i).text(0))
if self.topLevelItem(i).text(0)==path[0]:
item=self.topLevelItem(i)
break
if item!=None:
k=1
while k<len(path):
found=False
for i in range(item.childCount()):
#print(item.child(i).text(0))
if item.child(i).text(0)==path[k]:
item=item.child(i)
found=True
break
if found==False:
return None
else:
k+=1
if k<len(path):
return None
return item
def ItemToPath(self,item):
fl=[item.text(0)]
p=item.parent()
while p!=None:
fl.append(p.text(0))
p=p.parent()
fl.reverse()
return fl
def RemoveNodeSync(self,item):
'''Remove the node from both the view and the alib
'''
p=item.parent()
if p==None:
self.data.Pop(waText.atoi(item.text(0)))
else:
p.data(1,-1).Pop(waText.atoi(item.text(0)))
self.Load(self.data)
from WalArt import alib
def New(d):
'''Make a new alib explorer in the dialog, and return the object
'''
a=alibExp()
a.setupUi(d)
return a
class alibExp(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Form"))
Dialog.resize(474, 414)
Dialog.setWindowIcon(QtGui.QIcon(waFile.Join(iconPath,'settings.png')))
self.verticalLayout = QtGui.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox = QtGui.QGroupBox(Dialog)
self.groupBox.setMinimumSize(QtCore.QSize(0, 60))
self.groupBox.setMaximumSize(QtCore.QSize(16777215, 60))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout.addWidget(self.groupBox)
self.horizontalLayout = QtGui.QHBoxLayout(self.groupBox)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btRoot = QtGui.QToolButton(self.groupBox)
self.btRoot.setMinimumSize(QtCore.QSize(30, 30))
self.btRoot.setMaximumSize(QtCore.QSize(30, 30))
self.btRoot.setObjectName(_fromUtf8("btRoot"))
self.btRoot.setIcon(QtGui.QIcon(waFile.Join(iconPath,'circle.png')))
self.btRoot.setToolTip('Explore the root node')
self.horizontalLayout.addWidget(self.btRoot)
self.lineEdit = QtGui.QLineEdit(self.groupBox)
self.lineEdit.setMinimumSize(QtCore.QSize(0, 30))
self.lineEdit.setMaximumSize(QtCore.QSize(16777215, 30))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.horizontalLayout.addWidget(self.lineEdit)
self.btAdd = QtGui.QToolButton(self.groupBox)
self.btAdd.setMinimumSize(QtCore.QSize(30, 30))
self.btAdd.setMaximumSize(QtCore.QSize(30, 30))
self.btAdd.setObjectName(_fromUtf8("btAdd"))
self.btAdd.setIcon(QtGui.QIcon(waFile.Join(iconPath,'add.png')))
self.btAdd.setToolTip('Add this path to the tree')
self.horizontalLayout.addWidget(self.btAdd)
self.btMinus = QtGui.QPushButton(self.groupBox)
self.btMinus.setMinimumSize(QtCore.QSize(30, 30))
self.btMinus.setMaximumSize(QtCore.QSize(30, 30))
self.btMinus.setObjectName(_fromUtf8("btMinus"))
self.btMinus.setIcon(QtGui.QIcon(waFile.Join(iconPath,'minus.png')))
self.btMinus.setToolTip('Delete this node')
self.horizontalLayout.addWidget(self.btMinus)
import WalArt.gui.buttons
self.btLock = WalArt.gui.buttons.btLock(self.groupBox)
self.btLock.setMinimumSize(QtCore.QSize(30, 30))
self.btLock.setMaximumSize(QtCore.QSize(30, 30))
self.btLock.setObjectName(_fromUtf8("btLock"))
self.btLock.setToolTip('Unlock to start editing node content')
self.horizontalLayout.addWidget(self.btLock)
self.splitter = QtGui.QSplitter(Dialog)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.verticalLayout.addWidget(self.splitter)
self.treeWidget = alibTree(self.splitter)
self.treeWidget.setObjectName(_fromUtf8("treeWidget"))
self.treeWidget.headerItem().setText(0, _fromUtf8("1"))
self.plainTextEdit = QtGui.QPlainTextEdit(self.splitter)
self.plainTextEdit.setObjectName(_fromUtf8("plainTextEdit"))
self.messenger = QtGui.QLabel(Dialog)
self.messenger.setMinimumSize(QtCore.QSize(0, 30))
self.messenger.setMaximumSize(QtCore.QSize(16777215, 30))
self.messenger.setObjectName(_fromUtf8("messenger"))
self.verticalLayout.addWidget(self.messenger)
self.retranslateUi(Dialog)
#QtCore.QObject.connect(self.treeWidget, QtCore.SIGNAL(_fromUtf8("clicked(QModelIndex)")),
# self.itemSelected)
self.treeWidget.clicked.connect(self.itemSelected)
self.plainTextEdit.setAcceptDrops(True)
self.treeWidget.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
self.treeWidget.setFrameShadow(QtGui.QFrame.Plain)
self.treeWidget.setFrameShape(QtGui.QFrame.Box)
#QtCore.QObject.connect(self.btLock, QtCore.SIGNAL(_fromUtf8("clicked()")),self.btLockClicked)
self.btLock.clicked.connect(self.btLockClicked)
#QtCore.QObject.connect(self.btMinus, QtCore.SIGNAL(_fromUtf8("clicked()")),self.btMinusClicked)
self.btMinus.clicked.connect(self.btMinusClicked)
#QtCore.QObject.connect(self.btAdd, QtCore.SIGNAL(_fromUtf8("clicked()")),self.btAddClicked)
self.btAdd.clicked.connect(self.btAddClicked)
#QtCore.QObject.connect(self.btRoot, QtCore.SIGNAL(_fromUtf8("clicked()")),self.btRootClicked)
self.btRoot.clicked.connect(self.btRootClicked)
self.treeWidget.dropEvent=self.itemDropped
QtCore.QMetaObject.connectSlotsByName(Dialog)
self.setEditing(False)
def Message(self,text):
self.messenger.setText(text)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.groupBox.setTitle(_translate("Dialog", "alibExplorer", None))
self.btAdd.setText(_translate("Dialog", "+", None))
self.btMinus.setText(_translate("Dialog", "", None))
self.btLock.setText(_translate("Dialog", "...", None))
self.messenger.setText(_translate("Dialog", "Messege", None))
def itemSelected(self, index):
item=self.treeWidget.itemFromIndex(index)
#help(item)
#d=item.data(1,0)
d=item.data(1,-1)
#self.plainTextEdit.setEnabled(not self.btLock.getState())
if isinstance(d,alib):
self.plainTextEdit.setPlainText(d.ToString(''))
else:
self.plainTextEdit.setPlainText(str(d))
self.setEditing(False)
fl=[item.text(0)]
p=item.parent()
while p!=None:
fl.append(p.text(0))
p=p.parent()
fl.reverse()
self.lineEdit.setText('|'.join(fl))
self.Message('')
def setEditing(self,b):
'''b==True for editing mode, else no editing mode
'''
self.plainTextEdit.setReadOnly(not b)
self.lineEdit.setReadOnly(b)
self.btLock.setState(not b)
if b == True:
self.Message('Modify value and hit lock to save.')
def dragEnterEvent(self,e):
e.acceptPropsedAction()
def btLockClicked(self):
editing=not self.btLock.getState()
si=self.treeWidget.selectedItems()
if editing==True:
if len(si)==0:
#self.Message('Begin editing whole alib')
self.treeWidget.data.FromString(self.plainTextEdit.toPlainText())
self.treeWidget.Load(self.treeWidget.data)
self.Message('Change saved')
self.setEditing(not editing)
return
else:
si=si[0]
#help(self.plainTextEdit)
if str(si.data(1,-1))==self.plainTextEdit.toPlainText():
self.Message('Nothing changed')
else:
#record data
v=alib.Parse(self.plainTextEdit.toPlainText())
k=si.text(0)
if si.parent()==None:
self.treeWidget.data[k]=v
else:
si.parent().data(1,-1)[k]=v
self.treeWidget.Load(self.treeWidget.data)
self.Message('Change saved')
self.btRootClicked()
else:
if len(si)==0:
self.Message('Begin editing whole alib')
self.setEditing(not editing)
def itemDropped(self,event):
#print('called')
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
filePaths = [
str(url.toLocalFile())
for url in event.mimeData().urls()
]
#print(str(event))
self.messenger.setText(filePaths[0])
self.lineEdit.setText(filePaths[0])
#self.dropped.emit(filePaths)
from WalArt import alib
self.treeWidget.Load(alib().Load(filePaths[0]))
else:
event.ignore()
def Load(self,a):
self.treeWidget.Load(a)
self.btRootClicked()
def btMinusClicked(self):
path=self.lineEdit.text()
item=self.treeWidget.ItemFromPath(path)
if item==None:
self.Message('Warning: the node does not exist')
else:
self.treeWidget.RemoveNodeSync(item)
self.Message('node{%s} deleted'%path)
def btAddClicked(self):
path=self.lineEdit.text()
item=self.treeWidget.ItemFromPath(path)
if item==None:
self.treeWidget.data.setValue(path,'')
self.treeWidget.Load(self.treeWidget.data)
self.Message('Node{%s} added'%path)
else:
try:
#insert a number and shift other number forward,
#for convenience of auto numbering
i=int(item.text(0))
if item.parent()==None:
a=self.treeWidget.data
else:
a=item.parent().data(1,-1)
boundary=i
while str(boundary) in a:
boundary+=1
while boundary!=i:
a[str(boundary)]=a[str(boundary-1)]
boundary-=1
a[str(i)]=''
self.treeWidget.Load(self.treeWidget.data)
self.Message('Node{%s} added with shifts'%path)
except ValueError:
self.Message('Node{%s} already exists, nothing added'%path)
def btRootClicked(self):
d=self.treeWidget.data
si=self.treeWidget.selectedItems()
#self.treeWidget.deselectAll()
for i in si:
i.setSelected(False)
#self.plainTextEdit.setEnabled(not self.btLock.getState())
if isinstance(d,alib):
self.plainTextEdit.setPlainText(d.ToString(''))
else:
self.plainTextEdit.setPlainText(str(d))
self.setEditing(False)
self.lineEdit.setText('')
self.Message('Root node explored')
def GetCurrent(self):
'''returns the object that is currently exploring
it is the data of selected treenode, or the whole alib if nothing is selected
'''
si=self.treeWidget.selectedItems()
if len(si)>0:
return si[0].data(1,-1)
else:
return self.treeWidget.data
def GetSelectedText(self):
'''Similar as GetCurrent, but returns the path of selected node'''
si=self.treeWidget.selectedItems()
if len(si)>0:
return '|'.join(self.treeWidget.ItemToPath(si[0]))
else:
return ''
import sys
import time
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = alibExp()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
time.sleep(5)
| [
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"time.sleep",
"WalArt.gui.QtGui4or5.QtGuiFinder",
"WalArt.waFile.Join",
"WalArt.waFile.Find",
"WalArt.alib",
"WalArt.waText.Brief",
"PyQt5.QtCore.QSize"
] | [((1364, 1377), 'WalArt.gui.QtGui4or5.QtGuiFinder', 'QtGuiFinder', ([], {}), '()\n', (1375, 1377), False, 'from WalArt.gui.QtGui4or5 import QtGuiFinder\n'), ((2062, 2084), 'WalArt.waFile.Find', 'waFile.Find', (['"""add.png"""'], {}), "('add.png')\n", (2073, 2084), False, 'from WalArt import waFile, waText\n'), ((2136, 2159), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['list'], {}), '(list)\n', (2153, 2159), False, 'from PyQt5 import QtCore\n'), ((17527, 17540), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (17537, 17540), False, 'import time\n'), ((11190, 11235), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Dialog'], {}), '(Dialog)\n', (11227, 11235), False, 'from PyQt5 import QtCore\n'), ((2332, 2365), 'WalArt.waFile.Join', 'waFile.Join', (['iconPath', '"""list.png"""'], {}), "(iconPath, 'list.png')\n", (2343, 2365), False, 'from WalArt import waFile, waText\n'), ((2399, 2432), 'WalArt.waFile.Join', 'waFile.Join', (['iconPath', '"""data.png"""'], {}), "(iconPath, 'data.png')\n", (2410, 2432), False, 'from WalArt import waFile, waText\n'), ((2467, 2501), 'WalArt.waFile.Join', 'waFile.Join', (['iconPath', '"""blank.png"""'], {}), "(iconPath, 'blank.png')\n", (2478, 2501), False, 'from WalArt import waFile, waText\n'), ((6841, 6860), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(60)'], {}), '(0, 60)\n', (6853, 6860), False, 'from PyQt5 import QtCore\n'), ((6899, 6925), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(16777215)', '(60)'], {}), '(16777215, 60)\n', (6911, 6925), False, 'from PyQt5 import QtCore\n'), ((7270, 7290), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(30)', '(30)'], {}), '(30, 30)\n', (7282, 7290), False, 'from PyQt5 import QtCore\n'), ((7327, 7347), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(30)', '(30)'], {}), '(30, 30)\n', (7339, 7347), False, 'from PyQt5 import QtCore\n'), ((7692, 7711), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(30)'], {}), '(0, 30)\n', (7704, 7711), False, 'from 
PyQt5 import QtCore\n'), ((7750, 7776), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(16777215)', '(30)'], {}), '(16777215, 30)\n', (7762, 7776), False, 'from PyQt5 import QtCore\n'), ((7980, 8000), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(30)', '(30)'], {}), '(30, 30)\n', (7992, 8000), False, 'from PyQt5 import QtCore\n'), ((8036, 8056), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(30)', '(30)'], {}), '(30, 30)\n', (8048, 8056), False, 'from PyQt5 import QtCore\n'), ((8396, 8416), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(30)', '(30)'], {}), '(30, 30)\n', (8408, 8416), False, 'from PyQt5 import QtCore\n'), ((8454, 8474), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(30)', '(30)'], {}), '(30, 30)\n', (8466, 8474), False, 'from PyQt5 import QtCore\n'), ((8849, 8869), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(30)', '(30)'], {}), '(30, 30)\n', (8861, 8869), False, 'from PyQt5 import QtCore\n'), ((8906, 8926), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(30)', '(30)'], {}), '(30, 30)\n', (8918, 8926), False, 'from PyQt5 import QtCore\n'), ((9748, 9767), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(30)'], {}), '(0, 30)\n', (9760, 9767), False, 'from PyQt5 import QtCore\n'), ((9807, 9833), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(16777215)', '(30)'], {}), '(16777215, 30)\n', (9819, 9833), False, 'from PyQt5 import QtCore\n'), ((3920, 3942), 'WalArt.waText.Brief', 'waText.Brief', (['a[t]', '(20)'], {}), '(a[t], 20)\n', (3932, 3942), False, 'from WalArt import waFile, waText\n'), ((4654, 4676), 'WalArt.waText.Brief', 'waText.Brief', (['a[t]', '(20)'], {}), '(a[t], 20)\n', (4666, 4676), False, 'from WalArt import waFile, waText\n'), ((6581, 6618), 'WalArt.waFile.Join', 'waFile.Join', (['iconPath', '"""settings.png"""'], {}), "(iconPath, 'settings.png')\n", (6592, 6618), False, 'from WalArt import waFile, waText\n'), ((7444, 7479), 'WalArt.waFile.Join', 'waFile.Join', (['iconPath', '"""circle.png"""'], {}), "(iconPath, 'circle.png')\n", (7455, 7479), False, 'from WalArt import 
waFile, waText\n'), ((8150, 8182), 'WalArt.waFile.Join', 'waFile.Join', (['iconPath', '"""add.png"""'], {}), "(iconPath, 'add.png')\n", (8161, 8182), False, 'from WalArt import waFile, waText\n'), ((8574, 8608), 'WalArt.waFile.Join', 'waFile.Join', (['iconPath', '"""minus.png"""'], {}), "(iconPath, 'minus.png')\n", (8585, 8608), False, 'from WalArt import waFile, waText\n'), ((14565, 14571), 'WalArt.alib', 'alib', ([], {}), '()\n', (14569, 14571), False, 'from WalArt import alib\n')] |
"""
Integration test for the Luigi wrapper of AWS Batch
Requires:
- boto3 package
- Amazon AWS credentials discoverable by boto3 (e.g., by using ``aws configure``
from awscli_)
- An enabled AWS Batch job queue configured to run on a compute environment.
Written and maintained by <NAME> (@jfeala) for Outlier Bio (@outlierbio)
"""
import unittest
try:
from ob_pipelines.batch import BatchTask, BatchJobException, client, _get_job_status
except ImportError:
raise unittest.SkipTest('boto3 is not installed. BatchTasks require boto3')
TEST_JOB_DEF = {
'jobDefinitionName': 'hello-world',
'type': 'container',
'parameters': {
'message': 'hll wrld'
},
'containerProperties': {
'image': 'centos',
'command': ['/bin/echo', 'Ref::message'],
'vcpus': 2,
'memory': 4,
}
}
class BatchTaskNoOutput(BatchTask):
def complete(self):
if self.batch_job_id:
return _get_job_status(self.batch_job_id) == 'SUCCEEDED'
return False
class BatchTaskOverrideCommand(BatchTaskNoOutput):
@property
def command(self):
return ['/bin/sleep', '10']
class BatchTaskOverrideFailingCommand(BatchTaskNoOutput):
@property
def command(self):
return ['not', 'a', 'command']
class BatchTaskNonzeroExitCommand(BatchTaskNoOutput):
@property
def command(self):
return ['exit', '1']
class TestBatchTask(unittest.TestCase):
def setUp(self):
# Register the test task definition
response = client.register_job_definition(**TEST_JOB_DEF)
self.arn = response['jobDefinitionArn']
def test_unregistered_task(self):
t = BatchTaskNoOutput(job_def=TEST_JOB_DEF, job_name='test_unregistered')
t.run()
def test_registered_task(self):
t = BatchTaskNoOutput(job_def_arn=self.arn, job_name='test_registered')
t.run()
def test_override_command(self):
t = BatchTaskOverrideCommand(job_def_arn=self.arn, job_name='test_override')
t.run()
def test_failing_command(self):
t = BatchTaskOverrideFailingCommand(job_def_arn=self.arn, job_name='test_failure')
with self.assertRaises(BatchJobException):
t.run()
def test_nonzero_exit(self):
t = BatchTaskNonzeroExitCommand(job_def_arn=self.arn, job_name='test_nonzero_exit')
with self.assertRaises(BatchJobException):
t.run()
| [
"ob_pipelines.batch.client.register_job_definition",
"ob_pipelines.batch._get_job_status",
"unittest.SkipTest"
] | [((477, 546), 'unittest.SkipTest', 'unittest.SkipTest', (['"""boto3 is not installed. BatchTasks require boto3"""'], {}), "('boto3 is not installed. BatchTasks require boto3')\n", (494, 546), False, 'import unittest\n'), ((1541, 1587), 'ob_pipelines.batch.client.register_job_definition', 'client.register_job_definition', ([], {}), '(**TEST_JOB_DEF)\n', (1571, 1587), False, 'from ob_pipelines.batch import BatchTask, BatchJobException, client, _get_job_status\n'), ((956, 990), 'ob_pipelines.batch._get_job_status', '_get_job_status', (['self.batch_job_id'], {}), '(self.batch_job_id)\n', (971, 990), False, 'from ob_pipelines.batch import BatchTask, BatchJobException, client, _get_job_status\n')] |
import os
import uuid
import logging
import json
from json import JSONEncoder
from pythonjsonlogger import jsonlogger
from datetime import datetime
from logging.config import dictConfig
# Custom JSON encoder which enforce standard ISO 8601 format, UUID format
class ModelJsonEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, UUID):
return str(o)
if isinstance(o, datetime):
return o.isoformat()
return json.JSONEncoder.default(self, o)
class LogFilter(logging.Filter):
def __init__(self, service=None, instance=None):
self.service = service
self.instance = instance
def filter(self, record):
record.service = self.service
record.instance = self.instance
return True
class JsonLogFormatter(jsonlogger.JsonFormatter):
def add_fields(self, log_record, record, message_dict):
super().add_fields(log_record, record, message_dict)
# Add timestamp field with default : now
if not log_record.get('timestamp'):
now = datetime.utcnow().isoformat()
log_record['timestamp'] = now
# Add level field
if log_record.get('level'):
log_record['level'] = log_record['level'].upper()
else:
log_record['level'] = record.levelname
# Add type field for internal logs
if not log_record.get('type'):
log_record['type'] = 'internal'
# Configure Logging
def configure_logging(level='DEBUG', service=None, instance=None):
dictConfig({
'version': 1,
'formatters': {'default': {
'()': JsonLogFormatter,
'format': '%(timestamp)s %(level)s %(service)s %(instance)s %(type)s %(message)s',
'json_encoder': ModelJsonEncoder
}},
'filters': {'default': {
'()': LogFilter,
'service': service,
'instance': instance
}},
'handlers': {'default_handler': {
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
'filters': ['default'],
'formatter': 'default'
}},
'root': {
'level': level,
'handlers': ['default_handler']
}
}) | [
"logging.config.dictConfig",
"json.JSONEncoder.default",
"datetime.datetime.utcnow"
] | [((1569, 2085), 'logging.config.dictConfig', 'dictConfig', (["{'version': 1, 'formatters': {'default': {'()': JsonLogFormatter, 'format':\n '%(timestamp)s %(level)s %(service)s %(instance)s %(type)s %(message)s',\n 'json_encoder': ModelJsonEncoder}}, 'filters': {'default': {'()':\n LogFilter, 'service': service, 'instance': instance}}, 'handlers': {\n 'default_handler': {'class': 'logging.StreamHandler', 'stream':\n 'ext://sys.stdout', 'filters': ['default'], 'formatter': 'default'}},\n 'root': {'level': level, 'handlers': ['default_handler']}}"], {}), "({'version': 1, 'formatters': {'default': {'()': JsonLogFormatter,\n 'format':\n '%(timestamp)s %(level)s %(service)s %(instance)s %(type)s %(message)s',\n 'json_encoder': ModelJsonEncoder}}, 'filters': {'default': {'()':\n LogFilter, 'service': service, 'instance': instance}}, 'handlers': {\n 'default_handler': {'class': 'logging.StreamHandler', 'stream':\n 'ext://sys.stdout', 'filters': ['default'], 'formatter': 'default'}},\n 'root': {'level': level, 'handlers': ['default_handler']}})\n", (1579, 2085), False, 'from logging.config import dictConfig\n'), ((467, 500), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'o'], {}), '(self, o)\n', (491, 500), False, 'import json\n'), ((1088, 1105), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1103, 1105), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python
import collections
from pathlib import Path
# noinspection PyUnresolvedReferences
import vtkmodules.vtkInteractionStyle
# noinspection PyUnresolvedReferences
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.vtkCommonColor import vtkNamedColors
from vtkmodules.vtkCommonCore import vtkLookupTable
from vtkmodules.vtkCommonMath import vtkMatrix4x4
from vtkmodules.vtkCommonTransforms import vtkTransform
from vtkmodules.vtkFiltersCore import (
vtkContourFilter,
vtkDecimatePro,
vtkExecutionTimer,
vtkFlyingEdges3D,
vtkMarchingCubes,
vtkPolyDataNormals,
vtkStripper,
vtkWindowedSincPolyDataFilter
)
from vtkmodules.vtkFiltersGeneral import vtkTransformPolyDataFilter
from vtkmodules.vtkIOImage import vtkMetaImageReader
from vtkmodules.vtkImagingCore import (
vtkImageShrink3D,
vtkImageThreshold
)
from vtkmodules.vtkImagingGeneral import vtkImageGaussianSmooth
from vtkmodules.vtkImagingMorphological import vtkImageIslandRemoval2D
from vtkmodules.vtkInteractionWidgets import vtkOrientationMarkerWidget
from vtkmodules.vtkRenderingAnnotation import vtkAxesActor
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkPolyDataMapper,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer
)
def get_program_parameters(argv):
    """Parse the command-line arguments.

    :param argv: The argument vector (argv[0] is the program name).
    :return: A tuple (data_folder, tissues, view, flying_edges, decimation).
    """
    import argparse
    description = 'Display all frog parts and translucent skin.'
    epilogue = '''
    To specify all the tissues at once:
     blood brain duodenum eye_retna eye_white heart ileum kidney l_intestine liver lung nerve skeleton spleen stomach skin
    You can leave out brainbin, it is the brain with no gaussian smoothing.
    Here are the parameters used to get the views in the VTK Textbook:
    Fig12-9a:
        blood brain duodenum eye_retna eye_white heart ileum kidney l_intestine liver lung nerve skeleton spleen stomach skin -a
    Fig12-9b:
        blood brain duodenum eye_retna eye_white heart ileum kidney l_intestine liver lung nerve skeleton spleen stomach -a
    Fig12-9c:
        brain duodenum eye_retna eye_white heart ileum kidney l_intestine liver lung nerve spleen stomach -c
    Fig12-9d:
        brain duodenum eye_retna eye_white heart ileum kidney l_intestine liver lung nerve spleen stomach -d
    '''
    parser = argparse.ArgumentParser(description=description, epilog=epilogue,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-a', action='store_const', dest='view', const='a',
                       help='The view corresponds to Figs 12-9a and 12-9b in the VTK Textbook')
    group.add_argument('-c', action='store_const', dest='view', const='c',
                       help='The view corresponds to Figs 12-9c in the VTK Textbook')
    group.add_argument('-d', action='store_const', dest='view', const='d',
                       help='The view corresponds to Figs 12-9d in the VTK Textbook')
    # Default for the mutually exclusive view options (was 'type=None', an
    # attribute nothing reads; 'view' is the dest the options actually use).
    parser.set_defaults(view=None)
    parser.add_argument('-m', action='store_false', dest='flying_edges',
                        help='Use flying edges by default, marching cubes if set.')
    parser.add_argument('-t', action='store_true', dest='decimation',
                        help='Decimate if set.')
    parser.add_argument('data_folder', help='The path to the files: frog.mhd and frogtissue.mhd.')
    parser.add_argument('tissues', nargs='+', help='List of one or more tissues.')
    # Bug fix: parse the argv that was passed in instead of silently reading
    # sys.argv (the argv parameter was previously ignored).
    args = parser.parse_args(argv[1:])
    return args.data_folder, args.tissues, args.view, args.flying_edges, args.decimation
def main(data_folder, tissues, view, flying_edges, decimate):
    """Reconstruct and render the selected frog tissues.

    :param data_folder: Folder containing frog.mhd and frogtissue.mhd.
    :param tissues: Names of the tissues to display.
    :param view: Optional view key ('a', 'c' or 'd') matching the VTK Textbook figures.
    :param flying_edges: True to use vtkFlyingEdges3D, False for vtkMarchingCubes.
    :param decimate: True to decimate the iso-surfaces.
    """
    colors = vtkNamedColors()

    # Check that both input volumes exist before doing any work.
    path = Path(data_folder)
    if path.is_dir():
        s = ''
        frog_fn = path.joinpath('frog').with_suffix('.mhd')
        if not frog_fn.is_file():
            # Accumulate the message; it is printed once below (previously
            # an extra print here reported the first missing file twice).
            s += 'The file: {:s} does not exist.\n'.format(str(frog_fn))
        frog_tissue_fn = path.joinpath('frogtissue').with_suffix('.mhd')
        if not frog_tissue_fn.is_file():
            s += 'The file: {:s} does not exist.'.format(str(frog_tissue_fn))
        if s:
            print(s)
            return
    else:
        # Fixed typo in the message: frog.mhs -> frog.mhd
        print('Expected a path to frog.mhd and frogtissue.mhd')
        return

    # Tissue parameters
    available_tissues = tissue_parameters()
    selected_tissues = {key: available_tissues[key] for key in tissues}
    if not selected_tissues:
        print('No tissues!')
        return
    missing_parameters = False
    for k, v in selected_tissues.items():
        res = check_for_required_parameters(k, v)
        if res:
            print(res)
            missing_parameters = True
    if missing_parameters:
        print('Some required parameters are missing!')
        return

    # Setup render window, renderer, and interactor.
    renderer = vtkRenderer()
    render_window = vtkRenderWindow()
    render_window.AddRenderer(renderer)
    render_window_interactor = vtkRenderWindowInteractor()
    render_window_interactor.SetRenderWindow(render_window)

    lut = create_frog_lut(colors)

    # Per-tissue isocontouring timings (a plain dict is sufficient here).
    ict = {}
    for name, tissue in selected_tissues.items():
        print('Tissue: {:>9s}, label: {:2d}'.format(name, tissue['TISSUE']))
        t, actor = create_frog_actor(frog_fn, frog_tissue_fn, tissue, flying_edges, decimate, lut)
        ict[name] = t
        renderer.AddActor(actor)

    # Initial view (looking down on the dorsal surface).
    renderer.GetActiveCamera().Roll(-90)
    renderer.ResetCamera()

    # Final view
    if view:
        if view == 'a':
            # Figs 12-9a and 12-9b in the VTK Textbook
            camera = renderer.GetActiveCamera()
            camera.SetPosition(-850.073854, 834.142692, 266.017598)
            camera.SetFocalPoint(-72.387897, 109.637349, -306.343185)
            camera.SetViewUp(0.284585, -0.387303, 0.876931)
            camera.SetDistance(1207.186939)
            camera.SetClippingRange(247.737449, 1758.922849)
        elif view == 'c':
            # Figs 12-9c in the VTK Textbook
            camera = renderer.GetActiveCamera()
            camera.SetPosition(-438.993734, 404.715262, 105.797836)
            camera.SetFocalPoint(-254.193794, 245.672169, -95.535892)
            camera.SetViewUp(0.256893, -0.629643, 0.733182)
            camera.SetDistance(316.197712)
            camera.SetClippingRange(0.789810, 789.809963)
        elif view == 'd':
            # Fig 12-9d in the VTK Textbook
            camera = renderer.GetActiveCamera()
            camera.SetPosition(-262.252604, 229.863144, 562.084505)
            camera.SetFocalPoint(-288.693092, 228.870041, -91.185421)
            camera.SetViewUp(0.729526, -0.683360, -0.028488)
            camera.SetDistance(653.805539)
            camera.SetClippingRange(452.459105, 905.003135)

    print('Timings:')
    print('\n'.join(format_timings(ict)))

    renderer.SetBackground(colors.GetColor3d('LightSteelBlue'))
    render_window.SetSize(640, 640)
    render_window.SetWindowName('FrogReconstruction')
    render_window.Render()

    # Orientation marker (axes) in the lower-left corner.
    axes = vtkAxesActor()
    widget = vtkOrientationMarkerWidget()
    rgba = [0.0, 0.0, 0.0, 0.0]
    colors.GetColor("Carrot", rgba)
    widget.SetOutlineColor(rgba[0], rgba[1], rgba[2])
    widget.SetOrientationMarker(axes)
    widget.SetInteractor(render_window_interactor)
    widget.SetViewport(0.0, 0.0, 0.2, 0.2)
    widget.SetEnabled(1)
    widget.InteractiveOn()

    render_window.Render()

    render_window_interactor.Start()
def create_frog_actor(frog_fn, frog_tissue_fn, tissue, flying_edges, decimate, lut):
    """Build the iso-surface actor for one tissue.

    :param frog_fn: Path to frog.mhd (used only for the skin).
    :param frog_tissue_fn: Path to frogtissue.mhd (used for all other tissues).
    :param tissue: Dict of parameters for this tissue (labels, smoothing, etc.).
    :param flying_edges: True to use vtkFlyingEdges3D, False for vtkMarchingCubes.
    :param decimate: True to decimate the extracted iso-surface.
    :param lut: Lookup table providing the tissue color, indexed by tissue label.
    :return: A tuple (timings dict, vtkActor).
    """
    # Get the tissue parameters
    pixel_size = tissue['PIXEL_SIZE']
    columns = tissue['COLUMNS']
    rows = tissue['ROWS']

    voi = tissue['VOI']
    spacing = float(tissue['SPACING'])
    start_slice = float(tissue['START_SLICE'])
    data_spacing = [pixel_size, pixel_size, spacing]
    data_origin = [-(columns / 2.0) * pixel_size, -(rows / 2.0) * pixel_size, start_slice * spacing]

    #
    # adjust y bounds for PNM coordinate system
    #
    tmp = voi[2]
    voi[2] = rows - voi[3] - 1
    voi[3] = rows - tmp - 1

    # The skin comes from the raw frog volume; all other tissues come from
    # the labelled tissue volume.
    if tissue['NAME'] == 'skin':
        fn = frog_fn
    else:
        fn = frog_tissue_fn

    reader = vtkMetaImageReader()
    reader.SetFileName(str(fn))
    reader.SetDataSpacing(data_spacing)
    reader.SetDataOrigin(data_origin)
    reader.SetDataExtent(voi)
    reader.Update()

    last_connection = reader

    if not tissue['NAME'] == 'skin':
        if tissue['ISLAND_REPLACE'] >= 0:
            island_remover = vtkImageIslandRemoval2D()
            island_remover.SetAreaThreshold(tissue['ISLAND_AREA'])
            island_remover.SetIslandValue(tissue['ISLAND_REPLACE'])
            island_remover.SetReplaceValue(tissue['TISSUE'])
            # Bug fix: SetInput() was removed in VTK 6; connect the filter
            # into the pipeline with SetInputConnection instead.
            island_remover.SetInputConnection(last_connection.GetOutputPort())
            island_remover.Update()
            last_connection = island_remover

        # Isolate this tissue's label as a binary volume.
        select_tissue = vtkImageThreshold()
        select_tissue.ThresholdBetween(tissue['TISSUE'], tissue['TISSUE'])
        select_tissue.SetInValue(255)
        select_tissue.SetOutValue(0)
        select_tissue.SetInputConnection(last_connection.GetOutputPort())
        last_connection = select_tissue

    # Subsample to reduce the data size before contouring.
    shrinker = vtkImageShrink3D()
    shrinker.SetInputConnection(last_connection.GetOutputPort())
    shrinker.SetShrinkFactors(tissue['SAMPLE_RATE'])
    shrinker.AveragingOn()
    last_connection = shrinker

    # Optional Gaussian smoothing (skipped when all deviations are zero).
    if not all(v == 0 for v in tissue['GAUSSIAN_STANDARD_DEVIATION']):
        gaussian = vtkImageGaussianSmooth()
        gaussian.SetStandardDeviation(*tissue['GAUSSIAN_STANDARD_DEVIATION'])
        gaussian.SetRadiusFactors(*tissue['GAUSSIAN_RADIUS_FACTORS'])
        gaussian.SetInputConnection(shrinker.GetOutputPort())
        last_connection = gaussian

    # Time the isocontouring (defaultdict() with no factory is just a dict).
    ict = {}
    iso_value = tissue['VALUE']
    if flying_edges:
        iso_surface = vtkFlyingEdges3D()
        iso_surface.SetInputConnection(last_connection.GetOutputPort())
        iso_surface.ComputeScalarsOff()
        iso_surface.ComputeGradientsOff()
        iso_surface.ComputeNormalsOff()
        iso_surface.SetValue(0, iso_value)
        timer = vtkExecutionTimer()
        timer.SetFilter(iso_surface)
        iso_surface.Update()
        ict['Flying Edges'] = timer.GetElapsedWallClockTime()
    else:
        iso_surface = vtkMarchingCubes()
        iso_surface.SetInputConnection(last_connection.GetOutputPort())
        iso_surface.ComputeScalarsOff()
        iso_surface.ComputeGradientsOff()
        iso_surface.ComputeNormalsOff()
        iso_surface.SetValue(0, iso_value)
        timer = vtkExecutionTimer()
        timer.SetFilter(iso_surface)
        iso_surface.Update()
        ict['Marching Cubes'] = timer.GetElapsedWallClockTime()

    # Re-orient the surface into the viewing coordinate system.
    so = SliceOrder()
    # transform = so.get(tissue['SLICE_ORDER'])
    # Match Frog.py
    transform = so.get('hfap')
    transform.Scale(1, -1, 1)
    tf = vtkTransformPolyDataFilter()
    tf.SetTransform(transform)
    tf.SetInputConnection(iso_surface.GetOutputPort())
    last_connection = tf

    if decimate:
        decimator = vtkDecimatePro()
        decimator.SetInputConnection(last_connection.GetOutputPort())
        decimator.SetFeatureAngle(tissue['DECIMATE_ANGLE'])
        # Bug fix: assigning to .MaximumIterations only set a Python attribute;
        # use the VTK setter so the filter actually honors the limit.
        decimator.SetMaximumIterations(tissue['DECIMATE_ITERATIONS'])
        decimator.PreserveTopologyOn()
        decimator.SetErrorIsAbsolute(1)
        decimator.SetAbsoluteError(tissue['DECIMATE_ERROR'])
        decimator.SetTargetReduction(tissue['DECIMATE_REDUCTION'])
        last_connection = decimator

    smoother = vtkWindowedSincPolyDataFilter()
    smoother.SetInputConnection(last_connection.GetOutputPort())
    smoother.SetNumberOfIterations(tissue['SMOOTH_ITERATIONS'])
    smoother.BoundarySmoothingOff()
    smoother.FeatureEdgeSmoothingOff()
    smoother.SetFeatureAngle(tissue['SMOOTH_ANGLE'])
    smoother.SetPassBand(tissue['SMOOTH_FACTOR'])
    smoother.NonManifoldSmoothingOn()
    smoother.NormalizeCoordinatesOff()
    smoother.Update()

    normals = vtkPolyDataNormals()
    normals.SetInputConnection(smoother.GetOutputPort())
    normals.SetFeatureAngle(tissue['FEATURE_ANGLE'])

    stripper = vtkStripper()
    stripper.SetInputConnection(normals.GetOutputPort())

    mapper = vtkPolyDataMapper()
    mapper.SetInputConnection(stripper.GetOutputPort())

    # NOTE: a stray vtkContourFilter was created here but never connected to
    # the rendered pipeline; it has been removed as dead code.

    actor = vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetOpacity(tissue['OPACITY'])
    actor.GetProperty().SetDiffuseColor(lut.GetTableValue(tissue['TISSUE'])[:3])
    actor.GetProperty().SetSpecular(0.5)
    actor.GetProperty().SetSpecularPower(10)

    return ict, actor
class SliceOrder:
    """
    These transformations permute image and other geometric data to maintain proper
    orientation regardless of the acquisition order. After applying these transforms with
    vtkTransformFilter, a view up of 0,-1,0 will result in the body part
    facing the viewer.
    NOTE: some transformations have a -1 scale factor for one of the components.
    To ensure proper polygon orientation and normal direction, you must
    apply the vtkPolyDataNormals filter.
    Naming (the nomenclature is medical):
    si - superior to inferior (top to bottom)
    is - inferior to superior (bottom to top)
    ap - anterior to posterior (front to back)
    pa - posterior to anterior (back to front)
    lr - left to right
    rl - right to left
    """

    @staticmethod
    def _build_matrix(entries):
        """Return a zeroed vtkMatrix4x4 with the given (row, col, value) entries set."""
        m = vtkMatrix4x4()
        m.Zero()
        for r, c, v in entries:
            m.SetElement(r, c, v)
        return m

    def __init__(self):
        self.si_mat = self._build_matrix([(0, 0, 1), (1, 2, 1), (2, 1, -1), (3, 3, 1)])
        self.is_mat = self._build_matrix([(0, 0, 1), (1, 2, -1), (2, 1, -1), (3, 3, 1)])
        self.lr_mat = self._build_matrix([(0, 2, -1), (1, 1, -1), (2, 0, 1), (3, 3, 1)])
        self.rl_mat = self._build_matrix([(0, 2, 1), (1, 1, -1), (2, 0, 1), (3, 3, 1)])
        # The transforms above assume radiological views of the slices (viewed from
        # the feet). Other modalities such as physical sectioning may view from the
        # head; hf_mat modifies the originals with a 180-degree rotation about y.
        self.hf_mat = self._build_matrix([(0, 0, -1), (1, 1, 1), (2, 2, -1), (3, 3, 1)])

    @staticmethod
    def _matrix_transform(mat):
        """Return a vtkTransform whose matrix is *mat*."""
        t = vtkTransform()
        t.SetMatrix(mat)
        return t

    def s_i(self):
        return self._matrix_transform(self.si_mat)

    def i_s(self):
        return self._matrix_transform(self.is_mat)

    @staticmethod
    def a_p():
        t = vtkTransform()
        # BUG FIX: vtkTransform.Scale() returns None, so the original
        # 'return t.Scale(1, -1, 1)' returned None instead of the transform.
        t.Scale(1, -1, 1)
        return t

    @staticmethod
    def p_a():
        t = vtkTransform()
        # BUG FIX: same as a_p() -- scale first, then return the transform itself.
        t.Scale(1, -1, -1)
        return t

    def l_r(self):
        t = self._matrix_transform(self.lr_mat)
        t.Update()
        return t

    def r_l(self):
        # BUG FIX: originally used self.lr_mat (copy-paste error), which made
        # r_l identical to l_r and left rl_mat unused.
        return self._matrix_transform(self.rl_mat)

    def h_f(self):
        return self._matrix_transform(self.hf_mat)

    def hf_si(self):
        t = vtkTransform()
        t.Concatenate(self.hf_mat)
        t.Concatenate(self.si_mat)
        return t

    def hf_is(self):
        t = vtkTransform()
        t.Concatenate(self.hf_mat)
        t.Concatenate(self.is_mat)
        return t

    def hf_ap(self):
        t = vtkTransform()
        t.Concatenate(self.hf_mat)
        t.Scale(1, -1, 1)
        return t

    def hf_pa(self):
        t = vtkTransform()
        t.Concatenate(self.hf_mat)
        t.Scale(1, -1, -1)
        return t

    def hf_lr(self):
        t = vtkTransform()
        t.Concatenate(self.hf_mat)
        t.Concatenate(self.lr_mat)
        return t

    def hf_rl(self):
        t = vtkTransform()
        t.Concatenate(self.hf_mat)
        t.Concatenate(self.rl_mat)
        return t

    def get(self, order):
        """
        Returns the vtkTransform corresponding to the slice order.
        :param order: The slice order
        :return: The vtkTransform to use
        """
        dispatch = {'si': self.s_i, 'is': self.i_s, 'ap': self.a_p, 'pa': self.p_a,
                    'lr': self.l_r, 'rl': self.r_l, 'hf': self.h_f,
                    'hfsi': self.hf_si, 'hfis': self.hf_is, 'hfap': self.hf_ap,
                    'hfpa': self.hf_pa, 'hflr': self.hf_lr, 'hfrl': self.hf_rl}
        try:
            return dispatch[order]()
        except KeyError:
            raise Exception('No such transform "{:s}" exists.'.format(order)) from None
def default_parameters():
    """Return the baseline parameter dictionary shared by every tissue configuration.

    Values here are the generic defaults; frog()/skin() and the per-tissue
    builders override the relevant entries.
    """
    # Note: the original code assigned START_SLICE/END_SLICE twice; only the
    # final values (1 and 138) survive, so those are used directly here.
    p = {'NAME': '',
         'TISSUE': '1',
         'START_SLICE': 1,
         'END_SLICE': 138,
         'STUDY': 'frogtissue',
         'VALUE': 127.5,
         'ROWS': 470,
         'COLUMNS': 500,
         'HEADER_SIZE': 0,
         'PIXEL_SIZE': 1,
         'SPACING': 1.5,
         'REDUCTION': 1,
         'FEATURE_ANGLE': 60,
         'DECIMATE_ANGLE': 60,
         'SMOOTH_ANGLE': 60,
         'SMOOTH_ITERATIONS': 10,
         'SMOOTH_FACTOR': 0.1,
         'DECIMATE_ITERATIONS': 1,
         'DECIMATE_REDUCTION': 1,
         'DECIMATE_ERROR': 0.0002,
         'DECIMATE_ERROR_INCREMENT': 0.0002,
         'ISLAND_AREA': 4,
         'ISLAND_REPLACE': -1,
         'GAUSSIAN_STANDARD_DEVIATION': [2, 2, 2],
         'GAUSSIAN_RADIUS_FACTORS': [2, 2, 2]}
    # VOI spans the full volume: [x_min, x_max, y_min, y_max, z_min, z_max].
    p['VOI'] = [0, p['COLUMNS'] - 1, 0, p['ROWS'] - 1, 0, p['END_SLICE']]
    p['SAMPLE_RATE'] = [1, 1, 1]
    p['OPACITY'] = 1.0
    return p
def blood():
    """Segmentation parameters for the blood tissue (lookup-table index 1)."""
    p = frog()
    p.update({'NAME': 'blood', 'TISSUE': 1, 'START_SLICE': 14,
              'END_SLICE': 131, 'VALUE': 4})
    p['VOI'] = [33, 406, 62, 425, p['START_SLICE'], p['END_SLICE']]
    return p
def brain():
    """Segmentation parameters for the brain tissue (lookup-table index 2)."""
    p = frog()
    p.update({'NAME': 'brain', 'TISSUE': 2, 'START_SLICE': 1, 'END_SLICE': 33})
    p['VOI'] = [349, 436, 211, 252, p['START_SLICE'], p['END_SLICE']]
    return p
def brainbin():
    """Binary-brain variant of brain(): no gaussian smoothing, no decimation."""
    p = frog()
    p.update({'NAME': 'brainbin', 'TISSUE': 2, 'START_SLICE': 1, 'END_SLICE': 33})
    # NOTE(review): unlike every other tissue the last two VOI entries are
    # END, START (reversed); preserved as-is, but confirm it is intentional.
    p['VOI'] = [349, 436, 211, 252, p['END_SLICE'], p['START_SLICE']]
    p['GAUSSIAN_STANDARD_DEVIATION'] = [0, 0, 0]
    p['DECIMATE_ITERATIONS'] = 0
    return p
def duodenum():
    """Segmentation parameters for the duodenum (lookup-table index 3)."""
    p = frog()
    p.update({'NAME': 'duodenum', 'TISSUE': 3, 'START_SLICE': 35, 'END_SLICE': 105})
    p['VOI'] = [189, 248, 191, 284, p['START_SLICE'], p['END_SLICE']]
    return p
def eye_retna():
    """Segmentation parameters for the eye retina (lookup-table index 4)."""
    p = frog()
    p.update({'NAME': 'eye_retna', 'TISSUE': 4, 'START_SLICE': 1, 'END_SLICE': 41})
    p['VOI'] = [342, 438, 180, 285, p['START_SLICE'], p['END_SLICE']]
    return p
def eye_white():
    """Segmentation parameters for the eye white (lookup-table index 5)."""
    p = frog()
    p.update({'NAME': 'eye_white', 'TISSUE': 5, 'START_SLICE': 1, 'END_SLICE': 37})
    p['VOI'] = [389, 433, 183, 282, p['START_SLICE'], p['END_SLICE']]
    return p
def frog():
    """Common parameters for all frog tissue segmentations (frogtissue study)."""
    p = default_parameters()
    p.update({'ROWS': 470,
              'COLUMNS': 500,
              'STUDY': 'frogtissue',
              'SLICE_ORDER': 'si',
              'PIXEL_SIZE': 1,
              'SPACING': 1.5,
              'VALUE': 127.5,
              'SAMPLE_RATE': [1, 1, 1],
              'GAUSSIAN_STANDARD_DEVIATION': [2, 2, 2],
              'DECIMATE_REDUCTION': 0.95,
              'DECIMATE_ITERATIONS': 5,
              'DECIMATE_ERROR': 0.0002,
              'DECIMATE_ERROR_INCREMENT': 0.0002,
              'SMOOTH_FACTOR': 0.1})
    return p
def heart():
    """Segmentation parameters for the heart (lookup-table index 6)."""
    p = frog()
    p.update({'NAME': 'heart', 'TISSUE': 6, 'START_SLICE': 49, 'END_SLICE': 93})
    p['VOI'] = [217, 299, 186, 266, p['START_SLICE'], p['END_SLICE']]
    return p
def ileum():
    """Segmentation parameters for the ileum (lookup-table index 7)."""
    p = frog()
    p.update({'NAME': 'ileum', 'TISSUE': 7, 'START_SLICE': 25, 'END_SLICE': 93})
    p['VOI'] = [172, 243, 201, 290, p['START_SLICE'], p['END_SLICE']]
    return p
def kidney():
    """Segmentation parameters for the kidney (lookup-table index 8)."""
    p = frog()
    p.update({'NAME': 'kidney', 'TISSUE': 8, 'START_SLICE': 24, 'END_SLICE': 78})
    p['VOI'] = [116, 238, 193, 263, p['START_SLICE'], p['END_SLICE']]
    return p
def l_intestine():
    """Segmentation parameters for the large intestine (lookup-table index 9)."""
    p = frog()
    p.update({'NAME': 'l_intestine', 'TISSUE': 9, 'START_SLICE': 56, 'END_SLICE': 106})
    p['VOI'] = [115, 224, 209, 284, p['START_SLICE'], p['END_SLICE']]
    return p
def liver():
    """Segmentation parameters for the liver (lookup-table index 10)."""
    p = frog()
    p.update({'NAME': 'liver', 'TISSUE': 10, 'START_SLICE': 25, 'END_SLICE': 126})
    p['VOI'] = [167, 297, 154, 304, p['START_SLICE'], p['END_SLICE']]
    return p
def lung():
    """Segmentation parameters for the lung (lookup-table index 11)."""
    p = frog()
    p.update({'NAME': 'lung', 'TISSUE': 11, 'START_SLICE': 24, 'END_SLICE': 59})
    p['VOI'] = [222, 324, 157, 291, p['START_SLICE'], p['END_SLICE']]
    return p
def nerve():
    """Segmentation parameters for the nerve tissue (lookup-table index 12)."""
    p = frog()
    p.update({'NAME': 'nerve', 'TISSUE': 12, 'START_SLICE': 7, 'END_SLICE': 113})
    p['VOI'] = [79, 403, 63, 394, p['START_SLICE'], p['END_SLICE']]
    return p
def skin():
    """Segmentation parameters for the skin (lookup-table index 0, 'frog' study).

    Rendered semi-transparent (OPACITY 0.4) so internal tissues remain visible.
    """
    p = default_parameters()
    p.update({'NAME': 'skin',
              'TISSUE': 0,
              'ROWS': 470,
              'COLUMNS': 500,
              'STUDY': 'frog',
              'SLICE_ORDER': 'si',
              'PIXEL_SIZE': 1,
              'SPACING': 1.5,
              'START_SLICE': 1,
              'END_SLICE': 138})
    p['VOI'] = [0, 499, 0, 469, p['START_SLICE'], p['END_SLICE']]
    p.update({'VALUE': 10.5,
              'SAMPLE_RATE': [2, 2, 1],
              'DECIMATE_REDUCTION': 0.95,
              'DECIMATE_ITERATIONS': 10,
              'DECIMATE_ERROR': 0.0002,
              'DECIMATE_ERROR_INCREMENT': 0.0002,
              'FEATURE_ANGLE': 60,
              'OPACITY': 0.4})
    return p
def skeleton():
    """Segmentation parameters for the skeleton (lookup-table index 13)."""
    p = frog()
    p.update({'STUDY': 'frogtissue', 'NAME': 'skeleton', 'TISSUE': 13,
              'VALUE': 64.5, 'START_SLICE': 1, 'END_SLICE': 136})
    p['VOI'] = [23, 479, 8, 469, p['START_SLICE'], p['END_SLICE']]
    p['GAUSSIAN_STANDARD_DEVIATION'] = [1.5, 1.5, 1]
    return p
def spleen():
    """Segmentation parameters for the spleen (lookup-table index 14)."""
    p = frog()
    p.update({'NAME': 'spleen', 'TISSUE': 14, 'START_SLICE': 45, 'END_SLICE': 68})
    p['VOI'] = [166, 219, 195, 231, p['START_SLICE'], p['END_SLICE']]
    return p
def stomach():
    """Segmentation parameters for the stomach (lookup-table index 15)."""
    p = frog()
    p.update({'NAME': 'stomach', 'TISSUE': 15, 'START_SLICE': 26, 'END_SLICE': 119})
    p['VOI'] = [143, 365, 158, 297, p['START_SLICE'], p['END_SLICE']]
    return p
def tissue_parameters():
    """Return a dict mapping each tissue name to its parameter dictionary.

    Keys are the builder function names, in the same order as the original
    hand-written assignments.
    """
    builders = [blood, brain, brainbin, duodenum, eye_retna, eye_white, frog,
                heart, ileum, kidney, l_intestine, liver, lung, nerve, skin,
                skeleton, spleen, stomach]
    return {build.__name__: build() for build in builders}
def create_frog_lut(colors):
    """Build the 16-entry lookup table mapping tissue indices to display colors.

    :param colors: a vtkNamedColors instance used to resolve color names
    :return: a vtkLookupTable with one RGBA entry per tissue (indices 0-15)
    """
    # (color name, tissue) pairs, ordered by tissue index 0..15.
    tissue_colors = [('LimeGreen', 'skin'),
                     ('salmon', 'blood'),
                     ('beige', 'brain'),
                     ('orange', 'duodenum'),
                     ('misty_rose', 'eye_retina'),
                     ('white', 'eye_white'),
                     ('tomato', 'heart'),
                     ('raspberry', 'ileum'),
                     ('banana', 'kidney'),
                     ('peru', 'l_intestine'),
                     ('pink', 'liver'),
                     ('powder_blue', 'lung'),
                     ('carrot', 'nerve'),
                     ('wheat', 'skeleton'),
                     ('violet', 'spleen'),
                     ('plum', 'stomach')]
    lut = vtkLookupTable()
    lut.SetNumberOfColors(16)
    lut.SetTableRange(0, 15)
    lut.Build()
    for idx, (color_name, _tissue) in enumerate(tissue_colors):
        lut.SetTableValue(idx, colors.GetColor4d(color_name))
    return lut
def check_for_required_parameters(tissue, parameters):
    """Verify that *parameters* contains every key required to segment a tissue.

    :param tissue: the tissue name (used only in the message)
    :param parameters: dict of segmentation parameters to validate
    :return: None when nothing is missing, otherwise a message string listing
             the missing parameter names.
    """
    required = {'NAME', 'END_SLICE', 'TISSUE', 'STUDY', 'ROWS',
                'COLUMNS', 'VALUE', 'SPACING',
                'GAUSSIAN_STANDARD_DEVIATION', 'VOI',
                'DECIMATE_ITERATIONS'}
    present = set(parameters.keys())
    # Empty dict reports the full required set, matching the original behavior.
    missing = required if not present else required.difference(present)
    if missing:
        # BUG FIX: the missing names are sorted so the message is deterministic;
        # raw set iteration order varies between runs under hash randomization.
        return 'Missing parameters for {:11s}: {:s}'.format(tissue, ', '.join(sorted(missing)))
    return None
def format_timings(ict):
    """Format a nested {section: {step: seconds}} timing dict for display.

    :param ict: dict of dicts; outer keys are pipeline sections, inner keys
                are step names mapped to elapsed seconds.
    :return: list of formatted lines, with a subtotal per section and a
             grand total at the end.
    """
    lines = []
    grand_total = 0
    for section in sorted(ict):
        lines.append('{:11s}'.format(section))
        section_total = 0
        for step in sorted(ict[section]):
            elapsed = ict[section][step]
            section_total += elapsed
            lines.append('{:11s}{:13s} {:5.2f}s'.format(' ', step, elapsed))
        grand_total += section_total
        lines.append('Subtotal: {:5.2f}s'.format(section_total))
    lines.append('   Total: {:5.2f}s'.format(grand_total))
    return lines
if __name__ == '__main__':
    import sys
    # Parse the command line (data folder, tissue name, view, pipeline flags)
    # and run the reconstruction.  get_program_parameters and main are
    # presumably defined earlier in this file -- not shown here.
    data_folder, tissue, view, flying_edges, decimate = get_program_parameters(sys.argv)
    main(data_folder, tissue, view, flying_edges, decimate)
| [
"vtkmodules.vtkInteractionWidgets.vtkOrientationMarkerWidget",
"vtkmodules.vtkCommonTransforms.vtkTransform",
"vtkmodules.vtkFiltersCore.vtkContourFilter",
"vtkmodules.vtkFiltersCore.vtkStripper",
"vtkmodules.vtkRenderingCore.vtkRenderWindow",
"vtkmodules.vtkIOImage.vtkMetaImageReader",
"vtkmodules.vtkI... | [((2219, 2342), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'epilog': 'epilogue', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=description, epilog=epilogue,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n', (2242, 2342), False, 'import argparse\n'), ((3611, 3627), 'vtkmodules.vtkCommonColor.vtkNamedColors', 'vtkNamedColors', ([], {}), '()\n', (3625, 3627), False, 'from vtkmodules.vtkCommonColor import vtkNamedColors\n'), ((3640, 3657), 'pathlib.Path', 'Path', (['data_folder'], {}), '(data_folder)\n', (3644, 3657), False, 'from pathlib import Path\n'), ((4799, 4812), 'vtkmodules.vtkRenderingCore.vtkRenderer', 'vtkRenderer', ([], {}), '()\n', (4810, 4812), False, 'from vtkmodules.vtkRenderingCore import vtkActor, vtkPolyDataMapper, vtkRenderWindow, vtkRenderWindowInteractor, vtkRenderer\n'), ((4833, 4850), 'vtkmodules.vtkRenderingCore.vtkRenderWindow', 'vtkRenderWindow', ([], {}), '()\n', (4848, 4850), False, 'from vtkmodules.vtkRenderingCore import vtkActor, vtkPolyDataMapper, vtkRenderWindow, vtkRenderWindowInteractor, vtkRenderer\n'), ((4922, 4949), 'vtkmodules.vtkRenderingCore.vtkRenderWindowInteractor', 'vtkRenderWindowInteractor', ([], {}), '()\n', (4947, 4949), False, 'from vtkmodules.vtkRenderingCore import vtkActor, vtkPolyDataMapper, vtkRenderWindow, vtkRenderWindowInteractor, vtkRenderer\n'), ((5081, 5110), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (5104, 5110), False, 'import collections\n'), ((7079, 7093), 'vtkmodules.vtkRenderingAnnotation.vtkAxesActor', 'vtkAxesActor', ([], {}), '()\n', (7091, 7093), False, 'from vtkmodules.vtkRenderingAnnotation import vtkAxesActor\n'), ((7108, 7136), 'vtkmodules.vtkInteractionWidgets.vtkOrientationMarkerWidget', 'vtkOrientationMarkerWidget', ([], {}), '()\n', (7134, 7136), False, 'from vtkmodules.vtkInteractionWidgets import vtkOrientationMarkerWidget\n'), ((8233, 8253), 
'vtkmodules.vtkIOImage.vtkMetaImageReader', 'vtkMetaImageReader', ([], {}), '()\n', (8251, 8253), False, 'from vtkmodules.vtkIOImage import vtkMetaImageReader\n'), ((9246, 9264), 'vtkmodules.vtkImagingCore.vtkImageShrink3D', 'vtkImageShrink3D', ([], {}), '()\n', (9262, 9264), False, 'from vtkmodules.vtkImagingCore import vtkImageShrink3D, vtkImageThreshold\n'), ((9842, 9867), 'collections.defaultdict', 'collections.defaultdict', ([], {}), '()\n', (9865, 9867), False, 'import collections\n'), ((10978, 11006), 'vtkmodules.vtkFiltersGeneral.vtkTransformPolyDataFilter', 'vtkTransformPolyDataFilter', ([], {}), '()\n', (11004, 11006), False, 'from vtkmodules.vtkFiltersGeneral import vtkTransformPolyDataFilter\n'), ((11630, 11661), 'vtkmodules.vtkFiltersCore.vtkWindowedSincPolyDataFilter', 'vtkWindowedSincPolyDataFilter', ([], {}), '()\n', (11659, 11661), False, 'from vtkmodules.vtkFiltersCore import vtkContourFilter, vtkDecimatePro, vtkExecutionTimer, vtkFlyingEdges3D, vtkMarchingCubes, vtkPolyDataNormals, vtkStripper, vtkWindowedSincPolyDataFilter\n'), ((12083, 12103), 'vtkmodules.vtkFiltersCore.vtkPolyDataNormals', 'vtkPolyDataNormals', ([], {}), '()\n', (12101, 12103), False, 'from vtkmodules.vtkFiltersCore import vtkContourFilter, vtkDecimatePro, vtkExecutionTimer, vtkFlyingEdges3D, vtkMarchingCubes, vtkPolyDataNormals, vtkStripper, vtkWindowedSincPolyDataFilter\n'), ((12230, 12243), 'vtkmodules.vtkFiltersCore.vtkStripper', 'vtkStripper', ([], {}), '()\n', (12241, 12243), False, 'from vtkmodules.vtkFiltersCore import vtkContourFilter, vtkDecimatePro, vtkExecutionTimer, vtkFlyingEdges3D, vtkMarchingCubes, vtkPolyDataNormals, vtkStripper, vtkWindowedSincPolyDataFilter\n'), ((12315, 12334), 'vtkmodules.vtkRenderingCore.vtkPolyDataMapper', 'vtkPolyDataMapper', ([], {}), '()\n', (12332, 12334), False, 'from vtkmodules.vtkRenderingCore import vtkActor, vtkPolyDataMapper, vtkRenderWindow, vtkRenderWindowInteractor, vtkRenderer\n'), ((12431, 12449), 
'vtkmodules.vtkFiltersCore.vtkContourFilter', 'vtkContourFilter', ([], {}), '()\n', (12447, 12449), False, 'from vtkmodules.vtkFiltersCore import vtkContourFilter, vtkDecimatePro, vtkExecutionTimer, vtkFlyingEdges3D, vtkMarchingCubes, vtkPolyDataNormals, vtkStripper, vtkWindowedSincPolyDataFilter\n'), ((12553, 12563), 'vtkmodules.vtkRenderingCore.vtkActor', 'vtkActor', ([], {}), '()\n', (12561, 12563), False, 'from vtkmodules.vtkRenderingCore import vtkActor, vtkPolyDataMapper, vtkRenderWindow, vtkRenderWindowInteractor, vtkRenderer\n'), ((23840, 23856), 'vtkmodules.vtkCommonCore.vtkLookupTable', 'vtkLookupTable', ([], {}), '()\n', (23854, 23856), False, 'from vtkmodules.vtkCommonCore import vtkLookupTable\n'), ((8946, 8965), 'vtkmodules.vtkImagingCore.vtkImageThreshold', 'vtkImageThreshold', ([], {}), '()\n', (8963, 8965), False, 'from vtkmodules.vtkImagingCore import vtkImageShrink3D, vtkImageThreshold\n'), ((9532, 9556), 'vtkmodules.vtkImagingGeneral.vtkImageGaussianSmooth', 'vtkImageGaussianSmooth', ([], {}), '()\n', (9554, 9556), False, 'from vtkmodules.vtkImagingGeneral import vtkImageGaussianSmooth\n'), ((9943, 9961), 'vtkmodules.vtkFiltersCore.vtkFlyingEdges3D', 'vtkFlyingEdges3D', ([], {}), '()\n', (9959, 9961), False, 'from vtkmodules.vtkFiltersCore import vtkContourFilter, vtkDecimatePro, vtkExecutionTimer, vtkFlyingEdges3D, vtkMarchingCubes, vtkPolyDataNormals, vtkStripper, vtkWindowedSincPolyDataFilter\n'), ((10215, 10234), 'vtkmodules.vtkFiltersCore.vtkExecutionTimer', 'vtkExecutionTimer', ([], {}), '()\n', (10232, 10234), False, 'from vtkmodules.vtkFiltersCore import vtkContourFilter, vtkDecimatePro, vtkExecutionTimer, vtkFlyingEdges3D, vtkMarchingCubes, vtkPolyDataNormals, vtkStripper, vtkWindowedSincPolyDataFilter\n'), ((10395, 10413), 'vtkmodules.vtkFiltersCore.vtkMarchingCubes', 'vtkMarchingCubes', ([], {}), '()\n', (10411, 10413), False, 'from vtkmodules.vtkFiltersCore import vtkContourFilter, vtkDecimatePro, vtkExecutionTimer, vtkFlyingEdges3D, 
vtkMarchingCubes, vtkPolyDataNormals, vtkStripper, vtkWindowedSincPolyDataFilter\n'), ((10667, 10686), 'vtkmodules.vtkFiltersCore.vtkExecutionTimer', 'vtkExecutionTimer', ([], {}), '()\n', (10684, 10686), False, 'from vtkmodules.vtkFiltersCore import vtkContourFilter, vtkDecimatePro, vtkExecutionTimer, vtkFlyingEdges3D, vtkMarchingCubes, vtkPolyDataNormals, vtkStripper, vtkWindowedSincPolyDataFilter\n'), ((11156, 11172), 'vtkmodules.vtkFiltersCore.vtkDecimatePro', 'vtkDecimatePro', ([], {}), '()\n', (11170, 11172), False, 'from vtkmodules.vtkFiltersCore import vtkContourFilter, vtkDecimatePro, vtkExecutionTimer, vtkFlyingEdges3D, vtkMarchingCubes, vtkPolyDataNormals, vtkStripper, vtkWindowedSincPolyDataFilter\n'), ((13671, 13685), 'vtkmodules.vtkCommonMath.vtkMatrix4x4', 'vtkMatrix4x4', ([], {}), '()\n', (13683, 13685), False, 'from vtkmodules.vtkCommonMath import vtkMatrix4x4\n'), ((13897, 13911), 'vtkmodules.vtkCommonMath.vtkMatrix4x4', 'vtkMatrix4x4', ([], {}), '()\n', (13909, 13911), False, 'from vtkmodules.vtkCommonMath import vtkMatrix4x4\n'), ((14124, 14138), 'vtkmodules.vtkCommonMath.vtkMatrix4x4', 'vtkMatrix4x4', ([], {}), '()\n', (14136, 14138), False, 'from vtkmodules.vtkCommonMath import vtkMatrix4x4\n'), ((14351, 14365), 'vtkmodules.vtkCommonMath.vtkMatrix4x4', 'vtkMatrix4x4', ([], {}), '()\n', (14363, 14365), False, 'from vtkmodules.vtkCommonMath import vtkMatrix4x4\n'), ((14849, 14863), 'vtkmodules.vtkCommonMath.vtkMatrix4x4', 'vtkMatrix4x4', ([], {}), '()\n', (14861, 14863), False, 'from vtkmodules.vtkCommonMath import vtkMatrix4x4\n'), ((15085, 15099), 'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (15097, 15099), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), ((15182, 15196), 'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (15194, 15196), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), ((15293, 15307), 
'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (15305, 15307), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), ((15387, 15401), 'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (15399, 15401), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), ((15468, 15482), 'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (15480, 15482), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), ((15584, 15598), 'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (15596, 15598), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), ((15681, 15695), 'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (15693, 15695), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), ((15780, 15794), 'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (15792, 15794), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), ((15916, 15930), 'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (15928, 15930), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), ((16052, 16066), 'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (16064, 16066), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), ((16179, 16193), 'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (16191, 16193), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), ((16307, 16321), 'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (16319, 16321), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), ((16443, 16457), 'vtkmodules.vtkCommonTransforms.vtkTransform', 'vtkTransform', ([], {}), '()\n', (16455, 16457), False, 'from vtkmodules.vtkCommonTransforms import vtkTransform\n'), 
((8553, 8578), 'vtkmodules.vtkImagingMorphological.vtkImageIslandRemoval2D', 'vtkImageIslandRemoval2D', ([], {}), '()\n', (8576, 8578), False, 'from vtkmodules.vtkImagingMorphological import vtkImageIslandRemoval2D\n')] |
# Author ProGramMoS, <NAME>
# Version 0.2b
import sys
from com.l2jfrozen.gameserver.model.actor.instance import L2PcInstance
from com.l2jfrozen.util.database import L2DatabaseFactory
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "8871_gve"
class Quest(JQuest):
    """Good-vs-evil faction selection quest.

    Dialog events: "1"/"2" join the good/evil faction, "3"/"4" leave it.
    Every event moves the quest to the module-level COMPLETED state.
    """

    def __init__(self, id, name, descr):
        JQuest.__init__(self, id, name, descr)

    def onEvent(self, event, st):
        # BUG FIX: the original used 'st.getPlayer.setGood(true)' -- getPlayer
        # was never called (missing parentheses) and lowercase true/false are
        # NameErrors in (J)Python; they must be True/False.
        player = st.getPlayer()
        player.setTarget(player)
        if event == "1":  # join good faction
            player.setGood(True)
            st.setState(COMPLETED)
        if event == "2":  # join evil faction
            player.setEvil(True)
            st.setState(COMPLETED)
        if event == "3":  # leave good faction
            player.setGood(False)
            st.setState(COMPLETED)
        if event == "4":  # leave evil faction
            player.setEvil(False)
            st.setState(COMPLETED)
        return
# Instantiate the quest and its three states (L2J jython quest-engine boilerplate).
QUEST = Quest(8871,qn,"custom")
CREATED = State('Start',QUEST)
STARTED = State('Started',QUEST)
COMPLETED = State('Completed',QUEST)
# New players begin in the CREATED state.
QUEST.setInitialState(CREATED)
"com.l2jfrozen.gameserver.model.quest.State",
"com.l2jfrozen.gameserver.model.quest.jython.QuestJython.__init__"
] | [((981, 1002), 'com.l2jfrozen.gameserver.model.quest.State', 'State', (['"""Start"""', 'QUEST'], {}), "('Start', QUEST)\n", (986, 1002), False, 'from com.l2jfrozen.gameserver.model.quest import State\n'), ((1013, 1036), 'com.l2jfrozen.gameserver.model.quest.State', 'State', (['"""Started"""', 'QUEST'], {}), "('Started', QUEST)\n", (1018, 1036), False, 'from com.l2jfrozen.gameserver.model.quest import State\n'), ((1048, 1073), 'com.l2jfrozen.gameserver.model.quest.State', 'State', (['"""Completed"""', 'QUEST'], {}), "('Completed', QUEST)\n", (1053, 1073), False, 'from com.l2jfrozen.gameserver.model.quest import State\n'), ((454, 492), 'com.l2jfrozen.gameserver.model.quest.jython.QuestJython.__init__', 'JQuest.__init__', (['self', 'id', 'name', 'descr'], {}), '(self, id, name, descr)\n', (469, 492), True, 'from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest\n')] |
# Pyspark example called by mlrun_spark_k8s.ipynb
from pyspark.sql import SparkSession
from mlrun import get_or_create_ctx
# Acquire MLRun context
# Acquire MLRun context
mlctx = get_or_create_ctx("spark-function")
# Read the job parameters: data sources to load, the SQL transform to apply,
# and the write options for the destination.
mlctx.logger.info("!@!@!@!@!@ Getting env variables")
READ_OPTIONS = mlctx.get_param("data_sources")
QUERY = mlctx.get_param("query")
WRITE_OPTIONS = mlctx.get_param("write_options")
# Create spark session
spark = SparkSession.builder.appName("Spark function").getOrCreate()
# Register each configured source as a temp view named after its key.
# (spark.read.load accepts arbitrary per-source options, not only JDBC.)
for data_source in READ_OPTIONS:
    spark.read.load(**READ_OPTIONS[data_source]).createOrReplaceTempView(data_source)
# Run the SQL transform and write the resulting dataframe to the destination.
spark.sql(QUERY).write.save(**WRITE_OPTIONS)
mlctx.logger.info("!@!@!@!@!@ Saved")
spark.stop()
| [
"mlrun.get_or_create_ctx",
"pyspark.sql.SparkSession.builder.appName"
] | [((159, 194), 'mlrun.get_or_create_ctx', 'get_or_create_ctx', (['"""spark-function"""'], {}), "('spark-function')\n", (176, 194), False, 'from mlrun import get_or_create_ctx\n'), ((434, 480), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', (['"""Spark function"""'], {}), "('Spark function')\n", (462, 480), False, 'from pyspark.sql import SparkSession\n')] |
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import h5py
import copy
import time
import os
from whacc import utils
def isnotebook():
    """Return True when running inside a Jupyter or Colab notebook, else False.

    Detection relies on IPython's get_ipython(); in a plain interpreter that
    name is undefined and the NameError falls through to False.
    """
    try:
        shell_class = get_ipython().__class__
    except NameError:
        return False  # standard Python interpreter (no IPython at all)
    if 'colab' in str(shell_class):
        return True
    # ZMQInteractiveShell -> notebook/qtconsole; anything else (e.g. the
    # terminal IPython shell) is not a notebook.
    return shell_class.__name__ == 'ZMQInteractiveShell'
# Pick the progress-bar flavor: the notebook widget when running under
# Jupyter/Colab, the plain terminal bar otherwise.
if isnotebook():
    from tqdm.notebook import tqdm
else:
    from tqdm import tqdm
def stack_imgs_lag(imgs, frames_1=None, buffer=2, shift_to_the_right_by=0):
    """Stack each frame with its temporal neighbours along a new last axis.

    Each output element is a (H, W, buffer + 1) window sliding over the frame
    sequence; segment boundaries (from frames_1) are padded with a random-noise
    frame so windows never mix frames from different trials.

    Parameters
    ----------
    imgs :
        stack of 2-D grayscale images; assumes shape (frames, H, W) -- TODO confirm
    frames_1 : list or None
        frame counts per trial segment; default treats all of `imgs` as one segment
    buffer : int
        number of extra lag frames included in each output window
    shift_to_the_right_by : int
        offset of the window start relative to the current frame
    """
    if frames_1 is None:
        frames_1 = [imgs.shape[0]]  # whole stack treated as a single segment
    array_group = []
    for k1, k2 in utils.loop_segments(frames_1):
        # random-noise frame used to pad both ends of the segment
        x = (np.random.random(imgs[0].shape) * 255).astype(np.uint8)
        tile_axes = [1] * len(x.shape) + [buffer]
        x = np.tile(x[:, :, None], tile_axes)  # (H, W, buffer) of leading pad
        tmp1 = x.copy()  # keep a copy for the trailing pad
        for ii, stack_i in enumerate(range(k1, k2)):
            x = np.concatenate((x, imgs[stack_i][:, :, None]), axis=2)
        x = np.concatenate((x, tmp1), axis=2)
        # slide a (buffer + 1)-wide window over the padded channel axis
        for k3 in range(k2 - k1):
            array_group.append(x[:, :, k3 + shift_to_the_right_by: k3 + 1 + buffer + shift_to_the_right_by])
    return np.asarray(array_group)
def get_h5_key_and_concatenate(h5_list, key_name='labels'):
    """
    simply extract and concatenate all of one key "key_name" from many H5 files, I use it to get balance the data touch
    and not touch frames when training a model with a list of different H5 files

    Parameters
    ----------
    h5_list : list
        list of full paths to H5 file(s).
    key_name : str
        default 'labels', the key to get the data from the H5 file

    Returns
    -------
    numpy.ndarray
        the datasets from every file, concatenated along axis 0.
    """
    h5_list = utils.make_list(h5_list, suppress_warning=True)
    # Collect every file's array first, then concatenate once: the original
    # re-concatenated on every file, which is quadratic in the total size.
    chunks = []
    for file_name in h5_list:
        with h5py.File(file_name, 'r') as h:
            chunks.append(np.asarray(h[key_name][:]))
    if len(chunks) == 1:
        return chunks[0]
    return np.concatenate(chunks)
def get_h5_key_and_dont_concatenate(h5_list, key_name='labels'):
    """Extract key "key_name" from each H5 file separately (no concatenation).

    Parameters
    ----------
    h5_list : list
        list of full paths to H5 file(s).
    key_name : str
        default 'labels', the dataset to read from each file

    Returns
    -------
    list
        one list of values per input file, in input order.
    """
    per_file = []
    for file_name in h5_list:
        with h5py.File(file_name, 'r') as h:
            per_file.append(list(h[key_name][:]))
    return per_file
def clone_h5_basic_info(H5_list, fold_name=None, file_end='_QUICK_SAVE.h5'):
    """
    copies all the info from H5 into another H5 file NOT INCLUDING the labels or images. so it has all the file info,
    like names and pole locations and pole match max value stack. anything with 'images', 'MODEL__' or 'labels' is
    not copied over to the new file.

    Parameters
    ----------
    H5_list : list
        list of H5 files to clone
    fold_name : str
        default None, where to place the cloned H5 files. if left blank it will place in the same folder as the original file
    file_end : str
        default '_QUICK_SAVE.h5', how to change the name of the H5 file to be cloned to differentiate it from the original

    Returns
    -------
    all_new_h5s: list
        list of new H5 full file names
    """
    if fold_name is not None:
        # BUG FIX: was a bare try/except around os.mkdir, which also swallowed
        # real errors (e.g. permissions); exist_ok only ignores "already exists".
        os.makedirs(fold_name, exist_ok=True)
    all_new_h5s = []
    for h5 in H5_list:
        base = os.path.basename(h5)[:-3] + file_end
        if fold_name is not None:
            new_fn = os.path.join(fold_name, base)
        else:
            # BUG FIX: the original built dirname + os.sep + base, which turns a
            # bare filename ('' dirname) into an absolute path like '/name.h5'.
            new_fn = os.path.join(os.path.dirname(h5), base)
        all_new_h5s.append(new_fn)
        if os.path.exists(new_fn):
            os.remove(new_fn)  # start from a clean file
        with h5py.File(new_fn, 'w') as f1, h5py.File(h5, 'r') as f2:
            for k in f2.keys():
                # skip the heavyweight/model-specific datasets
                if 'images' != k and 'MODEL__' not in k and 'labels' not in k:
                    f1.create_dataset(k, data=f2[k][:])
    return all_new_h5s
def del_h5_with_term(h5_list, str_2_cmp):
    """Delete every dataset whose key contains *str_2_cmp* from each H5 file.

    Parameters
    ----------
    h5_list : list
        list of H5 strings (full path)
    str_2_cmp : str
        will delete keys with this in their title ... e.g. '__RETRAIN'
    """
    for h5_name in h5_list:
        with h5py.File(h5_name, 'a') as h5_source:
            # BUG FIX: snapshot the keys first -- deleting entries while
            # iterating the live key view is unsafe.
            for k in list(h5_source.keys()):
                if str_2_cmp in k:
                    print('del--> ' + k)
                    del h5_source[k]
            print('_______')
def split_h5_loop_segments(h5_to_split_list, split_percentages, temp_base_name, chunk_size=10000,
                           add_numbers_to_name=True,
                           disable_TQDM=False, set_seed=None, color_channel=True):
    """Randomly splits images from a list of H5 file(s) into len(split_percentages) different H5 files,
    keeping whole trial segments (as defined by each file's 'frame_nums') together.

    Parameters
    ----------
    h5_to_split_list : list
        list of strings with full file names to the H5 file(s) to be split
    split_percentages : list
        list of numbers, can be ints [20, 1, 1] and or floats [.8, .2]; only the relative
        proportions matter (they are normalized to sum to 1)
    temp_base_name : str or list
        full path to new h5 file e.g "'/Users/phil/tempH5_" and the program will add the number and the ".h5"
        in this case tempH5_0.h5, tempH5_1.h5, tempH5_2.h5 etc. or if it is a list it must be equal in length to
        'split_percentages' and each file will be named based on that list
    chunk_size : int
        default 10000, max amount of frames to hold in memory at a time before storing in H5 file
    add_numbers_to_name : bool
        default True, set False to skip the numeric suffix on each output file name
    disable_TQDM : bool
        default False, silences the progress bar when True
    set_seed : int or None
        default None, seed for the random segment shuffle (reproducible splits)
    color_channel : bool
        forwarded to h5_iterative_creator -- presumably whether images carry a color axis; TODO confirm

    Returns
    -------
    list
        full file names of the newly created H5 files
    """
    if isinstance(temp_base_name, str):
        temp_base_name = [temp_base_name] * len(split_percentages)
    else:
        assert len(temp_base_name) == len(
            split_percentages), """if 'temp_base_name' is a list of strings, it must be equal in length to 'split_percentages'"""
    # strip a trailing '.h5' so the suffix logic below can re-append it cleanly
    for i, k in enumerate(temp_base_name):
        if k[-3:] == '.h5':
            temp_base_name[i] = temp_base_name[i][:-3]
    frame_num_array_list = get_h5_key_and_dont_concatenate(h5_to_split_list, 'frame_nums')
    total_frames = len(get_h5_key_and_concatenate(h5_to_split_list, key_name='labels'))
    cnt1 = 0  # running frame counter, only used to keep the progress bar accurate
    h5_creators = dict()
    # normalize so the proportions sum to 1
    split_percentages = split_percentages / np.sum(split_percentages)
    final_names = []
    for iii, h5_to_split in enumerate(h5_to_split_list):
        with h5py.File(h5_to_split, 'r') as h:
            tmp_frame_list = frame_num_array_list[iii]
            L = len(tmp_frame_list)
            if set_seed is not None:
                np.random.seed(set_seed)
            # shuffle the trial segments, then carve the shuffled order into
            # len(split_percentages) contiguous groups of the requested sizes
            mixed_inds = np.random.choice(L, L, replace=False)
            random_segment_inds = np.split(mixed_inds, np.ceil(L * np.cumsum(split_percentages[:-1])).astype('int'))
            random_segment_inds = [sorted(tmpk) for tmpk in random_segment_inds]
            random_frame_inds = [[None]] * len(random_segment_inds)
            list_of_new_frame_nums = [[None]] * len(random_segment_inds)
            loop_seg_list = list(utils.loop_segments(tmp_frame_list))
            for pi, p in enumerate(random_segment_inds):
                tmp1 = []
                tmp2 = []
                for pp in p:
                    # expand each chosen segment into its absolute frame indices
                    x = list(loop_seg_list[pp])
                    tmp1 += list(range(x[0], x[1]))
                    tmp2.append(tmp_frame_list[pp])
                random_frame_inds[pi] = tmp1
                list_of_new_frame_nums[pi] = tmp2
            for i, k in enumerate(split_percentages):  # for each new h5 created
                if iii == 0:  # create the H5 creators
                    if add_numbers_to_name:
                        final_names.append(temp_base_name[i] + '_' + str(i) + '.h5')
                    else:
                        final_names.append(temp_base_name[i] + '.h5')
                    h5_creators[i] = h5_iterative_creator(final_names[-1],
                                                          overwrite_if_file_exists=True,
                                                          close_and_open_on_each_iteration=True,
                                                          color_channel=color_channel)
                ims = []
                labels = []
                for ii in tqdm(sorted(random_frame_inds[i]), disable=disable_TQDM, total=total_frames, initial=cnt1):
                    cnt1 += 1
                    ims.append(h['images'][ii])
                    labels.append(h['labels'][ii])
                    # flush to disk every chunk_size frames to bound memory use
                    if ii > 0 and ii % chunk_size == 0:
                        h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
                        ims = []
                        labels = []
                h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
                with h5py.File(h5_creators[i].h5_full_file_name,
                               'r+') as h2:  # append frame_nums here so one pass supports multiple source files
                    frame_nums = np.asarray(list_of_new_frame_nums[i])
                    if 'frame_nums' not in h2.keys():
                        h2.create_dataset('frame_nums', shape=np.shape(frame_nums), maxshape=(None,), chunks=True,
                                          data=frame_nums)
                    else:
                        h2['frame_nums'].resize(h2['frame_nums'].shape[0] + frame_nums.shape[0], axis=0)
                        h2['frame_nums'][-frame_nums.shape[0]:] = frame_nums
    return final_names
def make_sure_frame_nums_exist(h5file):
    """Copy per-trial frame counts into a top-level 'frame_nums' dataset.

    Reads row 1 of the 'trial_nums_and_frame_nums' dataset and writes it as a
    new 'frame_nums' dataset, skipping the copy when 'frame_nums' already
    exists or when the source dataset is missing.

    Parameters
    ----------
    h5file : str
        full path to an H5 file (opened in read/write mode).

    Returns
    -------
    None
    """
    with h5py.File(h5file, 'r+') as h:
        existing_keys = list(h.keys())
        if 'frame_nums' in existing_keys:
            print("""'frame_nums' already in the key list""")
            return None
        if 'trial_nums_and_frame_nums' not in existing_keys:
            print(
                """key 'trial_nums_and_frame_nums' must be in the provided h5 this is the only reason program exists""")
            return None
        # row 1 of 'trial_nums_and_frame_nums' holds the per-trial frame counts
        frame_counts = h['trial_nums_and_frame_nums'][1, :]
        h.create_dataset('frame_nums', shape=np.shape(frame_counts), data=frame_counts)
def split_h5(h5_to_split_list, split_percentages, temp_base_name, chunk_size=10000, add_numbers_to_name=True,
             disable_TQDM=False, skip_if_label_is_neg_1=False, set_seed=None, color_channel=True):
    """Randomly splits images from a list of H5 file(s) into len(split_percentages) different H5 files.

    Parameters
    ----------
    h5_to_split_list : list
        list of strings with full file names to the H5 file(s) to be split
    split_percentages : list
        list of numbers, can be ints [20, 1, 1] and or floats [.8, .2], it simply takes the sum and creates a percentage
    temp_base_name : str or list
        full path to new h5 file e.g "'/Users/phil/tempH5_" and the program will add the number and the ".h5"
        in this case tempH5_0.h5, tempH5_1.h5, tempH5_2.h5 etc. or if it is a list it must be equal in length to
        'split_percentages' and each file will be named based on that list
    chunk_size : int
        default 10000, max amount of frames to hold in memory at a time before storing in the H5 file.
    add_numbers_to_name : bool
        default True, just in case you don't want the numbers on the end of your h5 file.
    disable_TQDM : bool
        default False, turns off the progress bar.
    skip_if_label_is_neg_1 : bool
        default False; when True, frames whose *label* equals -1 are excluded from every output file.
    set_seed : int or None
        default None; seed for the random shuffle so splits are reproducible.
    color_channel : bool
        default True, passed through to h5_iterative_creator.

    Returns
    -------
    list
        full file names of the newly created H5 files.
    """
    if isinstance(temp_base_name, str):
        temp_base_name = [temp_base_name] * len(split_percentages)
    else:
        assert len(temp_base_name) == len(
            split_percentages), """if 'temp_base_name' is a list of strings, it must be equal in length to 'split_percentages'"""
    total_frames = len(get_h5_key_and_concatenate(h5_to_split_list, key_name='labels'))
    cnt1 = 0
    h5_creators = dict()
    split_percentages = split_percentages / np.sum(split_percentages)  # normalize to fractions summing to 1
    final_names = []
    for iii, h5_to_split in enumerate(h5_to_split_list):
        with h5py.File(h5_to_split, 'r') as h:
            L = len(h['labels'][:])
            if set_seed is not None:
                np.random.seed(set_seed)
            mixed_inds = np.random.choice(L, L, replace=False)
            if skip_if_label_is_neg_1:
                # BUG FIX: the old code compared the shuffled *indices* to -1, which is a
                # no-op (np.random.choice yields 0..L-1, never -1); filter on label values.
                all_labels = np.asarray(h['labels'][:])
                mixed_inds = mixed_inds[all_labels[mixed_inds] != -1]
            # split points are proportional to however many frames survived filtering
            num_kept = len(mixed_inds)
            random_frame_inds = np.split(mixed_inds, np.ceil(num_kept * np.cumsum(split_percentages[:-1])).astype('int'))
            for i, k in enumerate(split_percentages):
                if iii == 0:  # create the H5 creators on the first source file only
                    if add_numbers_to_name:
                        final_names.append(temp_base_name[i] + '_' + str(i) + '.h5')
                    else:
                        final_names.append(temp_base_name[i] + '.h5')
                    h5_creators[i] = h5_iterative_creator(final_names[-1],
                                                          overwrite_if_file_exists=True,
                                                          close_and_open_on_each_iteration=True,
                                                          color_channel=color_channel)
                ims = []
                labels = []
                for ii in tqdm(sorted(random_frame_inds[i]), disable=disable_TQDM, total=total_frames, initial=cnt1):
                    cnt1 += 1
                    ims.append(h['images'][ii])
                    labels.append(h['labels'][ii])
                    if ii > 0 and ii % chunk_size == 0:
                        # flush a chunk to disk to bound memory use
                        h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
                        ims = []
                        labels = []
                h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
    return final_names
class h5_iterative_creator():
    """Create an H5 file using a for loop easily. used to create the augmented H5 file for training

    The 'images', 'labels' and 'multiplier' datasets are created lazily on the first
    add_to_h5 call and extended (resized) on every subsequent call.

    Parameters
    ----------
    h5_new_full_file_name : string
        full path name to your H5 file to be created
    overwrite_if_file_exists : bool
        overwrites the h5 file if it already exists
    max_img_height : int
        default 61, only the max size, can be larger in case you are going to have larger images
    max_img_width : int
        default 61, only the max size, can be larger in case you are going to have larger images
    close_and_open_on_each_iteration : bool
        default True, this prevents the user from forgetting to close H5 which
        can lead to corruption.
    color_channel : bool
        default True, when True the 'images' dataset keeps a trailing RGB channel axis.
    add_to_existing_H5 : bool
        default False, when True the file is opened in append ("r+") mode instead of
        being created from scratch ("w").

    Example
    _______
    h5creator = h5_iterative_creator(new_H5_file)
    h5creator.add_to_h5(img_stack1, labels_stack1)
    h5creator.add_to_h5(img_stack2, labels_stack2)
    h5creator.add_to_h5(img_stack3, labels_stack3)
    """
    def __init__(self, h5_new_full_file_name,
                 overwrite_if_file_exists=False,
                 max_img_height=61,
                 max_img_width=61,
                 close_and_open_on_each_iteration=True,
                 color_channel=True,
                 add_to_existing_H5=False):
        if not close_and_open_on_each_iteration:
            print('**remember to CLOSE the H5 file when you are done!!!**')
        # start from scratch when asked and the file already exists
        if overwrite_if_file_exists and os.path.isfile(h5_new_full_file_name):
            os.remove(h5_new_full_file_name)
        self.h5_full_file_name = h5_new_full_file_name
        # "r+" appends to an existing file, "w" creates/truncates a new one
        if add_to_existing_H5:
            self.hf_file = h5py.File(h5_new_full_file_name, "r+")
        else:
            self.hf_file = h5py.File(h5_new_full_file_name, "w")
        self.color_channel = color_channel
        self.max_img_height = max_img_height
        self.max_img_width = max_img_width
        # flipped to True once the datasets are created by the first add_to_h5 call
        self._went_through_create_h5 = False
        self.close_it = close_and_open_on_each_iteration
        if self.close_it:
            self.hf_file.close()

    def add_to_h5(self, images, labels):
        """Append a chunk of images and labels, creating the datasets on the first call.

        Parameters
        ----------
        images : numpy tensor
            chunk of images
        labels : numpy array
            array of labels
        """
        if self.close_it:
            self.open_or_close_h5('r+')
        if self._went_through_create_h5:  # already initialized with the correct size
            self._add_next_chunk_to_h5(images, labels)
        else:
            self._create_h5(images, labels)
        if self.close_it:
            self.open_or_close_h5('close')

    def _create_h5(self, images, labels):
        """Create the 'multiplier', 'images' and 'labels' datasets sized from the first chunk.

        Parameters
        ----------
        images : numpy tensor
            first chunk of images; its leading dimension is stored as 'multiplier'
        labels : numpy array
            labels matching the first chunk
        """
        # if set_multiplier:
        self.hf_file.create_dataset("multiplier", [1], h5py.h5t.STD_I32LE, data=images.shape[0])
        if self.color_channel:
            self.hf_file.create_dataset('images',
                                        np.shape(images),
                                        h5py.h5t.STD_U8BE,
                                        maxshape=(None, self.max_img_height, self.max_img_width, 3),
                                        chunks=True,
                                        data=images)
        else:
            self.hf_file.create_dataset('images',
                                        np.shape(images),
                                        h5py.h5t.STD_U8BE,
                                        maxshape=(None, self.max_img_height, self.max_img_width),
                                        chunks=True,
                                        data=images)
        self.hf_file.create_dataset('labels',
                                    np.shape(labels),
                                    h5py.h5t.STD_I32LE,
                                    maxshape=(None,),
                                    chunks=True,
                                    data=labels)
        self._went_through_create_h5 = True

    def _add_next_chunk_to_h5(self, images, labels):
        """Resize the existing datasets and write the new chunk at the end.

        Parameters
        ----------
        images : numpy tensor
            chunk of images to append
        labels : numpy array
            labels to append
        """
        self.hf_file['images'].resize(self.hf_file['images'].shape[0] + images.shape[0], axis=0)
        self.hf_file['labels'].resize(self.hf_file['labels'].shape[0] + labels.shape[0], axis=0)
        self.hf_file['images'][-images.shape[0]:] = images
        self.hf_file['labels'][-labels.shape[0]:] = labels

    def read_h5(self):
        """Open the H5 file read-only; caller must close it afterwards."""
        self.open_or_close_h5('r')
        print('''**remember to CLOSE the H5 file when you are done!!!** with ".close_h5()" method''')

    def close_h5(self):
        """Close the H5 file handle."""
        self.open_or_close_h5('close')
        print('H5 file was closed')

    def open_or_close_h5(self, mode_='r'):
        """Close the current handle, then reopen in the requested mode unless 'close'.

        Parameters
        ----------
        mode_ : str
            mode can be H5py modes 'r', 'r+' 'w' (w overwrites file!) etc OR 'close' to
            ensure it is closed. separate function to prevent a bunch of try statements (Default value = 'r')
        """
        # always close first; reopen only when a real mode was requested
        try:
            self.hf_file.close()
        finally:
            if mode_.lower() != 'close':
                self.hf_file = h5py.File(self.h5_full_file_name, mode_)
#
def augment_helper(keras_datagen, num_aug_ims, num_reg_ims, in_img, in_label):
    """Generate augmented copies of a single image plus unmodified duplicates.

    Parameters
    ----------
    keras_datagen : keras.preprocessing.image.ImageDataGenerator
        configured augmentation generator
        (from keras.preprocessing.image import ImageDataGenerator).
    num_aug_ims : int
        number of augmented images to generate from the single input image.
    num_reg_ims : int
        number of unmodified copies of in_img, stacked at the front of the
        output so you can see the original alongside its augmentations.
    in_img : numpy array
        2D grayscale image or 3D image with a trailing color channel.
    in_label : int
        label for in_img; repeated to match the length of the output stack.

    Returns
    -------
    tuple
        (all_augment, out_labels) -- the stacked images and repeated labels.
    """
    if len(in_img.shape) == 2:
        # grayscale input: fake a 3-channel image so the generator accepts it
        in_img = np.repeat(in_img[..., np.newaxis], 3, -1)
    original_zoom = keras_datagen.zoom_range
    in_img = np.expand_dims(in_img, 0)
    flow_iter = keras_datagen.flow(in_img, batch_size=1)
    all_augment = np.tile(in_img, [num_reg_ims, 1, 1, 1])
    for _ in range(num_aug_ims):
        if original_zoom != [0, 0]:
            # keras 'zoom' scales x and y independently at random; pin both ends
            # of the range to one random value so each image gets an equal zoom
            zoom_value = np.random.uniform(low=original_zoom[0], high=original_zoom[1])
            keras_datagen.zoom_range = [zoom_value, zoom_value]
            flow_iter = keras_datagen.flow(in_img, batch_size=1)
        augmented = flow_iter.next()[0].astype('uint8')
        all_augment = np.append(all_augment, np.expand_dims(augmented, 0), 0)
    out_labels = np.repeat(in_label, num_aug_ims + num_reg_ims)
    keras_datagen.zoom_range = original_zoom  # restore the caller's setting
    return all_augment, out_labels
def img_unstacker(img_array, num_frames_wide=8, color_channel=True):
    """Tile a stack of equally-sized images into one large mosaic for display.

    Frames are laid out left to right, then top to bottom. A partial final row
    is padded with all-ones frames so every row has the same width.

    Parameters
    ----------
    img_array : sequence of numpy arrays
        stacked images, all the same shape.
    num_frames_wide : int
        number of frames per mosaic row; e.g. 8 with 20 input images gives an
        8-wide, 3-row mosaic with 4 blank padding frames (Default value = 8)
    color_channel : bool
        unused here; kept for interface compatibility with callers.

    Returns
    -------
    numpy array
        the assembled mosaic image.
    """
    finished_rows = []
    current_row = None
    for idx, frame in enumerate(img_array):
        if idx % num_frames_wide == 0:
            if current_row is not None:
                finished_rows.append(current_row)
            current_row = frame  # start a fresh row
        else:
            current_row = np.hstack((current_row, frame))
    # pad out a partial final row with blank (all-ones) frames
    pad_count = num_frames_wide - len(img_array) % num_frames_wide
    if pad_count not in (0, num_frames_wide):
        for _ in range(pad_count):
            current_row = np.hstack((current_row, np.ones_like(frame)))
    if not finished_rows:
        return current_row
    finished_rows.append(current_row)
    return np.vstack(finished_rows)
def original_image(x):
    """Map a normalized image batch from [-1, 1] back to uint8 values in [0, 255].

    Used to transform batch-generated images to the original intensity scale
    for plotting.

    Parameters
    ----------
    x : tensor
        image data scaled to [-1, 1].

    Returns
    -------
    tf tensor (uint8)
    """
    rescaled = (x + 1) * 127.5
    return tf.cast(rescaled, tf.uint8)
def predict_multiple_H5_files(H5_file_list, model_2_load, append_model_and_labels_to_name_string=False,
                              batch_size=1000, model_2_load_is_model=False, save_on=False,
                              label_save_name=None, disable_TQDM=False,
                              save_labels_to_this_h5_file_instead=None) -> object:
    """Run a model over every image batch in each H5 file, optionally saving the labels.

    Parameters
    ----------
    H5_file_list : list
        list of string(s) of H5 file full paths
    model_2_load : str or keras model
        either full path to model folder ending with ".ckpt" OR the loaded model itself. if the latter,
        the user MUST set "model_2_load_is_model" to True and "label_save_name" must be explicitly defined
        (when using a model path we use the model name to name the labels).
    append_model_and_labels_to_name_string : bool
        if True label_save_name = 'MODEL__' + label_save_name + '__labels', a simple way to keep track of
        labels from many models in a single H5 file and to find them later. (Default value = False)
    batch_size : int
        number of images to process per batch -- slower prediction speeds << ideal prediction speed <<
        memory issues and crashes -- 1000 is normally pretty good on Google CoLab (Default value = 1000)
    model_2_load_is_model : bool
        lets the program know if you are directly inserting a model (instead of a path to a model folder)
        (Default value = False)
    save_on : bool
        saves to H5 file: either the original H5 (image source) or a new H5 if
        "save_labels_to_this_h5_file_instead" is given (Default value = False)
    label_save_name : string
        h5 file key used to save the labels to, default is 'MODEL__' + **model_name** + '__labels'
    disable_TQDM : bool
        if True, turns off the loading progress bar. (Default value = False)
    save_labels_to_this_h5_file_instead : string
        full path to an H5 file to insert labels into instead of the H5 used as the image source
        (Default value = None)

    Returns
    -------
    numpy array
        predicted labels of the last H5 file processed (each file's labels are also
        written to disk when save_on is True).
    """
    for i, H5_file in enumerate(H5_file_list):
        gen = ImageBatchGenerator(batch_size, [H5_file])
        if model_2_load_is_model:
            if label_save_name is None and save_on:
                # BUG FIX: was 'assert 1 == 0, ...', which silently disappears under "python -O"
                raise ValueError(
                    'label_save_name must be assigned if you are loading a model in directly and saveon == True.')
            model = model_2_load
        else:
            if label_save_name is None:
                label_save_name = model_2_load.split(os.path.sep)[-1].split('.')[0]
                label_save_name = 'MODEL__' + label_save_name + '__labels'
                append_model_and_labels_to_name_string = False  # turn off because defaults to this naming scheme if user doesnt put in name
            model = tf.keras.models.load_model(model_2_load)
        if append_model_and_labels_to_name_string:
            label_save_name = 'MODEL__' + label_save_name + '__labels'
        start = time.time()
        labels_2_save = np.asarray([])  # reset per file; only the last file's labels are returned
        for k in tqdm(range(gen.__len__()), disable=disable_TQDM):
            TMP_X, tmp_y = gen.getXandY(k)
            outY = model.predict(TMP_X)
            labels_2_save = np.append(labels_2_save, outY)
        total_seconds = time.time() - start
        time_per_mil = np.round(1000000 * total_seconds / len(labels_2_save))
        print(str(time_per_mil) + ' seconds per 1 million images predicted')
        if save_on:
            if save_labels_to_this_h5_file_instead is not None:  # add to a different H5 file
                H5_file = save_labels_to_this_h5_file_instead  # otherwise add to the current source H5
            # the context manager owns the handle, so the old bare 'try: hf.close(); except: pass'
            # dance and the redundant hf.close() inside the 'with' are no longer needed; the
            # bare 'except:' around the delete/create is replaced by an explicit key check
            with h5py.File(H5_file, 'r+') as hf:
                if label_save_name in hf.keys():
                    del hf[label_save_name]
                    time.sleep(10)  # give time to process the deleted file... maybe???
                hf.create_dataset(label_save_name, data=np.float64(labels_2_save))
    return labels_2_save
def get_total_frame_count(h5_file_list):
    """Return the number of images stored in each H5 file.

    Parameters
    ----------
    h5_file_list : list
        full paths to H5 files, each containing an 'images' dataset.

    Returns
    -------
    list of int
        image count per file, in the same order as h5_file_list.
    """
    total_frame_count = []
    for H5_file in h5_file_list:
        # BUG FIX: use a context manager so every file handle is released
        # (the previous version opened each file and never closed it)
        with h5py.File(H5_file, 'r') as H5:
            total_frame_count.append(H5['images'].shape[0])
    return total_frame_count
def batch_size_file_ind_selector(num_in_each, batch_size):
    """batch_size_file_ind_selector - needed for ImageBatchGenerator to know which H5 file index
    to use depending on the iteration number used in __getitem__ in the generator.
    this all depends on the variable batch size.

    Example: the output of the following...
    batch_size_file_ind_selector([4000, 4001, 3999], [2000])
    would be [0, 0, 1, 1, 1, 2, 2] which means that there are 2 chunks in the first
    H5 file, 3 in the second and 2 in the third based on chunk size of 2000

    Parameters
    ----------
    num_in_each : list
        number of frames in each H5 file.
    batch_size : int
        frames per batch.

    Returns
    -------
    numpy array
        one entry per batch, giving the index of the file that batch is drawn from.
    """
    break_into = np.ceil(np.array(num_in_each) / batch_size)
    extract_inds = np.array([])
    for k, chunk_count in enumerate(break_into):
        # BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int instead
        file_inds = np.ones(int(chunk_count)) * k
        extract_inds = np.concatenate((extract_inds, file_inds), axis=0)
    return extract_inds
# file_inds_for_H5_extraction is the same as extract_inds output from the above function
def reset_to_first_frame_for_each_file_ind(file_inds_for_H5_extraction):
    """reset_to_first_frame_for_each_file_ind - uses the output of batch_size_file_ind_selector
    to determine when to reset the index for each individual H5 file. using the above example
    the output would be [0, 0, 2, 2, 2, 5, 5], each would be subtracted from the indexing to
    set the position of the index to 0 for each new H5 file.

    Parameters
    ----------
    file_inds_for_H5_extraction : array-like
        per-batch file indices, e.g. [0, 0, 1, 1, 1, 2, 2].

    Returns
    -------
    list
        per-batch offset of the first batch belonging to the same file.
    """
    # BUG FIXES: the diff/where/append chain only depends on the input, so compute it
    # once instead of once per element (was accidentally O(n^2)); and np.int was
    # removed in NumPy 1.24 -- use the builtin int instead
    diffs = np.diff(file_inds_for_H5_extraction)
    change_points = np.where(diffs != 0)
    first_chunk_inds = np.append(-1, change_points[0]) + 1
    subtract_for_index = []
    for file_ind in file_inds_for_H5_extraction:
        subtract_for_index.append(first_chunk_inds[int(file_ind)])
    return subtract_for_index
class ImageBatchGenerator(keras.utils.Sequence):
    """Keras Sequence that serves (images, labels) batches straight from one or more H5 files.

    Each batch index maps to one `batch_size` chunk inside a single H5 file; the helpers
    `batch_size_file_ind_selector` and `reset_to_first_frame_for_each_file_ind`
    precompute that batch-index-to-file mapping.
    """

    def __init__(self, batch_size, h5_file_list, label_key='labels'):
        h5_file_list = utils.make_list(h5_file_list, suppress_warning=True)
        num_frames_in_all_H5_files = get_total_frame_count(h5_file_list)
        file_inds_for_H5_extraction = batch_size_file_ind_selector(
            num_frames_in_all_H5_files, batch_size)
        subtract_for_index = reset_to_first_frame_for_each_file_ind(
            file_inds_for_H5_extraction)
        self.label_key = label_key
        self.batch_size = batch_size
        self.H5_file_list = h5_file_list
        self.num_frames_in_all_H5_files = num_frames_in_all_H5_files
        self.file_inds_for_H5_extraction = file_inds_for_H5_extraction
        self.subtract_for_index = subtract_for_index
        self.IMG_SIZE = 96

    def __len__(self):
        # one batch per (file, chunk) pair
        return len(self.file_inds_for_H5_extraction)

    def __getitem__(self, num_2_extract):
        b = self.batch_size
        # BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int instead
        H5_file = self.H5_file_list[int(self.file_inds_for_H5_extraction[num_2_extract])]
        with h5py.File(H5_file, 'r') as H5:
            # offset the global batch index so it starts at 0 within this file
            num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]
            raw_X = H5['images'][b * num_2_extract_mod:b * (num_2_extract_mod + 1)]
            rgb_tensor = self.image_transform(raw_X)
            raw_Y = H5[self.label_key][b * num_2_extract_mod:b * (num_2_extract_mod + 1)]
        return rgb_tensor, raw_Y

    def getXandY(self, num_2_extract):
        """Return one (transformed images, labels) batch.

        Parameters
        ----------
        num_2_extract : int
            batch index.

        Returns
        -------
        tuple
            (tf image tensor, numpy label array). Delegates to __getitem__, which
            (unlike the old copy-pasted version of this method) also closes the
            H5 file handle it opens.
        """
        return self.__getitem__(num_2_extract)

    def image_transform(self, raw_X):
        """input num_of_images x H x W (grayscale) or N x H x W x 3 (already RGB).
        MobileNetV2 requires certain image dimensions; images are cast to float32,
        scaled to [-1, 1] and resized to self.IMG_SIZE x self.IMG_SIZE (square).

        Parameters
        ----------
        raw_X : numpy array
            image stack.

        Returns
        -------
        tf tensor
        """
        if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:
            rgb_batch = copy.deepcopy(raw_X)
        else:
            rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)
        rgb_tensor = tf.cast(rgb_batch, tf.float32)  # convert to tf tensor with float32 dtypes
        rgb_tensor = (rgb_tensor / 127.5) - 1  # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2
        rgb_tensor = tf.image.resize(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE))  # resizing
        self.IMG_SHAPE = (self.IMG_SIZE, self.IMG_SIZE, 3)
        return rgb_tensor

    def plot_batch_distribution(self):
        """Plot the class balance of one random batch plus sample images of each class."""
        # randomly select a batch and generate images and labels
        batch_num = np.random.choice(np.arange(0, self.__len__()))
        samp_x, samp_y = self.getXandY(batch_num)
        # look at the distribution of classes
        plt.pie([1 - np.mean(samp_y), np.mean(samp_y)],
                labels=['non-touch frames', 'touch frames'], autopct='%1.1f%%', )
        plt.title('class distribution from batch ' + str(batch_num))
        plt.show()
        # generate indices for positive and negative classes
        images_to_sample = 20
        neg_class = [i for i, val in enumerate(samp_y) if val == 0]
        pos_class = [i for i, val in enumerate(samp_y) if val == 1]
        neg_index = np.random.choice(neg_class, images_to_sample)
        pos_index = np.random.choice(pos_class, images_to_sample)
        # plot sample positive and negative class images
        plt.figure(figsize=(10, 10))
        samp_x = (samp_x + 1) / 2  # rescale from [-1, 1] back to [0, 1] for imshow
        for i in range(images_to_sample):
            plt.subplot(5, 10, i + 1)
            plt.xticks([])
            plt.yticks([])
            plt.grid(False)
            _ = plt.imshow(samp_x[neg_index[i]])
            plt.xlabel('0')
            plt.subplot(5, 10, images_to_sample + i + 1)
            plt.xticks([])
            plt.yticks([])
            plt.grid(False)
            plt.imshow(samp_x[pos_index[i]])
            plt.xlabel('1')
        plt.suptitle('sample images from batch ' + str(batch_num))
        plt.show()
def image_transform_(IMG_SIZE, raw_X):
    """Convert a grayscale or RGB image stack into a MobileNetV2-ready tensor.

    Input is num_of_images x H x W (grayscale) or N x H x W x 3 (already RGB).
    MobileNetV2 requires certain image dimensions; IMG_SIZE is a single number,
    so images are resized to squares.

    Parameters
    ----------
    IMG_SIZE : int
        side length of the square output images.
    raw_X : numpy array
        image stack.

    Returns
    -------
    tf tensor
        float32, scaled to [-1, 1], resized to IMG_SIZE x IMG_SIZE.
    """
    already_rgb = len(raw_X.shape) == 4 and raw_X.shape[3] == 3
    rgb_batch = copy.deepcopy(raw_X) if already_rgb else np.repeat(raw_X[..., np.newaxis], 3, -1)
    # float32, scaled to [-1, 1] as MobileNetV2 expects ( /127.5 = 0:2, -1 = -1:1 )
    scaled = tf.cast(rgb_batch, tf.float32) / 127.5 - 1
    return tf.image.resize(scaled, (IMG_SIZE, IMG_SIZE))
| [
"whacc.utils.make_list",
"matplotlib.pyplot.grid",
"numpy.hstack",
"time.sleep",
"numpy.array",
"whacc.utils.loop_segments",
"tensorflow.keras.models.load_model",
"copy.deepcopy",
"tensorflow.cast",
"os.remove",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.repeat",
"numpy.where",
"nu... | [((950, 979), 'whacc.utils.loop_segments', 'utils.loop_segments', (['frames_1'], {}), '(frames_1)\n', (969, 979), False, 'from whacc import utils\n'), ((1494, 1517), 'numpy.asarray', 'np.asarray', (['array_group'], {}), '(array_group)\n', (1504, 1517), True, 'import numpy as np\n'), ((1989, 2036), 'whacc.utils.make_list', 'utils.make_list', (['h5_list'], {'suppress_warning': '(True)'}), '(h5_list, suppress_warning=True)\n', (2004, 2036), False, 'from whacc import utils\n'), ((22064, 22089), 'numpy.expand_dims', 'np.expand_dims', (['in_img', '(0)'], {}), '(in_img, 0)\n', (22078, 22089), True, 'import numpy as np\n'), ((22159, 22198), 'numpy.tile', 'np.tile', (['in_img', '[num_reg_ims, 1, 1, 1]'], {}), '(in_img, [num_reg_ims, 1, 1, 1])\n', (22166, 22198), True, 'import numpy as np\n'), ((24403, 24437), 'tensorflow.cast', 'tf.cast', (['((x + 1) * 127.5)', 'tf.uint8'], {}), '((x + 1) * 127.5, tf.uint8)\n', (24410, 24437), True, 'import tensorflow as tf\n'), ((30296, 30308), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (30304, 30308), True, 'import numpy as np\n'), ((38221, 38251), 'tensorflow.cast', 'tf.cast', (['rgb_batch', 'tf.float32'], {}), '(rgb_batch, tf.float32)\n', (38228, 38251), True, 'import tensorflow as tf\n'), ((38410, 38459), 'tensorflow.image.resize', 'tf.image.resize', (['rgb_tensor', '(IMG_SIZE, IMG_SIZE)'], {}), '(rgb_tensor, (IMG_SIZE, IMG_SIZE))\n', (38425, 38459), True, 'import tensorflow as tf\n'), ((1112, 1145), 'numpy.tile', 'np.tile', (['x[:, :, None]', 'tile_axes'], {}), '(x[:, :, None], tile_axes)\n', (1119, 1145), True, 'import numpy as np\n'), ((1306, 1339), 'numpy.concatenate', 'np.concatenate', (['(x, tmp1)'], {'axis': '(2)'}), '((x, tmp1), axis=2)\n', (1320, 1339), True, 'import numpy as np\n'), ((7569, 7594), 'numpy.sum', 'np.sum', (['split_percentages'], {}), '(split_percentages)\n', (7575, 7594), True, 'import numpy as np\n'), ((11165, 11188), 'h5py.File', 'h5py.File', (['h5file', '"""r+"""'], {}), "(h5file, 'r+')\n", 
(11174, 11188), False, 'import h5py\n'), ((13496, 13521), 'numpy.sum', 'np.sum', (['split_percentages'], {}), '(split_percentages)\n', (13502, 13521), True, 'import numpy as np\n'), ((21929, 21970), 'numpy.repeat', 'np.repeat', (['in_img[..., np.newaxis]', '(3)', '(-1)'], {}), '(in_img[..., np.newaxis], 3, -1)\n', (21938, 21970), True, 'import numpy as np\n'), ((24113, 24148), 'numpy.vstack', 'np.vstack', (['(im_stack, im_stack_tmp)'], {}), '((im_stack, im_stack_tmp))\n', (24122, 24148), True, 'import numpy as np\n'), ((27876, 27887), 'time.time', 'time.time', ([], {}), '()\n', (27885, 27887), False, 'import time\n'), ((27912, 27926), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (27922, 27926), True, 'import numpy as np\n'), ((29382, 29405), 'h5py.File', 'h5py.File', (['H5_file', '"""r"""'], {}), "(H5_file, 'r')\n", (29391, 29405), False, 'import h5py\n'), ((30425, 30469), 'numpy.concatenate', 'np.concatenate', (['(extract_inds, tmp1)'], {'axis': '(0)'}), '((extract_inds, tmp1), axis=0)\n', (30439, 30469), True, 'import numpy as np\n'), ((31218, 31254), 'numpy.diff', 'np.diff', (['file_inds_for_H5_extraction'], {}), '(file_inds_for_H5_extraction)\n', (31225, 31254), True, 'import numpy as np\n'), ((31270, 31289), 'numpy.where', 'np.where', (['(tmp1 != 0)'], {}), '(tmp1 != 0)\n', (31278, 31289), True, 'import numpy as np\n'), ((31601, 31653), 'whacc.utils.make_list', 'utils.make_list', (['h5_file_list'], {'suppress_warning': '(True)'}), '(h5_file_list, suppress_warning=True)\n', (31616, 31653), False, 'from whacc import utils\n'), ((34269, 34292), 'h5py.File', 'h5py.File', (['H5_file', '"""r"""'], {}), "(H5_file, 'r')\n", (34278, 34292), False, 'import h5py\n'), ((35778, 35808), 'tensorflow.cast', 'tf.cast', (['rgb_batch', 'tf.float32'], {}), '(rgb_batch, tf.float32)\n', (35785, 35808), True, 'import tensorflow as tf\n'), ((35975, 36034), 'tensorflow.image.resize', 'tf.image.resize', (['rgb_tensor', '(self.IMG_SIZE, self.IMG_SIZE)'], {}), '(rgb_tensor, 
(self.IMG_SIZE, self.IMG_SIZE))\n', (35990, 36034), True, 'import tensorflow as tf\n'), ((36632, 36642), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36640, 36642), True, 'import matplotlib.pyplot as plt\n'), ((36891, 36936), 'numpy.random.choice', 'np.random.choice', (['neg_class', 'images_to_sample'], {}), '(neg_class, images_to_sample)\n', (36907, 36936), True, 'import numpy as np\n'), ((36957, 37002), 'numpy.random.choice', 'np.random.choice', (['pos_class', 'images_to_sample'], {}), '(pos_class, images_to_sample)\n', (36973, 37002), True, 'import numpy as np\n'), ((37069, 37097), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (37079, 37097), True, 'import matplotlib.pyplot as plt\n'), ((37660, 37670), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37668, 37670), True, 'import matplotlib.pyplot as plt\n'), ((38112, 38132), 'copy.deepcopy', 'copy.deepcopy', (['raw_X'], {}), '(raw_X)\n', (38125, 38132), False, 'import copy\n'), ((38163, 38203), 'numpy.repeat', 'np.repeat', (['raw_X[..., np.newaxis]', '(3)', '(-1)'], {}), '(raw_X[..., np.newaxis], 3, -1)\n', (38172, 38203), True, 'import numpy as np\n'), ((1239, 1293), 'numpy.concatenate', 'np.concatenate', (['(x, imgs[stack_i][:, :, None])'], {'axis': '(2)'}), '((x, imgs[stack_i][:, :, None]), axis=2)\n', (1253, 1293), True, 'import numpy as np\n'), ((2086, 2103), 'h5py.File', 'h5py.File', (['k', '"""r"""'], {}), "(k, 'r')\n", (2095, 2103), False, 'import h5py\n'), ((2799, 2816), 'h5py.File', 'h5py.File', (['k', '"""r"""'], {}), "(k, 'r')\n", (2808, 2816), False, 'import h5py\n'), ((3763, 3782), 'os.mkdir', 'os.mkdir', (['fold_name'], {}), '(fold_name)\n', (3771, 3782), False, 'import os\n'), ((4154, 4171), 'os.remove', 'os.remove', (['new_fn'], {}), '(new_fn)\n', (4163, 4171), False, 'import os\n'), ((4218, 4240), 'h5py.File', 'h5py.File', (['new_fn', '"""w"""'], {}), "(new_fn, 'w')\n", (4227, 4240), False, 'import h5py\n'), ((4836, 4854), 
'h5py.File', 'h5py.File', (['k2', '"""a"""'], {}), "(k2, 'a')\n", (4845, 4854), False, 'import h5py\n'), ((7726, 7753), 'h5py.File', 'h5py.File', (['h5_to_split', '"""r"""'], {}), "(h5_to_split, 'r')\n", (7735, 7753), False, 'import h5py\n'), ((7955, 7992), 'numpy.random.choice', 'np.random.choice', (['L', 'L'], {'replace': '(False)'}), '(L, L, replace=False)\n', (7971, 7992), True, 'import numpy as np\n'), ((13653, 13680), 'h5py.File', 'h5py.File', (['h5_to_split', '"""r"""'], {}), "(h5_to_split, 'r')\n", (13662, 13680), False, 'import h5py\n'), ((13826, 13863), 'numpy.random.choice', 'np.random.choice', (['L', 'L'], {'replace': '(False)'}), '(L, L, replace=False)\n', (13842, 13863), True, 'import numpy as np\n'), ((16964, 17001), 'os.path.isfile', 'os.path.isfile', (['h5_new_full_file_name'], {}), '(h5_new_full_file_name)\n', (16978, 17001), False, 'import os\n'), ((17015, 17047), 'os.remove', 'os.remove', (['h5_new_full_file_name'], {}), '(h5_new_full_file_name)\n', (17024, 17047), False, 'import os\n'), ((17161, 17199), 'h5py.File', 'h5py.File', (['h5_new_full_file_name', '"""r+"""'], {}), "(h5_new_full_file_name, 'r+')\n", (17170, 17199), False, 'import h5py\n'), ((17241, 17278), 'h5py.File', 'h5py.File', (['h5_new_full_file_name', '"""w"""'], {}), "(h5_new_full_file_name, 'w')\n", (17250, 17278), False, 'import h5py\n'), ((19272, 19288), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (19280, 19288), True, 'import numpy as np\n'), ((22469, 22521), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'set_zoom[0]', 'high': 'set_zoom[1]'}), '(low=set_zoom[0], high=set_zoom[1])\n', (22486, 22521), True, 'import numpy as np\n'), ((22746, 22770), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (22760, 22770), True, 'import numpy as np\n'), ((23790, 23818), 'numpy.hstack', 'np.hstack', (['(im_stack_tmp, k)'], {}), '((im_stack_tmp, k))\n', (23799, 23818), True, 'import numpy as np\n'), ((27695, 27735), 
'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_2_load'], {}), '(model_2_load)\n', (27721, 27735), True, 'import tensorflow as tf\n'), ((28106, 28136), 'numpy.append', 'np.append', (['labels_2_save', 'outY'], {}), '(labels_2_save, outY)\n', (28115, 28136), True, 'import numpy as np\n'), ((28161, 28172), 'time.time', 'time.time', ([], {}), '()\n', (28170, 28172), False, 'import time\n'), ((30241, 30262), 'numpy.array', 'np.array', (['num_in_each'], {}), '(num_in_each)\n', (30249, 30262), True, 'import numpy as np\n'), ((31305, 31327), 'numpy.append', 'np.append', (['(-1)', 'tmp1[0]'], {}), '(-1, tmp1[0])\n', (31314, 31327), True, 'import numpy as np\n'), ((32612, 32636), 'numpy.int', 'np.int', (['i[num_2_extract]'], {}), '(i[num_2_extract])\n', (32618, 32636), True, 'import numpy as np\n'), ((32651, 32674), 'h5py.File', 'h5py.File', (['H5_file', '"""r"""'], {}), "(H5_file, 'r')\n", (32660, 32674), False, 'import h5py\n'), ((34230, 34254), 'numpy.int', 'np.int', (['i[num_2_extract]'], {}), '(i[num_2_extract])\n', (34236, 34254), True, 'import numpy as np\n'), ((35657, 35677), 'copy.deepcopy', 'copy.deepcopy', (['raw_X'], {}), '(raw_X)\n', (35670, 35677), False, 'import copy\n'), ((35716, 35756), 'numpy.repeat', 'np.repeat', (['raw_X[..., np.newaxis]', '(3)', '(-1)'], {}), '(raw_X[..., np.newaxis], 3, -1)\n', (35725, 35756), True, 'import numpy as np\n'), ((37186, 37211), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(10)', '(i + 1)'], {}), '(5, 10, i + 1)\n', (37197, 37211), True, 'import matplotlib.pyplot as plt\n'), ((37224, 37238), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (37234, 37238), True, 'import matplotlib.pyplot as plt\n'), ((37251, 37265), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (37261, 37265), True, 'import matplotlib.pyplot as plt\n'), ((37278, 37293), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (37286, 37293), True, 'import matplotlib.pyplot 
as plt\n'), ((37310, 37342), 'matplotlib.pyplot.imshow', 'plt.imshow', (['samp_x[neg_index[i]]'], {}), '(samp_x[neg_index[i]])\n', (37320, 37342), True, 'import matplotlib.pyplot as plt\n'), ((37355, 37370), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""0"""'], {}), "('0')\n", (37365, 37370), True, 'import matplotlib.pyplot as plt\n'), ((37384, 37428), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(10)', '(images_to_sample + i + 1)'], {}), '(5, 10, images_to_sample + i + 1)\n', (37395, 37428), True, 'import matplotlib.pyplot as plt\n'), ((37441, 37455), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (37451, 37455), True, 'import matplotlib.pyplot as plt\n'), ((37468, 37482), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (37478, 37482), True, 'import matplotlib.pyplot as plt\n'), ((37495, 37510), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (37503, 37510), True, 'import matplotlib.pyplot as plt\n'), ((37523, 37555), 'matplotlib.pyplot.imshow', 'plt.imshow', (['samp_x[pos_index[i]]'], {}), '(samp_x[pos_index[i]])\n', (37533, 37555), True, 'import matplotlib.pyplot as plt\n'), ((37568, 37583), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""1"""'], {}), "('1')\n", (37578, 37583), True, 'import matplotlib.pyplot as plt\n'), ((2155, 2181), 'numpy.asarray', 'np.asarray', (['h[key_name][:]'], {}), '(h[key_name][:])\n', (2165, 2181), True, 'import numpy as np\n'), ((2222, 2259), 'numpy.concatenate', 'np.concatenate', (['(out, h[key_name][:])'], {}), '((out, h[key_name][:]))\n', (2236, 2259), True, 'import numpy as np\n'), ((4265, 4283), 'h5py.File', 'h5py.File', (['h5', '"""r"""'], {}), "(h5, 'r')\n", (4274, 4283), False, 'import h5py\n'), ((7905, 7929), 'numpy.random.seed', 'np.random.seed', (['set_seed'], {}), '(set_seed)\n', (7919, 7929), True, 'import numpy as np\n'), ((8366, 8401), 'whacc.utils.loop_segments', 'utils.loop_segments', (['tmp_frame_list'], {}), '(tmp_frame_list)\n', (8385, 8401), 
False, 'from whacc import utils\n'), ((11675, 11695), 'numpy.shape', 'np.shape', (['frame_nums'], {}), '(frame_nums)\n', (11683, 11695), True, 'import numpy as np\n'), ((13776, 13800), 'numpy.random.seed', 'np.random.seed', (['set_seed'], {}), '(set_seed)\n', (13790, 13800), True, 'import numpy as np\n'), ((18521, 18537), 'numpy.shape', 'np.shape', (['images'], {}), '(images)\n', (18529, 18537), True, 'import numpy as np\n'), ((18909, 18925), 'numpy.shape', 'np.shape', (['images'], {}), '(images)\n', (18917, 18925), True, 'import numpy as np\n'), ((20861, 20901), 'h5py.File', 'h5py.File', (['self.h5_full_file_name', 'mode_'], {}), '(self.h5_full_file_name, mode_)\n', (20870, 20901), False, 'import h5py\n'), ((28725, 28749), 'h5py.File', 'h5py.File', (['H5_file', '"""r+"""'], {}), "(H5_file, 'r+')\n", (28734, 28749), False, 'import h5py\n'), ((31371, 31409), 'numpy.int', 'np.int', (['file_inds_for_H5_extraction[k]'], {}), '(file_inds_for_H5_extraction[k])\n', (31377, 31409), True, 'import numpy as np\n'), ((36455, 36470), 'numpy.mean', 'np.mean', (['samp_y'], {}), '(samp_y)\n', (36462, 36470), True, 'import numpy as np\n'), ((994, 1025), 'numpy.random.random', 'np.random.random', (['imgs[0].shape'], {}), '(imgs[0].shape)\n', (1010, 1025), True, 'import numpy as np\n'), ((10050, 10065), 'numpy.asarray', 'np.asarray', (['ims'], {}), '(ims)\n', (10060, 10065), True, 'import numpy as np\n'), ((10067, 10085), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (10077, 10085), True, 'import numpy as np\n'), ((10108, 10157), 'h5py.File', 'h5py.File', (['h5_creators[i].h5_full_file_name', '"""r+"""'], {}), "(h5_creators[i].h5_full_file_name, 'r+')\n", (10117, 10157), False, 'import h5py\n'), ((10359, 10396), 'numpy.asarray', 'np.asarray', (['list_of_new_frame_nums[i]'], {}), '(list_of_new_frame_nums[i])\n', (10369, 10396), True, 'import numpy as np\n'), ((15439, 15454), 'numpy.asarray', 'np.asarray', (['ims'], {}), '(ims)\n', (15449, 15454), True, 'import numpy 
as np\n'), ((15456, 15474), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (15466, 15474), True, 'import numpy as np\n'), ((23662, 23697), 'numpy.vstack', 'np.vstack', (['(im_stack, im_stack_tmp)'], {}), '((im_stack, im_stack_tmp))\n', (23671, 23697), True, 'import numpy as np\n'), ((28842, 28856), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (28852, 28856), False, 'import time\n'), ((30383, 30395), 'numpy.int', 'np.int', (['elem'], {}), '(elem)\n', (30389, 30395), True, 'import numpy as np\n'), ((36438, 36453), 'numpy.mean', 'np.mean', (['samp_y'], {}), '(samp_y)\n', (36445, 36453), True, 'import numpy as np\n'), ((3946, 3966), 'os.path.basename', 'os.path.basename', (['h5'], {}), '(h5)\n', (3962, 3966), False, 'import os\n'), ((4021, 4040), 'os.path.dirname', 'os.path.dirname', (['h5'], {}), '(h5)\n', (4036, 4040), False, 'import os\n'), ((4057, 4077), 'os.path.basename', 'os.path.basename', (['h5'], {}), '(h5)\n', (4073, 4077), False, 'import os\n'), ((24013, 24028), 'numpy.ones_like', 'np.ones_like', (['k'], {}), '(k)\n', (24025, 24028), True, 'import numpy as np\n'), ((9903, 9918), 'numpy.asarray', 'np.asarray', (['ims'], {}), '(ims)\n', (9913, 9918), True, 'import numpy as np\n'), ((9920, 9938), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (9930, 9938), True, 'import numpy as np\n'), ((15292, 15307), 'numpy.asarray', 'np.asarray', (['ims'], {}), '(ims)\n', (15302, 15307), True, 'import numpy as np\n'), ((15309, 15327), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (15319, 15327), True, 'import numpy as np\n'), ((28970, 28995), 'numpy.float64', 'np.float64', (['labels_2_save'], {}), '(labels_2_save)\n', (28980, 28995), True, 'import numpy as np\n'), ((8061, 8094), 'numpy.cumsum', 'np.cumsum', (['split_percentages[:-1]'], {}), '(split_percentages[:-1])\n', (8070, 8094), True, 'import numpy as np\n'), ((10513, 10533), 'numpy.shape', 'np.shape', (['frame_nums'], {}), '(frame_nums)\n', (10521, 10533), 
True, 'import numpy as np\n'), ((14040, 14073), 'numpy.cumsum', 'np.cumsum', (['split_percentages[:-1]'], {}), '(split_percentages[:-1])\n', (14049, 14073), True, 'import numpy as np\n'), ((29081, 29106), 'numpy.float64', 'np.float64', (['labels_2_save'], {}), '(labels_2_save)\n', (29091, 29106), True, 'import numpy as np\n')] |
"""Build 5-fold train/dev/test splits for the sentiment data set.

Reads the raw train/test CSVs, drops unlabelled training rows, fills
missing text fields with a placeholder, and writes one directory per
fold (./data/data_0 .. ./data/data_4) containing train.csv, dev.csv
and test.csv.
"""
import os
import random
import shutil

import pandas as pd

# Load raw data and attach the labels.
train_df = pd.read_csv("./data/Train_DataSet.csv")
train_label_df = pd.read_csv("./data/Train_DataSet_Label.csv")
test_df = pd.read_csv("./data/Test_DataSet.csv")
train_df = train_df.merge(train_label_df, on='id', how='left')
# Rows without a label are marked with -1 and dropped.
train_df['label'] = train_df['label'].fillna(-1)
train_df = train_df[train_df['label'] != -1]
train_df['label'] = train_df['label'].astype(int)
test_df['label'] = 0
# Fill missing text fields with a placeholder character.
test_df['content'] = test_df['content'].fillna('无')
train_df['content'] = train_df['content'].fillna('无')
test_df['title'] = test_df['title'].fillna('无')
train_df['title'] = train_df['title'].fillna('无')
# Partition the row indices into 5 disjoint folds.
index = set(range(train_df.shape[0]))
K_fold = []
for i in range(5):
    if i == 4:
        # The last fold takes whatever indices are left over.
        tmp = index
    else:
        # BUGFIX: random.sample() requires a sequence -- passing a set
        # raises TypeError on Python 3.11+.  Sort first so the sampled
        # population order is deterministic.
        tmp = random.sample(sorted(index), int(1.0 / 5 * train_df.shape[0]))
    index = index - set(tmp)
    print("Number:", len(tmp))
    K_fold.append(tmp)
# Write train/dev/test CSVs for each fold.
for i in range(5):
    print("Fold", i)
    fold_dir = "./data/data_{}".format(i)
    # Portable replacement for `rm -rf` / `mkdir` via os.system().
    if os.path.exists(fold_dir):
        shutil.rmtree(fold_dir)
    os.makedirs(fold_dir)
    dev_index = list(K_fold[i])
    train_index = []
    for j in range(5):
        if j != i:
            train_index += K_fold[j]
    train_df.iloc[train_index].to_csv("./data/data_{}/train.csv".format(i))
    train_df.iloc[dev_index].to_csv("./data/data_{}/dev.csv".format(i))
    test_df.to_csv("./data/data_{}/test.csv".format(i))
| [
"pandas.read_csv"
] | [((56, 95), 'pandas.read_csv', 'pd.read_csv', (['"""./data/Train_DataSet.csv"""'], {}), "('./data/Train_DataSet.csv')\n", (67, 95), True, 'import pandas as pd\n'), ((113, 158), 'pandas.read_csv', 'pd.read_csv', (['"""./data/Train_DataSet_Label.csv"""'], {}), "('./data/Train_DataSet_Label.csv')\n", (124, 158), True, 'import pandas as pd\n'), ((169, 207), 'pandas.read_csv', 'pd.read_csv', (['"""./data/Test_DataSet.csv"""'], {}), "('./data/Test_DataSet.csv')\n", (180, 207), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""
SPM (Simple Parameter Mode) is an alternate command input mode for NICOS where
entering Python code is not required.
The syntax is very simple and allows no variables, loops or conditionals: a
command line consists of a command and optional arguments, separated by spaces.
Arguments can be numbers, device names, strings and symbols (words that signify
a command option). Strings can be quoted or unquoted as long as they start
with a nondigit character.
Examples::
read
move a1 180
scan sth 10.4 0.4 25 t 2
"""
# XXX SPM todos:
# * figure out how to convert code examples in docstrings
# * add a way to make commands unavailable (e.g. manualscan)
import re
from itertools import chain, cycle, islice
from nicos.core.device import Device
from nicos.core.errors import SPMError
# Matches a complete Python-style identifier (anchored at the end).
id_re = re.compile('[a-zA-Z_][a-zA-Z0-9_]*$')
# Single- and double-quoted string literals with backslash escapes.
string1_re = re.compile(r"'(\\\\|\\'|[^'])*'")
string2_re = re.compile(r'"(\\\\|\\"|[^"])*"')
# Runs of whitespace separating tokens.
spaces_re = re.compile(r'\s+')
# A bare token: anything up to whitespace or the ';' command separator.
nospace_re = re.compile(r'[^ \t;]+')
def spmsyntax(*arguments, **options):
    """Attach SPM syntax information to a user command.

    The positional *arguments* describe the expected positional tokens
    and *options* maps option names to their token types.  Both are
    stored on the decorated function as the ``spmsyntax`` attribute,
    where the completion and parameter-checking machinery finds them.
    """
    def attach(func):
        func.spmsyntax = (arguments, options)
        return func
    return attach
class bare(str):
    """A string whose ``repr()`` is the text itself, without quotes.

    Used to splice already-quoted or code-like fragments verbatim into
    the generated Python source.
    """
    def __repr__(self):
        # Yield the raw text instead of a quoted representation.
        return str(self)
class NoParse(Exception):
    """Raised when a token cannot be parsed as the expected element.

    ``expected`` describes what the parser wanted; ``token`` is the
    offending piece of input.
    """
    def __init__(self, expected, token):
        super().__init__(expected, token)
        self.expected = expected
        self.token = token
class Token:
    """Base class for a single element of SPM command syntax.

    Subclasses override :meth:`handle` to parse a raw token into Python
    source and :meth:`complete` to offer tab-completion candidates.
    """
    desc = 'token'

    def handle(self, arg, session):
        """Parse *arg*; the base class accepts nothing."""
        raise NoParse('strange token', arg)

    def complete(self, text, session, argsofar):
        """Return completion candidates for *text*; none by default."""
        return []
class String(Token):
    """A token representing a string argument.

    Input that is already quoted is spliced into the generated code
    unchanged (as a ``bare`` fragment); anything else is returned as-is
    and quoted later via ``repr()``.
    """
    desc = 'string'

    def handle(self, arg, session):
        is_quoted = string1_re.match(arg) or string2_re.match(arg)
        if is_quoted:
            return bare(arg)
        return arg
String = String()
class Bare(Token):
    """A token passed through as a bare value.

    Identifiers that do not shadow a name in the user namespace stay as
    plain strings; everything else is parenthesized so it evaluates as
    an expression.
    """
    desc = 'value'

    def handle(self, arg, session):
        if id_re.match(arg) and arg not in session.namespace:
            return arg
        return bare('(' + arg + ')')
Bare = Bare()
class Num(Token):
    """A token that must parse as a floating point number."""
    desc = 'number'

    def handle(self, arg, session):
        try:
            value = float(arg)
        except ValueError:
            raise NoParse('number', arg) from None
        return value
Num = Num()
class Int(Token):
    """A token that must parse as an integer."""
    desc = 'integer'

    def handle(self, arg, session):
        try:
            value = int(arg)
        except ValueError:
            raise NoParse('integer', arg) from None
        return value
Int = Int()
class Oneof(Token):
    """A token restricted to a fixed set of (lower-case) choices."""

    def __init__(self, *choices):
        self.choices = choices

    @property
    def desc(self):
        return 'one of ' + ', '.join(self.choices)

    def handle(self, arg, session):
        lowered = arg.lower()
        if lowered not in self.choices:
            raise NoParse(self.desc, arg)
        return lowered

    def complete(self, text, session, argsofar):
        return [choice for choice in self.choices if choice.startswith(text)]
class Bool(Token):
    """A token that must be ``true`` or ``false`` (case-insensitive)."""
    desc = 'boolean'

    def handle(self, arg, session):
        if arg.lower() in ('true', 'false'):
            # Capitalize so the generated code uses the Python constants.
            return bare(arg.capitalize())
        raise NoParse('true or false', arg)

    def complete(self, text, session, argsofar):
        return [word for word in ('true', 'false') if word.startswith(text)]
Bool = Bool()
class Dev(Token):
    """A token naming a device, optionally restricted to a device class."""
    desc = 'device name'

    def __init__(self, devtype=Device):
        self.devtype = devtype

    def clsrep(self, cls):
        """Return a human readable name for a class or tuple of classes."""
        if isinstance(cls, tuple):
            return ' or '.join(self.clsrep(item) for item in cls)
        return cls.__name__

    def handle(self, arg, session):
        if arg not in session.explicit_devices:
            raise NoParse('device name', arg)
        if not isinstance(session.devices[arg], self.devtype):
            raise NoParse('%s device' % self.clsrep(self.devtype), arg)
        return bare(arg)

    def complete(self, text, session, argsofar):
        return [dev for dev in session.explicit_devices if dev.startswith(text)
                and isinstance(session.devices[dev], self.devtype)]
AnyDev = Dev()
class DevParam(Token):
    """A token naming a parameter of the device given one token earlier."""
    desc = 'parameter name'

    def handle(self, arg, session):
        # Any word is acceptable; validity is checked by the command itself.
        return arg

    def complete(self, text, session, argsofar):
        # The device name precedes this token on the command line.
        try:
            dev = session.getDevice(argsofar[-2])
            return [param for param in dev.parameters
                    if param.startswith(text)]
        except Exception:
            return []
DevParam = DevParam()
class SetupName(Token):
    """A token naming a setup; *what* selects the completion pool.

    *what* must be one of ``'all'``, ``'unloaded'`` or ``'loaded'``.
    """
    desc = 'setup name'

    def __init__(self, what):
        self.what = what

    def handle(self, arg, session):
        if arg not in session._setup_info:
            raise NoParse('setup name', arg)
        return arg

    def complete(self, text, session, argsofar):
        # Only setups in user-facing groups are offered for completion.
        visible = [name for (name, info) in session._setup_info.items()
                   if info and info['group'] in ('basic', 'optional',
                                                 'plugplay', '')]
        if self.what == 'unloaded':
            candidates = [setup for setup in visible
                          if setup not in session.explicit_setups]
        elif self.what == 'loaded':
            candidates = session.explicit_setups
        elif self.what == 'all':
            candidates = visible
        return [c for c in candidates if c.startswith(text)]
class DeviceName(Token):
    """A token naming a configured (but not yet created) device."""
    desc = 'device name'

    def handle(self, arg, session):
        if arg not in session.configured_devices:
            raise NoParse('device name', arg)
        return arg

    def complete(self, text, session, argsofar):
        candidates = []
        for name in session.configured_devices:
            if not name.startswith(text):
                continue
            if name in session.devices:
                continue  # already created
            if session.configured_devices[name][1].get('lowlevel'):
                continue  # not meant for direct user access
            candidates.append(name)
        return candidates
DeviceName = DeviceName()
class Multi:
    """Marks the trailing syntax elements as repeatable.

    The wrapped *types* are cycled over for all remaining positional
    arguments of a command.
    """
    def __init__(self, *types):
        self.types = types
class SPMHandler:
    """The main handler for SPM commands."""
    def __init__(self, session):
        # The NICOS session provides the namespace, devices and logging.
        self.session = session
    def error(self, msg):
        """Abort translation of the current command with an SPMError."""
        raise SPMError(msg)
    def complete(self, command, word):
        """Return tab-completion candidates for *word* at the end of *command*."""
        def select(candidates, word):
            return [c for c in candidates if c.startswith(word)]
        try:
            # XXX could complete "?" too
            if command.startswith(('!', '?')) or command.endswith('?'):
                return []
            if command.startswith(':'):
                # Simulation escape: complete the wrapped command instead.
                return self.complete(command[1:].strip(), word)
            commands = self.tokenize(command, partial=True)
            # NOTE(review): tokenize(partial=True) can early-return a flat
            # token list instead of a list of commands -- confirm indexing
            # with [-1] is intended for that case.
            tokens = commands[-1]  # only last command is interesting
            if not word:
                tokens.append('')
            command = tokens[0]
            if len(tokens) == 1:
                # complete command
                return select([n for (n, o) in self.session.namespace.items()
                               if hasattr(o, 'is_usercommand') or
                               isinstance(o, Device)], word)
            cmdobj = self.session.namespace.get(command)
            if isinstance(cmdobj, Device):
                return []
            if not hasattr(cmdobj, 'is_usercommand'):
                return []
            return self.complete_command(cmdobj, tokens[1:], word)
        except Exception as err:
            # Completion must never raise into the caller.
            self.session.log.debug('error during completion: %s', err)
            return []
    def complete_command(self, command, args, word):
        """Complete an argument or option of a known user command."""
        syntax = getattr(command, 'spmsyntax', None)
        if syntax is None:
            return []
        arguments, options = syntax
        posargs = len(arguments)
        multargs = 0
        if arguments and isinstance(arguments[-1], Multi):
            # A trailing Multi element repeats its types indefinitely.
            multargs = len(arguments[-1].types)
            posargs -= 1
            arguments = chain(arguments[:-1], cycle(arguments[-1].types))
        # assume we're completing the last word on the command line
        if multargs or len(args) <= posargs:
            # is it a positional argument
            el = next(islice(arguments, len(args) - 1, len(args)))
            return el.complete(word, self.session, args)
        else:
            # must be an option
            which = (len(args) - posargs) % 2
            if which == 1:
                # option name
                return [n for n in options if n.startswith(word)]
            else:
                # option value
                optname = args[-2]
                if optname in options:
                    return options[optname].complete(word, self.session, args)
                return []
    def handle_script(self, code, fn):
        """Translate a whole SPM script *code* (from file *fn*) to Python."""
        lines = []
        for lineno, command in enumerate(code.splitlines()):
            try:
                lines.append(self.handle_line(command))
            except SPMError as err:
                # Prefix the error with file name and 1-based line number.
                err.args = ('in %s, line %d: ' % (fn or 'unnamed',
                                                  lineno + 1) + err.args[0],)
                raise
        return '\n'.join(lines)
    def handle_line(self, command):
        """Translate one SPM command line into equivalent Python code."""
        if command.startswith('#'):
            # Comments (only in script files)
            return 'pass'
        if command.startswith('!'):
            # Python escape
            return command[1:].strip()
        if command.startswith('?') or command.endswith('?'):
            # Help escape
            return 'help(%s)' % command.strip('?')
        if command.startswith(':'):
            # Simulation escape
            code = self.handle_line(command[1:])
            return 'sim(%r)' % code
        try:
            commands = self.tokenize(command)
        except NoParse as err:
            return self.error('could not parse starting at %r, expected %s' %
                              (err.token, err.expected))
        code = []
        for tokens in commands:
            if not tokens:
                code.append('pass')
                continue
            command = tokens[0]
            cmdobj = self.session.namespace.get(command)
            if hasattr(cmdobj, 'is_usercommand'):
                code.append(self.handle_command(cmdobj, tokens[1:]))
            elif isinstance(cmdobj, Device):
                code.append(self.handle_device(cmdobj, tokens[1:]))
            else:
                return self.error('no such command or device: %r' % command)
        return '; '.join(code)
    def tokenize(self, command, partial=False):
        """Split a command line into a list of commands (lists of tokens).

        With *partial* set, an unterminated token at the end is appended
        as-is instead of raising NoParse (used for completion).
        """
        rest = command
        commands = [[]]
        tokens = commands[0]
        while rest:
            if rest.startswith("'"):
                m = string1_re.match(rest)
                if not m:
                    if partial:
                        tokens.append(rest)
                        # NOTE(review): returns the token list here, while the
                        # normal exit below returns `commands` -- confirm.
                        return tokens
                    raise NoParse('single-quoted string', rest)
                tokens.append(m.group())
                rest = rest[m.end():]
            elif rest.startswith('"'):
                m = string2_re.match(rest)
                if not m:
                    if partial:
                        tokens.append(rest)
                        return tokens
                    raise NoParse('double-quoted string', rest)
                tokens.append(m.group())
                rest = rest[m.end():]
            elif rest.startswith('('):
                # Scan for the matching close paren (no nesting support).
                i = 1
                while i < len(rest):
                    if rest[i] == ')':
                        break
                    i += 1
                else:
                    if partial:
                        tokens.append(rest)
                        return tokens
                    raise NoParse('closing parenthesis', rest)
                tokens.append(rest[:i + 1])
                rest = rest[i + 1:]
            elif rest.startswith('['):
                i = 1
                while i < len(rest):
                    if rest[i] == ']':
                        break
                    i += 1
                else:
                    if partial:
                        tokens.append(rest)
                        return tokens
                    raise NoParse('closing bracket', rest)
                tokens.append(rest[:i + 1])
                rest = rest[i + 1:]
            elif rest[0].isspace():
                m = spaces_re.match(rest)
                rest = rest[m.end():]
            elif rest.startswith(';'):
                # serial command execution
                commands.append([])
                tokens = commands[-1]
                rest = rest[1:]
            else:
                m = nospace_re.match(rest)
                tokens.append(m.group())
                rest = rest[m.end():]
        return commands
    def handle_device(self, device, args):
        """Translate `dev` -> read(), `dev value` -> maw()."""
        if not args:
            return 'read(%s)' % device
        elif len(args) == 1:
            return 'maw(%s, %s)' % (device, args[0])
        return self.error('too many arguments for simple device command')
    def handle_command(self, command, args):
        """Translate a user command with arguments into a Python call."""
        syntax = getattr(command, 'spmsyntax', None)
        if syntax is None:
            # Without declared syntax, treat every argument as a bare value.
            syntax = ((Bare,) * len(args), {})
        arguments, options = syntax
        posargs = len(arguments)
        multargs = 1
        if arguments and isinstance(arguments[-1], Multi):
            multargs = len(arguments[-1].types)
            posargs -= 1
            arguments = chain(arguments[:-1], cycle(arguments[-1].types))
        # first, parse positional arguments (all must be given)
        cmdargs = []
        nargs = 0
        for element in arguments:
            if not args:
                # A Multi group must be complete (a multiple of its size).
                if nargs < posargs or (nargs - posargs) % multargs != 0:
                    return self.error('premature end of command, expected %s'
                                      % element.desc)
                break
            try:
                parg = element.handle(args[0], self.session)
            except NoParse as err:
                return self.error('invalid argument at %r, expected %s' %
                                  (err.token, err.expected))
            cmdargs.append(parg)
            args = args[1:]
            nargs += 1
        # now come options
        cmdopts = {}
        if len(args) % 2:
            return self.error('too many arguments at %r, expected end of '
                              'command' % args[-1])
        while args:
            opt, val = args[:2]
            args = args[2:]
            if not id_re.match(opt):
                return self.error('invalid syntax at %r, expected option name'
                                  % opt)
            if opt in options:
                try:
                    val = options[opt].handle(val, self.session)
                except NoParse as err:
                    return self.error('invalid argument at %r, expected %s' %
                                      (err.token, err.expected))
            else:
                # Unknown options are passed through as bare values.
                val = bare(val)
            cmdopts[opt] = val
        # now nothing should be left
        return command.__name__ + '(*%s, **%s)' % (cmdargs, cmdopts)
| [
"itertools.cycle",
"nicos.core.errors.SPMError",
"re.compile"
] | [((1867, 1904), 're.compile', 're.compile', (['"""[a-zA-Z_][a-zA-Z0-9_]*$"""'], {}), "('[a-zA-Z_][a-zA-Z0-9_]*$')\n", (1877, 1904), False, 'import re\n'), ((1918, 1956), 're.compile', 're.compile', (['"""\'(\\\\\\\\\\\\\\\\|\\\\\\\\\'|[^\'])*\'"""'], {}), '("\'(\\\\\\\\\\\\\\\\|\\\\\\\\\'|[^\'])*\'")\n', (1928, 1956), False, 'import re\n'), ((1965, 2003), 're.compile', 're.compile', (['""""(\\\\\\\\\\\\\\\\|\\\\\\\\"|[^"])*\\""""'], {}), '(\'"(\\\\\\\\\\\\\\\\|\\\\\\\\"|[^"])*"\')\n', (1975, 2003), False, 'import re\n'), ((2011, 2029), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (2021, 2029), False, 'import re\n'), ((2043, 2066), 're.compile', 're.compile', (['"""[^ \\\\t;]+"""'], {}), "('[^ \\\\t;]+')\n", (2053, 2066), False, 'import re\n'), ((7218, 7231), 'nicos.core.errors.SPMError', 'SPMError', (['msg'], {}), '(msg)\n', (7226, 7231), False, 'from nicos.core.errors import SPMError\n'), ((8948, 8974), 'itertools.cycle', 'cycle', (['arguments[-1].types'], {}), '(arguments[-1].types)\n', (8953, 8974), False, 'from itertools import chain, cycle, islice\n'), ((14459, 14485), 'itertools.cycle', 'cycle', (['arguments[-1].types'], {}), '(arguments[-1].types)\n', (14464, 14485), False, 'from itertools import chain, cycle, islice\n')] |
# ARX (autoregressive with exogenous input) time-series model
# identification from recorded step-test data via the APMonitor helper.
import numpy as np
import apm_id as arx
######################################################
# Configuration
######################################################
# number of terms
ny = 2 # output coefficients
nu = 1 # input coefficients
# number of inputs
ni = 1
# number of outputs
# NOTE(review): 'no' is not passed to apm_id() below -- confirm unused.
no = 1
# load data and parse into columns
data = np.loadtxt('data_step_test.csv',delimiter=',')
######################################################
# generate time-series model
arx.apm_id(data,ni,nu,ny)
| [
"numpy.loadtxt",
"apm_id.apm_id"
] | [((351, 398), 'numpy.loadtxt', 'np.loadtxt', (['"""data_step_test.csv"""'], {'delimiter': '""","""'}), "('data_step_test.csv', delimiter=',')\n", (361, 398), True, 'import numpy as np\n'), ((487, 515), 'apm_id.apm_id', 'arx.apm_id', (['data', 'ni', 'nu', 'ny'], {}), '(data, ni, nu, ny)\n', (497, 515), True, 'import apm_id as arx\n')] |
#!/usr/bin/env python
'''
setup
=====
This is a relatively complicated setup script, since
it does a few things to simplify version control
and configuration files.
There's a simple script that overrides the `build_py`
command to ensure there's proper version control set
for the library.
There's also a more complex `configure` command
that configures all images from template files,
and also configures the `cmake` wrapper and the
shell version information.
'''
# IMPORTS
# -------
import ast
import enum
import glob
import itertools
import json
import re
import os
import setuptools
import shutil
import stat
import subprocess
import sys
import textwrap
try:
from setuptools import setup, Command
from setuptools.command.build_py import build_py
from setuptools.command.install import install
has_setuptools = True
except ImportError:
from distutils.core import setup, Command
from distutils.command.build_py import build_py
from distutils.command.install import install
has_setuptools = False
try:
import py2exe
except ImportError:
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
print('Cannot import py2exe', file=sys.stderr)
exit(1)
# CONFIG
# ------
def load_json(path):
    '''Load a JSON file that may contain C++-style ``//`` comment lines.

    Only whole lines whose first non-blank characters are ``//`` are
    stripped before parsing; no inline-comment handling is attempted,
    which keeps the implementation trivial while allowing annotated
    configuration files.
    '''
    with open(path) as handle:
        content = handle.read()
    kept = [line for line in content.splitlines()
            if not line.strip().startswith('//')]
    return json.loads('\n'.join(kept))
# Resolve all paths relative to this setup script, not the caller's CWD.
HOME = os.path.dirname(os.path.realpath(__file__))
config = load_json(f'{HOME}/config/config.json')
# A lot of logic depends on being on the proper directory:
# this allows us to do out-of-source builds.
os.chdir(HOME)
def get_version(key):
    '''Return the six version fields for *key* from the JSON config.

    Yields ``(major, minor, patch, release, number, build)``; the first
    two fields are required, the rest default to the empty string.
    '''
    data = config[key]['version']
    return (
        data['major'],
        data['minor'],
        data.get('patch', ''),
        data.get('release', ''),
        data.get('number', ''),
        data.get('build', ''),
    )
# Read the xcross version information.
major, minor, patch, release, number, build = get_version('xcross')
version = f'{major}.{minor}'
if patch != '0':
    version = f'{version}.{patch}'
# PEP 440 release suffixes for pre/post releases.
release_type = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc', 'post': '.post'}
if release and not number:
    raise ValueError('Must provide a release number with a non-final build.')
elif release:
    version = f'{version}{release_type[release]}{number}'
# py2exe version is valid one of the following:
#   [0-255].[0-255].[0-65535]
#   [0-255].[0-255].[0-255].[0-255]
# Therefore, we can never provide release candidate
# values or omit the patch field.
py2exe_version = f'{major}.{minor}.{patch}'
# NOTE(review): get_version() yields (major, minor, patch, release, number,
# build); the fourth name below therefore receives the *release* field,
# not *build* -- confirm 'docker_build' is intended.
docker_major, docker_minor, docker_patch, docker_build, *_ = get_version('docker')
docker_version = f'{docker_major}.{docker_minor}'
if docker_patch != '0':
    docker_version = f'{docker_version}.{docker_patch}'
# Read the dependency version information.
# This is the GCC and other utilities version from crosstool-NG.
ubuntu_major, ubuntu_minor, *_ = get_version('ubuntu')
ubuntu_version = f'{ubuntu_major}.{ubuntu_minor}'
emsdk_major, emsdk_minor, emsdk_patch, *_ = get_version('emsdk')
emsdk_version = f'{emsdk_major}.{emsdk_minor}.{emsdk_patch}'
gcc_major, gcc_minor, gcc_patch, *_ = get_version('gcc')
gcc_version = f'{gcc_major}.{gcc_minor}.{gcc_patch}'
binutils_major, binutils_minor, *_ = get_version('binutils')
binutils_version = f'{binutils_major}.{binutils_minor}'
mingw_major, mingw_minor, mingw_patch, *_ = get_version('mingw')
mingw_version = f'{mingw_major}.{mingw_minor}.{mingw_patch}'
glibc_major, glibc_minor, *_ = get_version('glibc')
glibc_version = f'{glibc_major}.{glibc_minor}'
musl_major, musl_minor, musl_patch, *_ = get_version('musl')
musl_version = f'{musl_major}.{musl_minor}.{musl_patch}'
musl_cross_major, musl_cross_minor, musl_cross_patch, *_ = get_version('musl-cross')
musl_cross_version = f'{musl_cross_major}.{musl_cross_minor}.{musl_cross_patch}'
avr_major, avr_minor, avr_patch, *_ = get_version('avr')
avr_version = f'{avr_major}.{avr_minor}.{avr_patch}'
uclibc_major, uclibc_minor, uclibc_patch, *_ = get_version('uclibc')
uclibc_version = f'{uclibc_major}.{uclibc_minor}.{uclibc_patch}'
expat_major, expat_minor, expat_patch, *_ = get_version('expat')
expat_version = f'{expat_major}.{expat_minor}.{expat_patch}'
isl_major, isl_minor, *_ = get_version('isl')
isl_version = f'{isl_major}.{isl_minor}'
linux_major, linux_minor, linux_patch, *_ = get_version('linux')
linux_version = f'{linux_major}.{linux_minor}.{linux_patch}'
linux_headers_major, linux_headers_minor, linux_headers_patch, *_ = get_version('linux-headers')
linux_headers_version = f'{linux_headers_major}.{linux_headers_minor}.{linux_headers_patch}'
gmp_major, gmp_minor, gmp_patch, *_ = get_version('gmp')
gmp_version = f'{gmp_major}.{gmp_minor}.{gmp_patch}'
mpc_major, mpc_minor, mpc_patch, *_ = get_version('mpc')
mpc_version = f'{mpc_major}.{mpc_minor}.{mpc_patch}'
mpfr_major, mpfr_minor, mpfr_patch, *_ = get_version('mpfr')
mpfr_version = f'{mpfr_major}.{mpfr_minor}.{mpfr_patch}'
buildroot_major, buildroot_minor, buildroot_patch, *_ = get_version('buildroot')
buildroot_version = f'{buildroot_major}.{buildroot_minor}.{buildroot_patch}'
ct_major, ct_minor, ct_patch, *_ = get_version('crosstool-ng')
ct_version = f'{ct_major}.{ct_minor}.{ct_patch}'
qemu_major, qemu_minor, qemu_patch, *_ = get_version('qemu')
qemu_version = f'{qemu_major}.{qemu_minor}.{qemu_patch}'
# The RISC-V toolchain pins its component versions individually.
riscv_toolchain_version = config['riscv-gnu-toolchain']['riscv-version']
riscv_binutils_version = config['riscv-gnu-toolchain']['binutils-version']
riscv_gdb_version = config['riscv-gnu-toolchain']['gdb-version']
riscv_glibc_version = config['riscv-gnu-toolchain']['glibc-version']
riscv_newlib_version = config['riscv-gnu-toolchain']['newlib-version']
# Other config options.
bin_directory = f'{config["options"]["sysroot"]}/bin/'
# Read the long description.
description = 'Zero-setup cross compilation.'
with open(f'{HOME}/README.md') as file:
    long_description = file.read()
# COMMANDS
# --------
# Literal boolean type for command arguments.
# Accepted Python types for literal boolean-ish command arguments.
bool_type = (type(None), bool, int)
def parse_literal(inst, key, default, valid_types=None):
    '''Evaluate a user-supplied option string on *inst* in place.

    If the attribute *key* differs from *default*, it is parsed with
    ``ast.literal_eval`` (and optionally type-checked against
    *valid_types*) and the parsed value is written back to *inst*.
    '''
    raw = getattr(inst, key)
    if raw == default:
        return
    parsed = ast.literal_eval(raw)
    if valid_types is not None:
        assert isinstance(parsed, valid_types)
    setattr(inst, key, parsed)
def check_call(code):
    '''Exit the process with *code* when a subprocess returned non-zero.'''
    if code:
        sys.exit(code)
def has_module(module):
    '''Return True when ``python -m module --version`` exits successfully.'''
    quiet = subprocess.DEVNULL
    status = subprocess.call(
        [sys.executable, '-m', module, '--version'],
        stdout=quiet,
        stderr=quiet,
    )
    return status == 0
def semver():
    '''Return the semantic-version tag list for the Docker images.'''
    tags = [
        f'{docker_major}.{docker_minor}',
        f'{docker_major}.{docker_minor}.{docker_patch}',
    ]
    # A bare major tag like "1" is only meaningful for stable releases.
    if docker_major != '0':
        tags.append(docker_major)
    return tags
def image_from_target(target, with_pkg=False):
    '''Return the full Docker image name (``user/repo:tag``) for *target*.

    With *with_pkg*, the package-image repository (``pkg`` prefix) is
    used instead of the plain one.
    '''
    meta = config['metadata']
    prefix = 'pkg' if with_pkg else ''
    return f"{meta['username']}/{prefix}{meta['repository']}:{target}"
def sorted_image_targets():
    '''Return all image targets: OS images, then bare-metal, then other.

    Each group is sorted alphabetically before concatenation.
    '''
    buckets = {'os': [], 'metal': [], 'other': []}
    for image in images:
        if image.os.is_os():
            buckets['os'].append(image.target)
        elif image.os.is_baremetal():
            buckets['metal'].append(image.target)
        else:
            buckets['other'].append(image.target)
    ordered = []
    for group in ('os', 'metal', 'other'):
        ordered.extend(sorted(buckets[group]))
    return ordered
def subslice_targets(start=None, stop=None):
    '''Return the ordered targets from *start* through *stop* inclusive.

    Either bound may be None to leave that end open.  A bound that is
    not present in the target list raises ValueError.
    '''
    subset = list(sorted_image_targets())
    if start is not None:
        del subset[:subset.index(start)]
    if stop is not None:
        del subset[subset.index(stop) + 1:]
    return subset
def build_image(docker, target, with_pkg=False):
    '''Invoke ``docker build`` for *target* and return its exit status.

    *docker* is the docker executable; *with_pkg* selects the package
    image Dockerfile directory.
    '''
    tag = image_from_target(target, with_pkg)
    directory = 'pkgimages' if with_pkg else 'images'
    dockerfile = f'{HOME}/docker/{directory}/Dockerfile.{target}'
    return subprocess.call([docker, 'build', '-t', tag, HOME,
                            '--file', dockerfile])
class CleanDistCommand(Command):
    '''Remove Python build artifacts (build/, dist/, egg-info, py2exe files).'''
    description = 'clean artifacts from previous python builds'
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        '''Delete the build directories and stray py2exe outputs.'''
        for directory in ('build', 'dist', 'xcross.egg-info'):
            shutil.rmtree(f'{HOME}/{directory}', ignore_errors=True)
        # py2exe drops its DLL/EXE/SO outputs next to setup.py.
        leftovers = []
        for pattern in ('*.dll', '*.exe', '*.so'):
            leftovers.extend(glob.glob(f'{HOME}/{pattern}'))
        for leftover in leftovers:
            os.remove(leftover)
class CleanCommand(Command):
    '''Remove every generated artifact, including configured templates.'''
    description = 'clean all previous builds'
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        '''Run the dist clean, then drop all configured/generated trees.'''
        self.run_command('clean_dist')
        generated = (
            'cmake/toolchain',
            'docker/images',
            'docker/pkgimages',
            'musl/config',
            'symlink/toolchain',
        )
        for subdir in generated:
            shutil.rmtree(f'{HOME}/{subdir}', ignore_errors=True)
class VersionCommand(Command):
    '''A custom command to configure the library version.'''
    description = 'set library version'
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def replace(self, string, replacements):
        '''Replace template variable with value.

        Each `(variable, value)` pair substitutes occurrences of the
        `^variable^` placeholder syntax used by the `.in` templates.
        '''
        for variable, value in replacements:
            string = string.replace(f'^{variable}^', value)
        return string
    def chmod(self, file):
        '''Make a file executable.'''
        # Add the executable bit for user, group, and other.
        flags = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        st = os.stat(file)
        os.chmod(file, st.st_mode | flags)
    def write_file(self, path, contents, chmod):
        '''Check if we need to write a file.

        Only rewrites the file when the contents changed, preserving
        mtimes (and therefore downstream build caching) otherwise.
        '''
        try:
            with open(path, 'r') as file:
                old_contents = file.read()
                should_update = old_contents != contents
        except FileNotFoundError:
            # Missing output: always write it.
            should_update = True
        if should_update:
            with open(path, 'w') as file:
                file.write(contents)
            if chmod:
                self.chmod(path)
    def configure(self, template, outfile, chmod, replacements):
        '''Configure a template file.'''
        with open(template, 'r') as file:
            contents = file.read()
        contents = self.replace(contents, replacements)
        self.write_file(outfile, contents, chmod)
    def run(self):
        '''Modify the library version.'''
        # Rendered verbatim into the generated module below; the internal
        # indentation matters, and the leading newline is stripped after
        # `textwrap.dedent`.
        version_info = f"""
        version_info(
            major='{major}',
            minor='{minor}',
            patch='{patch}',
            release='{release}',
            number='{number}',
            build='{build}'
        )"""
        xcross = f'{HOME}/xcross/__init__.py'
        self.configure(f'{xcross}.in', xcross, True, [
            ('BIN', f'"{bin_directory}"'),
            ('REPOSITORY', config['metadata']['repository']),
            ('USERNAME', config['metadata']['username']),
            ('VERSION_MAJOR', f"'{major}'"),
            ('VERSION_MINOR', f"'{minor}'"),
            ('VERSION_PATCH', f"'{patch}'"),
            ('VERSION_RELEASE', f"'{release}'"),
            ('VERSION_NUMBER', f"'{number}'"),
            ('VERSION_BUILD', f"'{build}'"),
            ('VERSION_INFO', textwrap.dedent(version_info)[1:]),
            ('VERSION', f"'{version}'"),
        ])
class TagCommand(Command):
    '''Scripts to automatically tag new versions.'''
    description = 'tag version for release'
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        '''Tag version for git release.

        Deletes any pre-existing tag for the current version, then
        re-creates it, always operating on the project repository.
        '''
        git = shutil.which('git')
        if not git:
            raise FileNotFoundError('Unable to find program git.')
        tag = f'v{version}'
        devnull = subprocess.DEVNULL
        # Point git at the project repository regardless of the cwd.
        env = os.environ.copy()
        env['GIT_DIR'] = f'{HOME}/.git'
        # Check whether a conflicting tag already exists.
        code = subprocess.call(
            [git, 'rev-parse', tag],
            stdout=devnull,
            stderr=devnull,
            env=env,
        )
        if code == 0:
            # Delete the existing, conflicting tag.
            # BUG FIX: `env` was previously only passed to `rev-parse`,
            # so tag deletion/creation acted on the repository of the
            # current working directory instead of `HOME`.
            check_call(subprocess.call(
                [git, 'tag', '-d', tag],
                stdout=devnull,
                stderr=devnull,
                env=env,
            ))
        # Tag the release.
        check_call(subprocess.call(
            [git, 'tag', tag],
            stdout=devnull,
            stderr=devnull,
            env=env,
        ))
class BuildImageCommand(Command):
    '''Build a single Docker image.'''
    description = 'build a single docker image'
    user_options = [
        ('target=', None, 'Target name'),
        ('with-package-managers=', None, 'Build an image with package managers.'),
    ]
    def initialize_options(self):
        self.target = None
        self.with_package_managers = None
    def finalize_options(self):
        assert self.target is not None
        parse_literal(self, 'with_package_managers', None, bool_type)
    def build_image(self, docker):
        '''Build the docker image for the configured target, or exit.'''
        status = build_image(docker, self.target, self.with_package_managers)
        if status != 0:
            print(f'Error: failed to build target {self.target}', file=sys.stderr)
            sys.exit(1)
    def run(self):
        '''Build single Docker image.'''
        docker = shutil.which('docker')
        if docker is None:
            raise FileNotFoundError('Unable to find command docker.')
        self.build_image(docker)
class BuildImagesCommand(Command):
    '''Build all Docker images.'''
    description = 'build all docker images'
    user_options = [
        ('start=', None, 'Start point for images to build.'),
        ('stop=', None, 'Stop point for images to build.'),
        ('with-package-managers=', None, 'Build package manager images.'),
    ]
    def initialize_options(self):
        # `start`/`stop` bound the (sorted) target list; both optional.
        self.start = None
        self.stop = None
        self.with_package_managers = None
    def finalize_options(self):
        # Coerce the command-line string value to a bool.
        parse_literal(self, 'with_package_managers', None, bool_type)
    def build_image(self, docker, target, with_package_managers=False):
        '''Build a Docker image.

        Returns True on success; failing targets are recorded in
        `self.failures` (initialized in `run`).
        '''
        if build_image(docker, target, with_package_managers) != 0:
            self.failures.append(target)
            return False
        return True
    def tag_image(self, docker, target, tag_name, with_package_managers=False):
        '''Tag an image.'''
        image = image_from_target(target, with_package_managers)
        tag = image_from_target(tag_name, with_package_managers)
        check_call(subprocess.call([docker, 'tag', image, tag]))
    def build_versions(self, docker, target, with_pkg=False):
        '''Build all versions of a given target.'''
        if not self.build_image(docker, target, with_pkg):
            return
        # Tag every versioned alias produced by `semver()`.
        for version in semver():
            self.tag_image(docker, target, f'{target}-{version}', with_pkg)
        # GNU/Linux targets also get aliases without the triple suffix.
        if target.endswith('-unknown-linux-gnu'):
            self.tag_versions(docker, target, target[:-len('-unknown-linux-gnu')], with_pkg)
    def tag_versions(self, docker, target, tag_name, with_pkg=False):
        '''Build all versions of a given target.'''
        self.tag_image(docker, target, tag_name, with_pkg)
        for version in semver():
            self.tag_image(docker, target, f'{tag_name}-{version}', with_pkg)
    def run(self):
        '''Build all Docker images.'''
        docker = shutil.which('docker')
        if not docker:
            raise FileNotFoundError('Unable to find command docker.')
        # Need to build our base vcpkg for package files.
        if self.with_package_managers:
            if build_image(docker, 'vcpkg', True) != 0:
                print('Error: failed to build target vcpkg', file=sys.stderr)
                sys.exit(1)
        # Build all our Docker images.
        self.failures = []
        for target in subslice_targets(self.start, self.stop):
            self.build_versions(docker, target)
            # Only build if the previous image succeeded, and if
            # the image with a package manager exists.
            if self.failures and self.failures[-1] == target:
                continue
            elif not self.with_package_managers:
                continue
            if os.path.exists(f'{HOME}/docker/pkgimages/Dockerfile.{target}'):
                self.build_versions(docker, target, with_pkg=True)
        # Print any failures.
        if self.failures:
            print('Error: Failures occurred.', file=sys.stderr)
            print('-------------------------', file=sys.stderr)
            for failure in self.failures:
                print(failure, file=sys.stderr)
            sys.exit(1)
class BuildAllCommand(BuildImagesCommand):
    '''Build Docker images and the Python library for dist.'''
    description = 'build all docker images and wheels for release'
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        '''Build all images and package for release.'''
        # Build every docker image, then the python distributions.
        BuildImagesCommand.run(self)
        for step in ('clean_dist', 'configure', 'build', 'sdist', 'bdist_wheel'):
            self.run_command(step)
class BuildCommand(build_py):
    '''Override build command to configure builds.'''
    def run(self):
        # Render version/template files before the normal python build.
        self.run_command('version')
        build_py.run(self)
class InstallCommand(install):
    '''Override install command to configure builds.'''
    def run(self):
        # Note: this should already be run, and this is redundant.
        # However, if `skip_build` is provided, this needs to be run.
        self.run_command('version')
        install.run(self)
class PushCommand(Command):
    '''Push all Docker images to Docker hub.'''
    description = 'push all docker images to docker hub'
    user_options = [
        ('start=', None, 'Start point for images to push.'),
        ('stop=', None, 'Stop point for images to push.'),
        ('with-package-managers=', None, 'Build package manager images.'),
    ]
    def initialize_options(self):
        self.start = None
        self.stop = None
        self.with_package_managers = None
    def finalize_options(self):
        parse_literal(self, 'with_package_managers', None, bool_type)
    def push_image(self, docker, target, with_package_managers=False):
        '''Push an image to Docker Hub.'''
        image = image_from_target(target, with_package_managers)
        check_call(subprocess.call([docker, 'push', image]))
    def push_versions(self, docker, target, with_package_managers=False):
        '''Push all versions of a given target.'''
        self.push_image(docker, target, with_package_managers)
        for version in semver():
            versioned = f'{target}-{version}'
            self.push_image(docker, versioned, with_package_managers)
    def push_target(self, docker, target, with_package_managers=False):
        '''Push all images for a given target.'''
        self.push_versions(docker, target, with_package_managers)
        # GNU/Linux targets have aliases without the triple suffix.
        suffix = '-unknown-linux-gnu'
        if target.endswith(suffix):
            self.push_versions(docker, target[:-len(suffix)], with_package_managers)
    def run(self):
        '''Push all Docker images to Docker hub.'''
        docker = shutil.which('docker')
        if not docker:
            raise FileNotFoundError('Unable to find command docker.')
        # Push all our Docker images.
        for target in subslice_targets(self.start, self.stop):
            self.push_target(docker, target)
            dockerfile = f'{HOME}/docker/pkgimages/Dockerfile.{target}'
            if self.with_package_managers and os.path.exists(dockerfile):
                self.push_target(docker, target, with_package_managers=True)
class PublishCommand(Command):
    '''Publish a Python version.'''
    description = 'publish python version to PyPi'
    user_options = [
        ('test=', None, 'Upload to the test repository.'),
    ]
    def initialize_options(self):
        self.test = None
    def finalize_options(self):
        parse_literal(self, 'test', None, bool_type)
    def run(self):
        '''Build the distributions and upload them with twine.'''
        if not has_module('twine'):
            raise FileNotFoundError('Unable to find module twine.')
        # Rebuild everything from a clean slate before uploading.
        for step in ('clean_dist', 'configure', 'build', 'sdist', 'bdist_wheel'):
            self.run_command(step)
        command = [sys.executable, '-m', 'twine', 'upload']
        if self.test:
            command.extend(['--repository', 'testpypi'])
        command.extend(glob.glob(f'{HOME}/dist/*'))
        check_call(subprocess.call(command))
class TestCommand(Command):
    '''Run the unittest suite.'''
    description = 'run unittest suite'
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        '''Run the python test suite through tox.'''
        if not has_module('tox'):
            raise FileNotFoundError('Unable to find module tox.')
        command = ['tox', HOME]
        check_call(subprocess.call(command))
class LintCommand(Command):
    '''Lint python code.'''
    description = 'lint python code'
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        '''Run flake8 over the project.'''
        if not has_module('flake8'):
            raise FileNotFoundError('Unable to find module flake8.')
        # Render templates before linting.
        self.run_command('configure')
        command = ['flake8', HOME]
        check_call(subprocess.call(command))
class TestImagesCommand(Command):
    '''Run the Docker test suite.'''
    description = 'run docker test suite'
    user_options = [
        ('start=', None, 'Start point for test suite.'),
        ('stop=', None, 'Stop point for test suite.'),
        ('os=', None, 'Do operating system tests.'),
        ('metal=', None, 'Do bare-metal tests.'),
    ]
    # Architectures with dedicated bare-metal hardware test scripts.
    # A tuple entry maps (image architecture, script prefix).
    metal_tests = [
        'arm',
        'arm64',
        'avr',
        'ppc',
        'mips',
        'mipsel',
        'riscv32',
        'riscv64',
        ('i686', 'x86'),
    ]
    def initialize_options(self):
        self.start = None
        self.stop = None
        # OS tests run by default; bare-metal hardware tests are opt-in.
        self.os = True
        self.metal = None
    def finalize_options(self):
        parse_literal(self, 'os', None, bool_type)
        parse_literal(self, 'metal', None, bool_type)
    def git_clone(self, git, repository):
        '''Clone a given repository.'''
        check_call(subprocess.call([git, 'clone', repository, f'{HOME}/buildtests']))
    def run_test(
        self,
        docker,
        target,
        os_type,
        script=None,
        cpu=None,
        **envvars
    ):
        '''Run test for a single target.

        * `docker` - Path to the docker binary.
        * `target` - Image target name.
        * `os_type` - Exported to the container as `TYPE`.
        * `script` - Test script basename under `/test` (default 'image-test').
        * `cpu` - Optional CPU model, exported as `CPU`.
        * `envvars` - Extra environment variables for the container.
        '''
        # Get our command.
        if script is None:
            script = 'image-test'
        command = f'/test/{script}.sh'
        if cpu is not None:
            command = f'export CPU={cpu}; {command}'
        # Targets without start files need `-nostartfiles` appended.
        if self.nostartfiles(target):
            flags = envvars.get('FLAGS')
            if flags:
                flags = f'{flags} -nostartfiles'
            else:
                flags = '-nostartfiles'
            envvars['FLAGS'] = flags
        # Build and call our docker command.
        docker_command = [
            docker,
            'run',
            '--name', f'xcross-test-{target}',
            '-v', f'{HOME}/test:/test',
            '--env', f'IMAGE={target}',
            '--env', f'TYPE={os_type}',
        ]
        for key, value in envvars.items():
            docker_command += ['--env', f'{key}={value}']
        docker_command.append(image_from_target(target))
        docker_command += ['/bin/bash', '-c', command]
        subprocess.check_call(docker_command)
        # Clean up our stopped container.
        subprocess.check_call([docker, 'rm', f'xcross-test-{target}'])
    def nostartfiles(self, target):
        '''Check if an image does not have startfiles.'''
        # i[3-7]86 does not provide start files, a known bug with newlib.
        # moxie cannot find `__bss_start__` and `__bss_end__`.
        # sparc cannot find `__stack`.
        # there is no crt0 for x86_64
        regex = re.compile(r'''^(?:
            (?:i[3-7]86-unknown-elf)|
            (?:moxie.*-none-elf)|
            (?:sparc-unknown-elf)|
            (?:x86_64-unknown-elf)
        )$''', re.X)
        return regex.match(target)
    def skip(self, target):
        '''Check if we should skip a given target.'''
        # Check if we should skip a test.
        # PPCLE is linked to the proper library, which contains the
        # proper symbols, but still fails with an error:
        # undefined reference to `_savegpr_29`.
        return target == 'ppcle-unknown-elf'
    def run_wasm(self, docker, **envvars):
        '''Run a web-assembly target.'''
        self.run_test(
            docker,
            'wasm',
            'script',
            **envvars,
            NO_PERIPHERALS='1',
            TOOLCHAIN1='jsonly',
            TOOLCHAIN2='wasm',
            TOOLCHAIN1_FLAGS='-s WASM=0',
            TOOLCHAIN2_FLAGS='-s WASM=1',
        )
    def run_os(self, docker):
        '''Run the tests with an operating system.'''
        # Configure our test runner: without an explicit start point,
        # testing begins at the first image.
        has_started = self.start is None
        has_stopped = False
        metal_images = sorted([i.target for i in images if i.os.is_baremetal()])
        os_images = sorted([i.target for i in images if i.os.is_os()])
        # Run OS images.
        testdir = f'{HOME}/test/buildtests'
        shutil.copytree(f'{HOME}/test/cpp-helloworld', testdir, dirs_exist_ok=True)
        try:
            for target in os_images:
                if has_started or self.start == target:
                    has_started = True
                    if not self.skip(target):
                        self.run_test(docker, target, 'os')
                if self.stop == target:
                    has_stopped = True
                    break
            # Run the special images.
            if has_started and not has_stopped:
                self.run_wasm(docker)
                self.run_wasm(docker, CMAKE_FLAGS='-DJS_ONLY=1')
                self.run_test(docker, os_images[0], 'os', CMAKE_FLAGS='-GNinja')
                self.run_wasm(docker, CMAKE_FLAGS='-GNinja')
                self.run_test(docker, 'ppc-unknown-linux-gnu', 'os', cpu='e500mc', NORUN2='1')
                self.run_test(docker, 'ppc64-unknown-linux-gnu', 'os', cpu='power9')
                self.run_test(docker, 'mips-unknown-linux-gnu', 'os', cpu='24Kf')
        finally:
            shutil.rmtree(testdir, ignore_errors=True)
        if has_stopped:
            return
        # Run metal images.
        shutil.copytree(f'{HOME}/test/cpp-atoi', testdir, dirs_exist_ok=True)
        try:
            for target in metal_images:
                if has_started or self.start == target:
                    has_started = True
                    if not self.skip(target):
                        self.run_test(docker, target, 'metal')
                if self.stop == target:
                    has_stopped = True
                    break
        finally:
            shutil.rmtree(testdir, ignore_errors=True)
    def run_metal(self, docker):
        '''Run the bare-metal tests.'''
        for arch in self.metal_tests:
            if isinstance(arch, tuple):
                image = f'{arch[0]}-unknown-elf'
                script = f'{arch[1]}-hw'
            else:
                # BUG FIX: a stray trailing comma previously made `image`
                # a 1-tuple, producing a mangled docker image name.
                image = f'{arch}-unknown-elf'
                script = f'{arch}-hw'
            self.run_test(docker, image, 'metal', script=script)
    def run(self):
        '''Run the docker test suite.'''
        # Find our necessary commands.
        docker = shutil.which('docker')
        if not docker:
            raise FileNotFoundError('Unable to find command docker.')
        if self.os:
            self.run_os(docker)
        if self.metal:
            self.run_metal(docker)
class TestAllCommand(TestImagesCommand):
    '''Run the Python and Docker test suites.'''
    def run(self):
        '''Run the python suite first, then the docker image suite.'''
        self.run_command('test')
        TestImagesCommand.run(self)
# IMAGES
# ------
# There are two types of images:
# 1). Images with an OS layer.
# 2). Bare-metal machines.
# Bare-metal machines don't use newlib's nanomalloc, so these do not
# support system allocators.
class OperatingSystem(enum.Enum):
    '''Enumerations for known operating systems.'''
    Android = enum.auto()
    BareMetal = enum.auto()
    Linux = enum.auto()
    Emscripten = enum.auto()
    Windows = enum.auto()
    Unknown = enum.auto()
    def is_baremetal(self):
        '''Whether the target has no operating-system layer at all.'''
        return self is OperatingSystem.BareMetal
    def is_emscripten(self):
        '''Whether the target is a web-assembly (emscripten) system.'''
        return self is OperatingSystem.Emscripten
    def is_os(self):
        '''Whether the target runs a full operating system.'''
        return self in (
            OperatingSystem.Android,
            OperatingSystem.Linux,
            OperatingSystem.Windows,
        )
    def to_cmake(self):
        '''Get the identifier for the CMake system name.'''
        return cmake_string[self]
    def to_conan(self):
        '''Get the identifier for the Conan system name.'''
        return conan_string[self]
    def to_meson(self):
        '''Get the identifier for the Meson system name.'''
        return meson_string[self]
    def to_triple(self):
        '''Get the identifier as a triple string.'''
        return triple_string[self]
    def to_vcpkg(self):
        '''Get the identifier for the vcpkg system name.'''
        return vcpkg_string[self]
    @staticmethod
    def from_triple(string):
        '''Get the operating system from a triple string.'''
        return triple_os[string]
# System-name lookup tables keyed by `OperatingSystem` member.
cmake_string = {
    OperatingSystem.Android: 'Android',
    OperatingSystem.BareMetal: 'Generic',
    # This gets ignored anyway.
    OperatingSystem.Emscripten: 'Emscripten',
    OperatingSystem.Linux: 'Linux',
    OperatingSystem.Windows: 'Windows',
    OperatingSystem.Unknown: 'Generic',
}
conan_string = {
    # Conan uses CMake's feature detection for Android,
    # which is famously broken. We have our custom toolchains
    # to pass the proper build arguments. Just say Linux,
    # and run with it.
    OperatingSystem.Android: 'Linux',
    OperatingSystem.Linux: 'Linux',
    OperatingSystem.Windows: 'Windows',
}
meson_string = {
    # The default use is just to use 'linux' for Android.
    OperatingSystem.Android: 'linux',
    OperatingSystem.BareMetal: 'bare metal',
    OperatingSystem.Linux: 'linux',
    OperatingSystem.Windows: 'windows',
}
triple_string = {
    OperatingSystem.Android: 'linux',
    # Bare-metal triples carry no OS component.
    OperatingSystem.BareMetal: None,
    OperatingSystem.Emscripten: 'emscripten',
    OperatingSystem.Linux: 'linux',
    OperatingSystem.Windows: 'w64',
}
vcpkg_string = {
    **cmake_string,
    # Uses MinGW to differentiate between legacy Windows apps and the
    # Universal Windows Platform. Since we only support MinGW, use it.
    OperatingSystem.Windows: 'MinGW',
}
# Inverse of `triple_string`. Both Android and Linux map to 'linux';
# the later entry (Linux) wins, so 'linux' resolves to Linux.
triple_os = {v: k for k, v in triple_string.items()}
oses = {
    'linux': OperatingSystem.Linux,
    'none': OperatingSystem.BareMetal,
}
def extract_triple(triple):
    '''Extract components from the LLVM triple.

    Returns an `(arch, vendor, os, system)` tuple; omitted components
    are `None` (the `os` defaults to bare-metal).
    '''
    # Due to how we designed this, we can only
    # 1. Omit the vendor, os and system.
    # 2. Omit the vendor.
    # 3. Omit the os.
    # 4. Have all 4 components.
    split = triple.split('-')
    arch = split[0]
    if len(split) == 1:
        # ('arch',)
        vendor = None
        os = OperatingSystem.BareMetal
        system = None
    elif len(split) == 2 and split[1] in oses:
        # ('arch', 'os')
        vendor = None
        os = oses[split[1]]
        system = None
    elif len(split) == 3 and split[2] == 'mingw32':
        # ('arch', 'os', 'system'), e.g. 'x86_64-w64-mingw32': the middle
        # component is the 'w64' OS tag, not a vendor, and is discarded.
        vendor = None
        os = OperatingSystem.Windows
        system = split[2]
    elif len(split) == 3:
        # ('arch', 'vendor', 'system')
        vendor = split[1]
        os = OperatingSystem.BareMetal
        system = split[2]
    elif len(split) == 4:
        # ('arch', 'vendor', 'os', 'system')
        vendor = split[1]
        os = OperatingSystem.from_triple(split[2])
        system = split[3]
    else:
        raise ValueError(f'Invalid LLVM triple, got {triple}')
    return (arch, vendor, os, system)
class Image:
    '''
    Parameters (and defaults) for custom images.
    * `target` - Image name of the target (resembling an LLVM triple).
    * `triple` - LLVM triple of the target. (arch, vendor, os, system)

    Most attributes follow an optional-override pattern: the public
    property reads a `_name` backing attribute when one was assigned
    (usually via keyword arguments to `__init__`) and otherwise falls
    back to a computed default.
    '''
    def __init__(self, target, triple=None, **kwds):
        self.target = target
        # The triple defaults to the target name itself.
        self.triple = triple or target
        # NOTE: assigning `self.os` goes through the `os` property setter
        # below, which normalizes a string into an `OperatingSystem`.
        self.arch, self.vendor, self.os, self.system = extract_triple(self.triple)
        # Remaining keywords become attributes, routed through the
        # property setters where one is defined.
        for key, value in kwds.items():
            setattr(self, key, value)
    @classmethod
    def from_dict(cls, data):
        '''Build an image from a dict of keyword arguments.'''
        return cls(**data)
    @staticmethod
    def from_json(data):
        '''Build the appropriate Image subclass from a JSON record.'''
        # The 'type' key selects the subclass; see `image_types`.
        image_type = data.pop('type')
        return image_types[image_type].from_dict(data)
    @property
    def config(self):
        # Configuration name; defaults to the target name.
        return getattr(self, '_config', self.target)
    @config.setter
    def config(self, value):
        self._config = value
    @property
    def hardcoded_cpulist(self):
        # Shell export line for a hard-coded CPU list, or '' when unset.
        cpus = getattr(self, 'cpulist', '')
        if cpus:
            return f'export HARDCODED="{cpus}"\n'
        return ''
    @property
    def ld_library_path(self):
        # Shell export line for LD_LIBRARY_PATH, or '' when unset.
        path = getattr(self, 'library_path', '')
        if path:
            return f'export LD_LIBRARY_PATH="{path}"\n'
        return ''
    @property
    def ld_preload(self):
        # Shell export line for LD_PRELOAD, or '' when unset.
        path = getattr(self, 'preload', '')
        if path:
            return f'export LD_PRELOAD="{path}"\n'
        return ''
    @property
    def cc_cpu_list(self):
        # Shell export line for CC_CPU_LIST, or '' when unset.
        cpulist = getattr(self, 'cc_cpulist', '')
        if cpulist:
            return f'export CC_CPU_LIST="{cpulist}"\n'
        return ''
    @property
    def run_cpu_list(self):
        # Shell export line for RUN_CPU_LIST, or '' when unset.
        cpulist = getattr(self, 'run_cpulist', '')
        if cpulist:
            return f'export RUN_CPU_LIST="{cpulist}"\n'
        return ''
    @property
    def flags(self):
        # Compiler flags; '' when unset.
        return getattr(self, '_flags', '')
    @flags.setter
    def flags(self, value):
        self._flags = value
    @property
    def optional_flags(self):
        # Optional compiler flags; '' when unset.
        return getattr(self, '_optional_flags', '')
    @optional_flags.setter
    def optional_flags(self, value):
        self._optional_flags = value
    @property
    def cflags(self):
        # 'CFLAGS="..." ' shell fragment (note trailing space), or ''.
        flags = self.flags
        if flags:
            return f'CFLAGS="{flags}" '
        return ''
    @property
    def optional_cflags(self):
        # 'OPTIONAL_CFLAGS="..." ' shell fragment, or ''.
        flags = self.optional_flags
        if flags:
            return f'OPTIONAL_CFLAGS="{flags}" '
        return ''
    @property
    def os(self):
        return self._os
    @os.setter
    def os(self, value):
        # Accept either an `OperatingSystem` member or a triple string.
        if isinstance(value, str):
            value = OperatingSystem.from_triple(value)
        self._os = value
    @property
    def processor(self):
        # Processor name; defaults to the triple's architecture.
        return getattr(self, '_processor', self.arch)
    @processor.setter
    def processor(self, value):
        self._processor = value
    @property
    def family(self):
        # Processor family; defaults to the processor name.
        return getattr(self, '_family', self.processor)
    @family.setter
    def family(self, value):
        self._family = value
    @property
    def qemu(self):
        # Whether this image runs binaries under qemu; off by default.
        return getattr(self, '_qemu', False)
    @qemu.setter
    def qemu(self, value):
        self._qemu = value
    @property
    def linkage(self):
        # Default linkage model for the image.
        return getattr(self, '_linkage', 'static')
    @linkage.setter
    def linkage(self, value):
        self._linkage = value
class AndroidImage(Image):
    '''Specialized properties for Android images.'''
    @property
    def os(self):
        # Android images are always Android, regardless of the triple.
        return OperatingSystem.Android
    @os.setter
    def os(self, _):
        # Ignore the value assigned by `Image.__init__`.
        pass
    @property
    def abi(self):
        # ABI name; defaults to the architecture.
        return getattr(self, '_abi', self.arch)
    @abi.setter
    def abi(self, value):
        self._abi = value
    @property
    def prefix(self):
        # Toolchain prefix; defaults to the architecture.
        return getattr(self, '_prefix', self.arch)
    @prefix.setter
    def prefix(self, value):
        self._prefix = value
    @property
    def toolchain(self):
        # Toolchain name derived from the triple, e.g. '<arch>-linux-<system>'.
        return f'{self.arch}-linux-{self.system}'
    @property
    def qemu(self):
        # Android binaries are never run under qemu.
        return False
class BuildRootImage(Image):
    '''Specialized properties for buildroot images.'''
    @property
    def use_32(self):
        # Whether to use the 32-bit buildroot variant; off by default.
        return getattr(self, '_use_32', False)
    @use_32.setter
    def use_32(self, value):
        self._use_32 = value
    @property
    def symlink_sysroot(self):
        # Whether the sysroot should be symlinked; off by default.
        return getattr(self, '_symlink_sysroot', False)
    @symlink_sysroot.setter
    def symlink_sysroot(self, value):
        self._symlink_sysroot = value
class CrosstoolImage(Image):
    '''Specialized properties for crosstool-NG images.'''
    @property
    def patches(self):
        # Optional list of patches for this toolchain; empty by default.
        return getattr(self, '_patches', [])
    @patches.setter
    def patches(self, value):
        self._patches = value
class DebianImage(Image):
    '''Specialized properties for Debian images.'''
    @property
    def cxx(self):
        # Debian cross g++ package name; a custom `_cxx` override may
        # itself contain a '{version}' placeholder, filled from gcc_major.
        default = f'g++-{{version}}-{self.processor}-{self.os.to_triple()}-{self.system}'
        return getattr(self, '_cxx', default).format(version=gcc_major)
    @cxx.setter
    def cxx(self, value):
        self._cxx = value
    @property
    def libc(self):
        # Debian cross libc package name; defaults to 'libc6-<arch>-cross'.
        default = f'libc6-{self.arch}-cross'
        return getattr(self, '_libc', default)
    @libc.setter
    def libc(self, value):
        self._libc = value
    @property
    def prefix(self):
        # Toolchain prefix; defaults to the processor name.
        return getattr(self, '_prefix', self.processor)
    @prefix.setter
    def prefix(self, value):
        self._prefix = value
    @property
    def qemu(self):
        # Debian images always run binaries under qemu.
        return True
class MuslCrossImage(Image):
    '''Specialized properties for musl-cross images.'''
    @property
    def gcc_config(self):
        # Extra GCC configure options, with a trailing space so the value
        # concatenates cleanly before further arguments; '' when unset.
        config = getattr(self, '_gcc_config', '')
        if config:
            return f'{config} '
        return ''
    @gcc_config.setter
    def gcc_config(self, value):
        self._gcc_config = value
class RiscvImage(Image):
    '''Specialized properties for RISC-V images.'''
    @property
    def processor(self):
        # 'riscv32'/'riscv64': the first component of the target name.
        return self.target.split('-')[0]
    @property
    def bits(self):
        # XLEN (e.g. 32 or 64), parsed from the processor name.
        return int(re.match(r'^riscv(\d+)$', self.processor).group(1))
    @property
    def optional_flags(self):
        # `-march`/`-mabi` flags, appended after any configured base flags.
        march = f'rv{self.bits}{self.extensions}'
        flags = f'-march={march} -mabi={self.abi}'
        # BUG FIX: read the base class's raw value directly. Referencing
        # `self.optional_flags` here re-enters this same property and
        # recurses forever whenever `_optional_flags` is set.
        base = Image.optional_flags.fget(self)
        if base:
            flags = f'{base} {flags}'
        return flags
class OtherImage(Image):
    '''Specialized properties for miscellaneous images.'''
    @property
    def dockerfile(self):
        # Arbitrary Dockerfile configuration mapping; empty by default.
        return getattr(self, '_dockerfile', {})
    @dockerfile.setter
    def dockerfile(self, value):
        self._dockerfile = value
# Maps the 'type' key of each config/images.json record to its
# `Image` subclass (see `Image.from_json`).
image_types = {
    'android': AndroidImage,
    'buildroot': BuildRootImage,
    'crosstool': CrosstoolImage,
    'debian': DebianImage,
    'musl-cross': MuslCrossImage,
    'riscv': RiscvImage,
    'other': OtherImage,
}
# Get all images.
images = [Image.from_json(i) for i in load_json(f'{HOME}/config/images.json')]
# Add extensions. Every toolchain family gets a hook for symmetry with
# `add_extensions`, but only RISC-V currently generates extra images.
def add_android_extensions():
    '''Add Android extensions (no generated variants).'''
def add_buildroot_extensions():
    '''Add buildroot extensions (no generated variants).'''
def add_crosstool_extensions():
    '''Add crosstool-NG toolchain extensions (no generated variants).'''
def add_debian_extensions():
    '''Add Debian toolchain extensions (no generated variants).'''
def add_musl_cross_extensions():
    '''Add musl-cross toolchain extensions (no generated variants).'''
# Add our RISC-V images with extensions.
def create_riscv_image(os, bits, arch, abi):
    '''Create a RISC-V image.

    `arch` is the extension string (e.g. 'imad') and `abi` the ABI name;
    raises `ValueError` for operating systems other than Linux/bare-metal.
    '''
    prefix = f'riscv{bits}-{arch}-{abi}'
    if os == OperatingSystem.Linux:
        suffix = 'multilib-linux-gnu'
        triple = 'riscv64-unknown-linux-gnu'
        qemu = True
    elif os == OperatingSystem.BareMetal:
        suffix = 'unknown-elf'
        triple = 'riscv64-unknown-elf'
        qemu = False
    else:
        raise ValueError(f'Unknown operating system {os.to_triple()}')
    return RiscvImage.from_dict({
        'target': f'{prefix}-{suffix}',
        'triple': triple,
        'qemu': qemu,
        'extensions': arch,
        'abi': abi,
    })
def add_riscv_extensions():
    '''Add RISC-V extensions.

    For each configured extension set, registers an image for every
    combination of the optional extensions on top of the required ones,
    for each word size. Arches containing 'd' (double-float) also get a
    hard-float ('<abi>d') ABI variant.
    '''
    riscv = config['riscv-gnu-toolchain']
    extensions = riscv['extensions']
    for key in extensions:
        # Hoist the repeated `extensions[key]` lookups.
        # (Also removes a dead `bits = riscv['bits']` assignment that was
        # immediately shadowed by the loop variable below.)
        entry = extensions[key]
        os = OperatingSystem.from_triple(entry['type'])
        required_ext = entry['required']
        all_ext = entry['all']
        # Optional extensions: allowed but not mandatory.
        diff = ''.join([i for i in all_ext if i not in required_ext])
        for bits in riscv['bits']:
            abi = riscv['abi'][bits]
            for count in range(len(diff) + 1):
                for combo in itertools.combinations(diff, count):
                    arch = f'{required_ext}{"".join(combo)}'
                    images.append(create_riscv_image(os, bits, arch, abi))
                    if 'd' in arch:
                        images.append(create_riscv_image(os, bits, arch, f'{abi}d'))
def add_extensions():
    '''Add extensions for supported operating systems.'''
    registrars = (
        add_android_extensions,
        add_buildroot_extensions,
        add_crosstool_extensions,
        add_debian_extensions,
        add_musl_cross_extensions,
        add_riscv_extensions,
    )
    for register in registrars:
        register()
add_extensions()
# Filter images by types, so each build script can be configured
# from only the images its builder owns.
android_images = [i for i in images if isinstance(i, AndroidImage)]
buildroot_images = [i for i in images if isinstance(i, BuildRootImage)]
crosstool_images = [i for i in images if isinstance(i, CrosstoolImage)]
debian_images = [i for i in images if isinstance(i, DebianImage)]
musl_cross_images = [i for i in images if isinstance(i, MuslCrossImage)]
riscv_images = [i for i in images if isinstance(i, RiscvImage)]
other_images = [i for i in images if isinstance(i, OtherImage)]
def create_array(values):
    '''Create a bash array from a list of values.

    Each value lands on its own indented, double-quoted line between
    the opening and closing parentheses.
    '''
    quoted = "\"\n    \"".join(values)
    return "(\n    \"" + quoted + "\"\n)"
class ConfigureCommand(VersionCommand):
'''Modify all configuration files.'''
description = 'configure template files'
    def configure_scripts(self):
        '''Configure the build scripts.

        Renders each `<name>.in` template to its final script via the
        inherited `configure`, replacing `^VAR^` placeholders with
        configured versions, checksums, paths, and options.
        '''
        # Destination paths for each rendered template.
        android = f'{HOME}/docker/android.sh'
        bashrc = f'{HOME}/docker/bash.bashrc'
        buildroot = f'{HOME}/docker/buildroot.sh'
        buildroot32 = f'{HOME}/docker/buildroot32.sh'
        cmake = f'{HOME}/docker/cmake.sh'
        conan = f'{HOME}/docker/conan.sh'
        entrypoint = f'{HOME}/docker/entrypoint.sh'
        gcc = f'{HOME}/docker/gcc.sh'
        gcc_patch = f'{HOME}/docker/gcc-patch.sh'
        meson = f'{HOME}/docker/meson.sh'
        musl = f'{HOME}/docker/musl.sh'
        qemu = f'{HOME}/docker/qemu.sh'
        qemu_apt = f'{HOME}/docker/qemu-apt.sh'
        riscv_gcc = f'{HOME}/docker/riscv-gcc.sh'
        shortcut = f'{HOME}/symlink/shortcut.sh'
        target_features = f'{HOME}/spec/target_features.py'
        vcpkg = f'{HOME}/docker/vcpkg.sh'
        vcpkg_triplet = f'{HOME}/docker/vcpkg-triplet.sh'
        # Android NDK setup, including the prefix/toolchain arrays
        # generated from the configured android images.
        self.configure(f'{android}.in', android, True, [
            ('CLANG_VERSION', config['android']['clang_version']),
            ('NDK_DIRECTORY', config['android']['ndk_directory']),
            ('NDK_VERSION', config['android']['ndk_version']),
            ('PREFIXES', create_array([i.prefix for i in android_images])),
            ('TOOLCHAINS', create_array([i.toolchain for i in android_images]))
        ])
        self.configure(f'{bashrc}.in', bashrc, False, [
            ('BIN', f'"{bin_directory}"'),
        ])
        self.configure(f'{cmake}.in', cmake, True, [
            ('UBUNTU_NAME', config['ubuntu']['version']['name']),
        ])
        self.configure(f'{conan}.in', conan, True, [
            ('BIN', f'"{bin_directory}"'),
            ('CONAN', "'/usr/local/bin/conan'"),
            ('USERNAME', config["options"]["username"]),
        ])
        # `buildroot.sh`/`buildroot32.sh` share the same substitutions.
        self.configure(f'{buildroot}.in', buildroot, True, [
            ('BUILDROOT_VERSION', buildroot_version),
            ('JOBS', config["options"]["build_jobs"]),
            ('USERNAME', config["options"]["username"]),
        ])
        self.configure(f'{buildroot32}.in', buildroot32, True, [
            ('BUILDROOT_VERSION', buildroot_version),
            ('JOBS', config["options"]["build_jobs"]),
            ('USERNAME', config["options"]["username"]),
        ])
        self.configure(f'{entrypoint}.in', entrypoint, True, [
            ('BIN', f'"{bin_directory}"'),
        ])
        # `gcc.sh`/`gcc-patch.sh` share the same substitutions.
        self.configure(f'{gcc}.in', gcc, True, [
            ('CROSSTOOL_VERSION', f'"{ct_version}"'),
            ('JOBS', config["options"]["build_jobs"]),
            ('SLEEP', config["options"]["sleep"]),
            ('TIMEOUT', config["options"]["timeout"]),
            ('USERNAME', config["options"]["username"]),
        ])
        self.configure(f'{gcc_patch}.in', gcc_patch, True, [
            ('CROSSTOOL_VERSION', f'"{ct_version}"'),
            ('JOBS', config["options"]["build_jobs"]),
            ('SLEEP', config["options"]["sleep"]),
            ('TIMEOUT', config["options"]["timeout"]),
            ('USERNAME', config["options"]["username"]),
        ])
        self.configure(f'{meson}.in', meson, True, [
            ('BIN', f'"{bin_directory}"'),
            ('MESON', "'/usr/local/bin/meson'"),
        ])
        # musl toolchain: source versions and their archive checksums.
        self.configure(f'{musl}.in', musl, True, [
            ('BINUTILS_VERSION', binutils_version),
            ('BINUTILS_XZ_SHA1', config['binutils']['version']['xz_sha1']),
            ('GCC_VERSION', gcc_version),
            ('GCC_XZ_SHA1', config['gcc']['version']['xz_sha1']),
            ('GMP_VERSION', gmp_version),
            ('GMP_BZ2_SHA1', config['gmp']['version']['bz2_sha1']),
            ('ISL_VERSION', isl_version),
            ('ISL_BZ2_SHA1', config['isl']['version']['bz2_sha1']),
            ('MPC_VERSION', mpc_version),
            ('MPC_GZ_SHA1', config['mpc']['version']['gz_sha1']),
            ('MPFR_VERSION', mpfr_version),
            ('MPFR_BZ2_SHA1', config['mpfr']['version']['bz2_sha1']),
            ('LINUX_HEADERS_VERSION', linux_headers_version),
            ('LINUX_HEADERS_XZ_SHA1', config['linux-headers']['version']['xz_sha1']),
            ('LINUX_VERSION', linux_version),
            ('LINUX_XZ_SHA1', config['linux']['version']['xz_sha1']),
            ('MUSL_CROSS_VERSION', musl_cross_version),
            ('MUSL_VERSION', musl_version),
            ('MUSL_GZ_SHA1', config['musl']['version']['gz_sha1']),
            ('JOBS', config["options"]["build_jobs"]),
            ('USERNAME', config["options"]["username"]),
        ])
        self.configure(f'{qemu}.in', qemu, True, [
            ('JOBS', config["options"]["build_jobs"]),
            ('QEMU_VERSION', qemu_version),
            ('SYSROOT', f'"{config["options"]["sysroot"]}"'),
        ])
        self.configure(f'{qemu_apt}.in', qemu_apt, True, [
            ('BIN', f'"{bin_directory}"'),
        ])
        # RISC-V GNU toolchain component versions.
        self.configure(f'{riscv_gcc}.in', riscv_gcc, True, [
            ('BINUTILS_VERSION', riscv_binutils_version),
            ('GCC_VERSION', gcc_version),
            ('GDB_VERSION', riscv_gdb_version),
            ('GLIBC_VERSION', riscv_glibc_version),
            ('JOBS', config["options"]["build_jobs"]),
            ('NEWLIB_VERSION', riscv_newlib_version),
            ('TOOLCHAIN_VERSION', riscv_toolchain_version),
        ])
        self.configure(f'{shortcut}.in', shortcut, True, [
            ('BIN', f'"{bin_directory}"'),
        ])
        self.configure(f'{target_features}.in', target_features, True, [
            ('BIN', f'"{bin_directory}"'),
        ])
        self.configure(f'{vcpkg}.in', vcpkg, True, [
            ('SYSROOT', f'"{config["options"]["sysroot"]}"'),
        ])
        self.configure(f'{vcpkg_triplet}.in', vcpkg_triplet, True, [
            ('BIN', f'"{bin_directory}"'),
            ('SYSROOT', f'"{config["options"]["sysroot"]}"'),
        ])
def configure_ctng_config(self):
'''Configure the scripts for crosstool-NG.'''
patch = f'{HOME}/ct-ng/patch.sh'
replacements = []
# Patch the GCC version.
old_gcc_major = '8'
old_gcc_version = '8.3.0'
replacements.append(('GCC_V_OLD', f'CT_GCC_V_{old_gcc_major}=y'))
ct_gcc = [f'CT_GCC_V_{gcc_major}=y']
for gcc_v in reversed(range(int(old_gcc_major), int(gcc_major))):
ct_gcc.append(f'# CT_GCC_V_{gcc_v} is not set')
replacements.append(('GCC_V_NEW', '\\n'.join(ct_gcc)))
replacements.append(('GCC_OLD', old_gcc_version))
replacements.append(('GCC_NEW', gcc_version))
# Patch the MinGW version.
old_mingw_major = '6'
old_mingw_version = '6.0.0'
replacements.append(('MINGW_V_OLD', f'CT_MINGW_V_{old_mingw_major}=y'))
ct_mingw = [f'CT_MINGW_V_{mingw_major}=y']
for mingw_v in reversed(range(int(old_mingw_major), int(mingw_major))):
ct_mingw.append(f'# CT_MINGW_V_{mingw_v} is not set')
replacements.append(('MINGW_V_NEW', '\\n'.join(ct_mingw)))
replacements.append(('MINGW_OLD', old_mingw_version))
replacements.append(('MINGW_NEW', mingw_version))
# Configure the glibc version.
old_glibc_major = '2'
old_glibc_minor = '29'
old_glibc_version = '2.29'
replacements.append(('GLIBC_V_OLD', f'CT_GLIBC_V_{old_glibc_major}_{old_glibc_minor}=y'))
ct_glibc = [f'CT_GLIBC_V_{glibc_major}_{glibc_minor}=y']
if old_glibc_major == glibc_major:
for glibc_v in reversed(range(int(old_glibc_minor), int(glibc_minor))):
ct_glibc.append(f'# CT_GLIBC_V_{glibc_major}_{glibc_v} is not set')
else:
ct_glibc.append(f'# CT_GLIBC_V_{old_glibc_major}_{old_glibc_minor} is not set')
for glibc_v in reversed(range(int(old_glibc_major) + 1, int(glibc_major))):
ct_glibc.append(f'# CT_GLIBC_V_{glibc_major}_0 is not set')
replacements.append(('GLIBC_V_NEW', '\\n'.join(ct_glibc)))
replacements.append(('GLIBC_OLD', old_glibc_version))
replacements.append(('GLIBC_NEW', glibc_version))
# Configure the musl version.
old_musl_major = '1'
old_musl_minor = '1'
old_musl_patch = '21'
old_musl_version = '1.1.21'
replacements.append((
'MUSL_V_OLD',
f'CT_MUSL_V_{old_musl_major}_{old_musl_minor}_{old_musl_patch}=y'
))
ct_musl = [
f'CT_MUSL_V_{musl_major}_{musl_minor}_{musl_patch}=y',
f'# CT_MUSL_V_{old_musl_major}_{old_musl_minor}_{old_musl_patch} is not set'
]
replacements.append(('MUSL_V_NEW', '\\n'.join(ct_musl)))
replacements.append(('MUSL_OLD', old_musl_version))
replacements.append(('MUSL_NEW', musl_version))
# Configure the expat version.
old_expat_major = '2'
old_expat_minor = '2'
old_expat_version = '2.2.6'
replacements.append(('EXPAT_V_OLD', f'CT_EXPAT_V_{old_expat_major}_{old_expat_minor}=y'))
ct_expat = [
f'CT_EXPAT_V_{expat_major}_{expat_minor}=y',
f'# CT_EXPAT_V_{old_expat_major}_{old_expat_minor} is not set'
]
replacements.append(('EXPAT_V_NEW', '\\n'.join(ct_expat)))
replacements.append(('EXPAT_OLD', old_expat_version))
replacements.append(('EXPAT_NEW', expat_version))
self.configure(f'{patch}.in', patch, True, replacements)
def configure_musl_config(self):
'''Configure the MUSL libc config files.'''
template = f'{HOME}/musl/config.mak.in'
for image in musl_cross_images:
outfile = f'{HOME}/musl/config/{image.target}.mak'
self.configure(template, outfile, False, [
('BINUTILS_VERSION', binutils_version),
('GCC_CONFIG', image.gcc_config),
('GCC_VERSION', gcc_version),
('GMP_VERSION', gmp_version),
('ISL_VERSION', isl_version),
('LINUX_HEADERS_VERSION', linux_headers_version),
('LINUX_VERSION', linux_version),
('MPC_VERSION', mpc_version),
('MPFR_VERSION', mpfr_version),
('MUSL_VERSION', musl_version),
('TARGET', image.config),
('USERNAME', config['options']['username']),
])
def configure_dockerfile(
self,
image,
template=None,
replacements=None,
base='ubuntu',
spec='spec',
symlink='symlink',
toolchain='toolchain',
wrapper='wrapper',
linker='',
cc='',
cxx='',
):
'''Configure a Dockerfile from template.'''
# These files are read in the order they're likely to change,
# as well as compile-time.
# Any template files may have long compilations, and will
# change rarely. Qemu is an apt package, and unlikely to change.
# Symlinks, toolchains, and entrypoints change often, but are
# cheap and easy to fix.
contents = []
# Mandatory Docker templates, the base image.
# These will **never** change,
with open(f'{HOME}/docker/Dockerfile.{base}.in', 'r') as file:
contents.append(file.read())
with open(f'{HOME}/docker/Dockerfile.adduser.in', 'r') as file:
contents.append(file.read())
with open(f'{HOME}/docker/Dockerfile.build-essential.in', 'r') as file:
contents.append(file.read())
with open(f'{HOME}/docker/Dockerfile.directory.in', 'r') as file:
contents.append(file.read())
# Optional docker templates, in order of compiler time.
# These will change, but it's important later templates
# build faster than earlier templates. If done incorrectly,
# a full rebuild can take well over a week.
if template is not None:
with open(template, 'r') as file:
contents.append(file.read())
if image.qemu:
with open(f'{HOME}/docker/Dockerfile.qemu.in', 'r') as file:
contents.append(file.read())
if wrapper is not None:
with open(f'{HOME}/docker/Dockerfile.{wrapper}.in', 'r') as file:
contents.append(file.read())
if symlink is not None:
with open(f'{HOME}/docker/Dockerfile.{symlink}.in', 'r') as file:
contents.append(file.read())
if spec is not None:
with open(f'{HOME}/docker/Dockerfile.{spec}.in', 'r') as file:
contents.append(file.read())
if toolchain is not None:
with open(f'{HOME}/docker/Dockerfile.{toolchain}.in', 'r') as file:
contents.append(file.read())
# Add the mandatory entrypoint.
with open(f'{HOME}/docker/Dockerfile.entrypoint.in', 'r') as file:
contents.append(file.read())
# Add image labels and metadata.
with open(f'{HOME}/docker/Dockerfile.metadata.in', 'r') as file:
contents.append(file.read())
contents = '\n'.join(contents)
# Add to the replacements all the shared values.
if replacements is None:
replacements = []
replacements = replacements + [
('AUTHORS', config['metadata']['authors']),
('EMSDK_VERSION', emsdk_version),
('BIN', f'"{bin_directory}"'),
('CC', f'"{cc}"'),
('CXX', f'"{cxx}"'),
('ENTRYPOINT', f'"{bin_directory}/entrypoint.sh"'),
('FLAGS', f'"{image.flags}"'),
('LINKER', f'"{linker}"'),
('MAINTAINER', config['metadata']['maintainer']),
('OPTIONAL_FLAGS', f'"{image.optional_flags}"'),
('OS', image.os.to_triple() or 'unknown'),
('TARGET', image.target),
('UBUNTU_VERSION', ubuntu_version),
('URL', config['metadata']['url']),
('USERNAME', config['options']['username']),
('VCS_URL', config['metadata']['vcs-url']),
]
# Replace the contents and write the output to file.
outfile = f'{HOME}/docker/images/Dockerfile.{image.target}'
contents = self.replace(contents, replacements)
self.write_file(outfile, contents, False)
def configure_vcpkg_dockerfile(self, base='ubuntu'):
'''Configure only the vcpkg Dockefile.'''
# This is a base image shared by multiple builds.
contents = []
with open(f'{HOME}/docker/Dockerfile.{base}.in', 'r') as file:
contents.append(file.read())
with open(f'{HOME}/docker/Dockerfile.vcpkg.in', 'r') as file:
contents.append(file.read())
contents = '\n'.join(contents)
# Replace the contents and write the output to file.
replacements = [
('UBUNTU_VERSION', ubuntu_version),
]
outfile = f'{HOME}/docker/pkgimages/Dockerfile.vcpkg'
contents = self.replace(contents, replacements)
self.write_file(outfile, contents, False)
def configure_package_dockerfile(
self,
image,
compiler=None,
compiler_version=None,
conan_system=None,
meson_system=None,
vcpkg_system=None,
):
'''Configure a Dockerfile with package managers enabled.'''
if compiler is None:
compiler = 'gcc'
if compiler_version is None:
compiler_version = gcc_major
if conan_system is None:
conan_system = image.os.to_conan()
if meson_system is None:
meson_system = image.os.to_meson()
if vcpkg_system is None:
vcpkg_system = image.os.to_vcpkg()
template = f'{HOME}/docker/Dockerfile.package.in'
outfile = f'{HOME}/docker/pkgimages/Dockerfile.{image.target}'
self.configure(template, outfile, False, [
('COMPILER', compiler),
('COMPILER_VERSION', f'"{compiler_version}"'),
('CONAN_SYSTEM', conan_system),
('CPU_FAMILY', image.family),
('IMAGE_USER', config['options']['username']),
('LINKAGE', image.linkage),
('MESON_SYSTEM', meson_system),
('PROCESSOR', image.processor),
('REPOSITORY', config['metadata']['repository']),
('SYSROOT', f'"{config["options"]["sysroot"]}"'),
('TARGET', image.target),
('TRIPLE', image.triple),
('USERNAME', config['metadata']['username']),
('VCPKG_SYSTEM', vcpkg_system),
])
def configure_cmake(self, image, template, replacements):
'''Configure a CMake template.'''
replacements = replacements + [
('PROCESSOR', image.processor),
('OS', image.os.to_cmake()),
('USERNAME', config["options"]["username"]),
]
contents = []
with open(template, 'r') as file:
contents.append(file.read())
with open(f'{HOME}/cmake/toolchain-include.cmake.in', 'r') as file:
contents.append(file.read())
contents = '\n'.join(contents)
# Replace the contents and write the output to file.
cmake = f'{HOME}/cmake/toolchain/{image.target}.cmake'
contents = self.replace(contents, replacements)
self.write_file(cmake, contents, False)
def configure_symlinks(self, image, template, replacements):
'''Configure a symlink template.'''
replacements = replacements + [
('CC_CPU_LIST', image.cc_cpu_list),
('FLAGS', image.cflags),
('HARDCODED', image.hardcoded_cpulist),
('LD_LIBRARY_PATH', image.ld_library_path),
('LD_PRELOAD', image.ld_preload),
('OPTIONAL_FLAGS', image.optional_cflags),
('RUN_CPU_LIST', image.run_cpu_list),
('TRIPLE', image.triple),
('USERNAME', config["options"]["username"]),
]
symlink = f'{HOME}/symlink/toolchain/{image.target}.sh'
self.configure(template, symlink, True, replacements)
def configure_android(self, image):
'''Configure an Android-SDK image.'''
# Configure the dockerfile.
template = f'{HOME}/docker/Dockerfile.android.in'
self.configure_dockerfile(image, template, [
('ARCH', image.arch),
('TOOLCHAIN', image.toolchain),
])
# Configure the CMake toolchain.
cmake_template = f'{HOME}/cmake/android.cmake.in'
self.configure_cmake(image, cmake_template, [
('ABI', image.abi),
('NDK_DIRECTORY', config['android']['ndk_directory']),
('SDK_VERSION', config['android']['sdk_version']),
])
# Configure the symlinks.
symlink_template = f'{HOME}/symlink/android.sh.in'
self.configure_symlinks(image, symlink_template, [
('NDK_DIRECTORY', config['android']['ndk_directory']),
('PREFIX', f'{image.prefix}-linux-{image.system}'),
('SDK_VERSION', config['android']['sdk_version']),
('TOOLCHAIN', image.toolchain),
])
# Build derived images with package managers enabled.
# Only want the major version, Conan fails othewise.
compiler_version = config['android']['clang_version']
major_version = re.match(r'^(\d+).*$', compiler_version).group(1)
self.configure_package_dockerfile(image, 'clang', major_version)
def configure_buildroot(self, image):
'''Configure a buildroot image.'''
# Get the proper dependent parameters for our image.
if image.symlink_sysroot:
cmake_template = f'{HOME}/cmake/buildroot-qemu.cmake.in'
symlink_template = f'{HOME}/symlink/buildroot-qemu-sysroot.sh.in'
elif image.qemu:
cmake_template = f'{HOME}/cmake/buildroot-qemu.cmake.in'
symlink_template = f'{HOME}/symlink/buildroot-qemu.sh.in'
else:
cmake_template = f'{HOME}/cmake/buildroot.cmake.in'
symlink_template = f'{HOME}/symlink/buildroot.sh.in'
if image.use_32:
template = f'{HOME}/docker/Dockerfile.buildroot32.in'
else:
template = f'{HOME}/docker/Dockerfile.buildroot.in'
self.configure_dockerfile(image, template, [
('ARCH', image.processor),
('CONFIG', image.config),
])
# Configure the CMake toolchain.
self.configure_cmake(image, cmake_template, [
('TRIPLE', image.config),
])
# Configure the symlinks.
self.configure_symlinks(image, symlink_template, [
('ARCH', image.processor),
('TRIPLE', image.triple),
])
# Build derived images with package managers enabled.
if image.os == OperatingSystem.Linux or image.os == OperatingSystem.Windows:
self.configure_package_dockerfile(image)
def configure_crosstool(self, image):
'''Configure a crosstool-NG image.'''
# Configure the dockerfile.
if image.patches:
template = f'{HOME}/docker/Dockerfile.crosstool-patch.in'
files = []
for patch in image.patches:
files += glob.glob(f'diff/{patch}.*')
patches = [f'COPY ["{i}", "/src/diff/"]' for i in files]
patches = '\n'.join(patches)
else:
template = f'{HOME}/docker/Dockerfile.crosstool.in'
patches = ''
self.configure_dockerfile(image, template, [
('ARCH', image.processor),
('CONFIG', image.config),
('PATCH', patches),
])
# Get the proper dependent parameters for our image.
if image.os == OperatingSystem.BareMetal:
cmake_template = f'{HOME}/cmake/crosstool-elf.cmake.in'
symlink_template = f'{HOME}/symlink/crosstool.sh.in'
elif image.qemu:
cmake_template = f'{HOME}/cmake/crosstool-os-qemu.cmake.in'
symlink_template = f'{HOME}/symlink/crosstool-qemu.sh.in'
else:
cmake_template = f'{HOME}/cmake/crosstool-os.cmake.in'
symlink_template = f'{HOME}/symlink/crosstool.sh.in'
# Configure the CMake toolchain.
self.configure_cmake(image, cmake_template, [
('TRIPLE', image.triple),
])
# Configure the symlinks.
self.configure_symlinks(image, symlink_template, [
('ARCH', image.processor),
('TRIPLE', image.triple),
])
# Build derived images with package managers enabled.
if image.os == OperatingSystem.Linux or image.os == OperatingSystem.Windows:
self.configure_package_dockerfile(image)
def configure_debian(self, image):
'''Configure a debian-based docker file.'''
# Configure the dockerfile.
template = f'{HOME}/docker/Dockerfile.debian.in'
self.configure_dockerfile(image, template, [
('ARCH', image.processor),
('G++', image.cxx),
('LIBC', image.libc),
])
# Get the proper dependent parameters for our image.
if image.os != OperatingSystem.Linux:
raise NotImplementedError
if image.target == 'x86_64-unknown-linux-gnu':
cmake_template = f'{HOME}/cmake/native.cmake.in'
symlink_template = f'{HOME}/symlink/native.sh.in'
else:
cmake_template = f'{HOME}/cmake/debian.cmake.in'
symlink_template = f'{HOME}/symlink/debian.sh.in'
# Configure the CMake toolchain.
self.configure_cmake(image, cmake_template, [])
# Configure the symlinks.
self.configure_symlinks(image, symlink_template, [
('GCC_MAJOR', gcc_major),
('PREFIX', image.prefix),
('PROCESSOR', image.processor),
('OS', image.os.to_triple()),
('SYSTEM', image.system),
])
# Build derived images with package managers enabled.
self.configure_package_dockerfile(image)
def configure_musl(self, image):
'''Configure a musl-cross-based image.'''
# Get the proper dependent parameters for our image.
if image.qemu:
cmake_template = f'{HOME}/cmake/musl-qemu.cmake.in'
symlink_template = f'{HOME}/symlink/musl-qemu.sh.in'
else:
cmake_template = f'{HOME}/cmake/musl.cmake.in'
symlink_template = f'{HOME}/symlink/musl.sh.in'
# Configure the dockerfile.
template = f'{HOME}/docker/Dockerfile.musl.in'
self.configure_dockerfile(image, template, [
('ARCH', image.processor),
('TRIPLE', image.config),
])
# Configure the CMake toolchain.
self.configure_cmake(image, cmake_template, [
('TRIPLE', image.config),
])
# Configure the symlinks.
self.configure_symlinks(image, symlink_template, [
('ARCH', image.processor),
('TRIPLE', image.config),
])
# Build derived images with package managers enabled.
self.configure_package_dockerfile(image)
def configure_riscv(self, image):
'''Configure a RISC-V-based image.'''
# Get the proper dependent parameters for our image.
if image.os == OperatingSystem.Linux:
cmake_template = f'{HOME}/cmake/riscv-linux.cmake.in'
elif image.os == OperatingSystem.BareMetal:
cmake_template = f'{HOME}/cmake/riscv-elf.cmake.in'
else:
raise NotImplementedError
if image.qemu:
symlink_template = f'{HOME}/symlink/riscv-qemu.sh.in'
else:
symlink_template = f'{HOME}/symlink/riscv.sh.in'
# Configure the dockerfile.
template = f'{HOME}/docker/Dockerfile.riscv.in'
self.configure_dockerfile(image, template, [
('ARCH', image.processor),
('TRIPLE', image.triple),
])
# Configure the CMake toolchain.
self.configure_cmake(image, cmake_template, [])
# Configure the symlinks.
self.configure_symlinks(image, symlink_template, [
('ARCH', image.processor),
('TRIPLE', image.triple),
])
# Build derived images with package managers enabled.
if image.os == OperatingSystem.Linux:
self.configure_package_dockerfile(image)
def configure_other(self, image):
'''Configure a miscellaneous image.'''
# Configure the dockerfile.
template = f'{HOME}/docker/Dockerfile.{image.target}.in'
if not os.path.exists(template):
template = None
self.configure_dockerfile(image, template, [
('ARCH', image.target),
('BINDIR', bin_directory),
], **image.dockerfile)
# Configure the CMake toolchain.
cmake_template = f'{HOME}/cmake/{image.target}.cmake.in'
self.configure_cmake(image, cmake_template, [])
# Configure the symlinks.
symlink_template = f'{HOME}/symlink/{image.target}.sh.in'
self.configure_symlinks(image, symlink_template, [])
# Build derived images with package managers enabled.
if hasattr(image, 'package_dockerfile'):
self.configure_package_dockerfile(image, **image.package_dockerfile)
def run(self):
'''Modify configuration files.'''
VersionCommand.run(self)
# Make the required subdirectories.
os.makedirs(f'{HOME}/cmake/toolchain', exist_ok=True)
os.makedirs(f'{HOME}/docker/images', exist_ok=True)
os.makedirs(f'{HOME}/docker/pkgimages', exist_ok=True)
os.makedirs(f'{HOME}/musl/config', exist_ok=True)
os.makedirs(f'{HOME}/symlink/toolchain', exist_ok=True)
# Configure base version info.
cmake = f'{HOME}/cmake/cmake'
emmake = f'{HOME}/symlink/emmake'
make = f'{HOME}/symlink/make.in'
self.configure(f'{cmake}.in', cmake, True, [
('CMAKE', "'/usr/bin/cmake'"),
('WRAPPER', ''),
('SYSROOT', f'"{config["options"]["sysroot"]}"'),
])
self.configure(make, emmake, True, [
('MAKE', "'/usr/bin/make'"),
('WRAPPER', 'emmake '),
])
# Configure our build scripts, and other configurations.
self.configure_scripts()
self.configure_ctng_config()
self.configure_musl_config()
# Configure images.
self.configure_vcpkg_dockerfile()
for image in android_images:
self.configure_android(image)
for image in buildroot_images:
self.configure_buildroot(image)
for image in crosstool_images:
self.configure_crosstool(image)
for image in debian_images:
self.configure_debian(image)
for image in musl_cross_images:
self.configure_musl(image)
for image in riscv_images:
self.configure_riscv(image)
for image in other_images:
self.configure_other(image)
# Path of the entry-point script installed alongside the package.
script = f'{HOME}/bin/xcross'
# Select distribution parameters by build mode: `py2exe` bundles a
# standalone Windows executable, setuptools registers a console entry
# point, and bare distutils falls back to installing the script.
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
    params = {
        'console': [{
            'script': f'{HOME}/xcross/__main__.py',
            'dest_base': 'xcross',
            'description': description,
            'comments': long_description,
            'product_name': 'xcross',
        }],
        'options': {
            'py2exe': {
                # Bundle everything into a single, compressed binary.
                'bundle_files': 1,
                'compressed': 1,
                'optimize': 2,
                'dist_dir': f'{HOME}',
                'dll_excludes': [],
            }
        },
        'zipfile': None,
    }
elif has_setuptools:
    params = {
        'entry_points': {
            'console_scripts': ['xcross = xcross:main'],
        },
    }
else:
    params = {
        'scripts': [f'{HOME}/bin/xcross'],
    }
setuptools.setup(
    name="xcross",
    author="<NAME>",
    author_email="<EMAIL>",
    version=version,
    packages=['xcross'],
    **params,
    description=description,
    long_description=long_description,
    long_description_content_type='text/markdown',
    python_requires='>3.6.0',
    license='Unlicense',
    keywords='compilers cross-compilation embedded',
    url='https://github.com/Alexhuszagh/xcross',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'License :: OSI Approved :: The Unlicense (Unlicense)',
        'Topic :: Software Development :: Compilers',
        'Topic :: Software Development :: Embedded Systems',
    ],
    # Map every custom command to its distutils/setuptools class.
    cmdclass={
        'build_all': BuildAllCommand,
        'build_image': BuildImageCommand,
        'build_images': BuildImagesCommand,
        'build_py': BuildCommand,
        'clean': CleanCommand,
        'clean_dist': CleanDistCommand,
        'configure': ConfigureCommand,
        'install': InstallCommand,
        'lint': LintCommand,
        'publish': PublishCommand,
        'push': PushCommand,
        'tag': TagCommand,
        'test_images': TestImagesCommand,
        'test': TestCommand,
        'test_all': TestAllCommand,
        'version': VersionCommand,
    },
)
| [
"enum.auto",
"re.compile",
"distutils.command.build_py.build_py.run",
"sys.exit",
"os.remove",
"os.path.exists",
"textwrap.dedent",
"os.chmod",
"subprocess.call",
"os.to_triple",
"glob.glob",
"subprocess.check_call",
"shutil.which",
"setuptools.setup",
"re.match",
"ast.literal_eval",
... | [((1991, 2005), 'os.chdir', 'os.chdir', (['HOME'], {}), '(HOME)\n', (1999, 2005), False, 'import os\n'), ((74118, 75424), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""xcross"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'version': 'version', 'packages': "['xcross']", 'description': 'description', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'python_requires': '""">3.6.0"""', 'license': '"""Unlicense"""', 'keywords': '"""compilers cross-compilation embedded"""', 'url': '"""https://github.com/Alexhuszagh/xcross"""', 'classifiers': "['Development Status :: 4 - Beta', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'License :: OSI Approved :: The Unlicense (Unlicense)',\n 'Topic :: Software Development :: Compilers',\n 'Topic :: Software Development :: Embedded Systems']", 'cmdclass': "{'build_all': BuildAllCommand, 'build_image': BuildImageCommand,\n 'build_images': BuildImagesCommand, 'build_py': BuildCommand, 'clean':\n CleanCommand, 'clean_dist': CleanDistCommand, 'configure':\n ConfigureCommand, 'install': InstallCommand, 'lint': LintCommand,\n 'publish': PublishCommand, 'push': PushCommand, 'tag': TagCommand,\n 'test_images': TestImagesCommand, 'test': TestCommand, 'test_all':\n TestAllCommand, 'version': VersionCommand}"}), "(name='xcross', author='<NAME>', author_email='<EMAIL>',\n version=version, packages=['xcross'], **params, description=description,\n long_description=long_description, long_description_content_type=\n 'text/markdown', python_requires='>3.6.0', license='Unlicense',\n keywords='compilers cross-compilation embedded', url=\n 'https://github.com/Alexhuszagh/xcross', classifiers=[\n 'Development Status :: 4 - Beta', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 
'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'License :: OSI Approved :: The Unlicense (Unlicense)',\n 'Topic :: Software Development :: Compilers',\n 'Topic :: Software Development :: Embedded Systems'], cmdclass={\n 'build_all': BuildAllCommand, 'build_image': BuildImageCommand,\n 'build_images': BuildImagesCommand, 'build_py': BuildCommand, 'clean':\n CleanCommand, 'clean_dist': CleanDistCommand, 'configure':\n ConfigureCommand, 'install': InstallCommand, 'lint': LintCommand,\n 'publish': PublishCommand, 'push': PushCommand, 'tag': TagCommand,\n 'test_images': TestImagesCommand, 'test': TestCommand, 'test_all':\n TestAllCommand, 'version': VersionCommand})\n", (74134, 75424), False, 'import setuptools\n'), ((1810, 1836), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1826, 1836), False, 'import os\n'), ((7078, 7174), 'subprocess.call', 'subprocess.call', (["[sys.executable, '-m', module, '--version']"], {'stdout': 'devnull', 'stderr': 'devnull'}), "([sys.executable, '-m', module, '--version'], stdout=devnull,\n stderr=devnull)\n", (7093, 7174), False, 'import subprocess\n'), ((8944, 9013), 'subprocess.call', 'subprocess.call', (["[docker, 'build', '-t', image, HOME, '--file', path]"], {}), "([docker, 'build', '-t', image, HOME, '--file', path])\n", (8959, 9013), False, 'import subprocess\n'), ((30319, 30330), 'enum.auto', 'enum.auto', ([], {}), '()\n', (30328, 30330), False, 'import enum\n'), ((30347, 30358), 'enum.auto', 'enum.auto', ([], {}), '()\n', (30356, 30358), False, 'import enum\n'), ((30371, 30382), 'enum.auto', 'enum.auto', ([], {}), '()\n', (30380, 30382), False, 'import enum\n'), ((30400, 30411), 'enum.auto', 'enum.auto', ([], {}), '()\n', (30409, 30411), False, 'import enum\n'), ((30426, 30437), 'enum.auto', 'enum.auto', ([], {}), '()\n', (30435, 30437), False, 'import enum\n'), ((30452, 30463), 'enum.auto', 'enum.auto', ([], {}), 
'()\n', (30461, 30463), False, 'import enum\n'), ((6708, 6731), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (6724, 6731), False, 'import ast\n'), ((6943, 6957), 'sys.exit', 'sys.exit', (['code'], {}), '(code)\n', (6951, 6957), False, 'import sys\n'), ((9349, 9399), 'shutil.rmtree', 'shutil.rmtree', (['f"""{HOME}/build"""'], {'ignore_errors': '(True)'}), "(f'{HOME}/build', ignore_errors=True)\n", (9362, 9399), False, 'import shutil\n'), ((9408, 9457), 'shutil.rmtree', 'shutil.rmtree', (['f"""{HOME}/dist"""'], {'ignore_errors': '(True)'}), "(f'{HOME}/dist', ignore_errors=True)\n", (9421, 9457), False, 'import shutil\n'), ((9466, 9526), 'shutil.rmtree', 'shutil.rmtree', (['f"""{HOME}/xcross.egg-info"""'], {'ignore_errors': '(True)'}), "(f'{HOME}/xcross.egg-info', ignore_errors=True)\n", (9479, 9526), False, 'import shutil\n'), ((9572, 9598), 'glob.glob', 'glob.glob', (['f"""{HOME}/*.dll"""'], {}), "(f'{HOME}/*.dll')\n", (9581, 9598), False, 'import glob\n'), ((9614, 9640), 'glob.glob', 'glob.glob', (['f"""{HOME}/*.exe"""'], {}), "(f'{HOME}/*.exe')\n", (9623, 9640), False, 'import glob\n'), ((9655, 9680), 'glob.glob', 'glob.glob', (['f"""{HOME}/*.so"""'], {}), "(f'{HOME}/*.so')\n", (9664, 9680), False, 'import glob\n'), ((10098, 10158), 'shutil.rmtree', 'shutil.rmtree', (['f"""{HOME}/cmake/toolchain"""'], {'ignore_errors': '(True)'}), "(f'{HOME}/cmake/toolchain', ignore_errors=True)\n", (10111, 10158), False, 'import shutil\n'), ((10167, 10225), 'shutil.rmtree', 'shutil.rmtree', (['f"""{HOME}/docker/images"""'], {'ignore_errors': '(True)'}), "(f'{HOME}/docker/images', ignore_errors=True)\n", (10180, 10225), False, 'import shutil\n'), ((10234, 10295), 'shutil.rmtree', 'shutil.rmtree', (['f"""{HOME}/docker/pkgimages"""'], {'ignore_errors': '(True)'}), "(f'{HOME}/docker/pkgimages', ignore_errors=True)\n", (10247, 10295), False, 'import shutil\n'), ((10304, 10360), 'shutil.rmtree', 'shutil.rmtree', (['f"""{HOME}/musl/config"""'], {'ignore_errors': 
'(True)'}), "(f'{HOME}/musl/config', ignore_errors=True)\n", (10317, 10360), False, 'import shutil\n'), ((10369, 10431), 'shutil.rmtree', 'shutil.rmtree', (['f"""{HOME}/symlink/toolchain"""'], {'ignore_errors': '(True)'}), "(f'{HOME}/symlink/toolchain', ignore_errors=True)\n", (10382, 10431), False, 'import shutil\n'), ((11047, 11060), 'os.stat', 'os.stat', (['file'], {}), '(file)\n', (11054, 11060), False, 'import os\n'), ((11069, 11103), 'os.chmod', 'os.chmod', (['file', '(st.st_mode | flags)'], {}), '(file, st.st_mode | flags)\n', (11077, 11103), False, 'import os\n'), ((13185, 13204), 'shutil.which', 'shutil.which', (['"""git"""'], {}), "('git')\n", (13197, 13204), False, 'import shutil\n'), ((13421, 13438), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (13436, 13438), False, 'import os\n'), ((13494, 13581), 'subprocess.call', 'subprocess.call', (["['git', 'rev-parse', tag]"], {'stdout': 'devnull', 'stderr': 'devnull', 'env': 'env'}), "(['git', 'rev-parse', tag], stdout=devnull, stderr=devnull,\n env=env)\n", (13509, 13581), False, 'import subprocess\n'), ((14843, 14865), 'shutil.which', 'shutil.which', (['"""docker"""'], {}), "('docker')\n", (14855, 14865), False, 'import shutil\n'), ((16949, 16971), 'shutil.which', 'shutil.which', (['"""docker"""'], {}), "('docker')\n", (16961, 16971), False, 'import shutil\n'), ((18968, 18986), 'distutils.command.build_py.build_py.run', 'build_py.run', (['self'], {}), '(self)\n', (18980, 18986), False, 'from distutils.command.build_py import build_py\n'), ((19276, 19293), 'distutils.command.install.install.run', 'install.run', (['self'], {}), '(self)\n', (19287, 19293), False, 'from distutils.command.install import install\n'), ((20882, 20904), 'shutil.which', 'shutil.which', (['"""docker"""'], {}), "('docker')\n", (20894, 20904), False, 'import shutil\n'), ((22089, 22116), 'glob.glob', 'glob.glob', (['f"""{HOME}/dist/*"""'], {}), "(f'{HOME}/dist/*')\n", (22098, 22116), False, 'import glob\n'), ((25401, 25438), 
'subprocess.check_call', 'subprocess.check_call', (['docker_command'], {}), '(docker_command)\n', (25422, 25438), False, 'import subprocess\n'), ((25489, 25551), 'subprocess.check_call', 'subprocess.check_call', (["[docker, 'rm', f'xcross-test-{target}']"], {}), "([docker, 'rm', f'xcross-test-{target}'])\n", (25510, 25551), False, 'import subprocess\n'), ((25878, 26069), 're.compile', 're.compile', (['"""^(?:\n (?:i[3-7]86-unknown-elf)|\n (?:moxie.*-none-elf)|\n (?:sparc-unknown-elf)|\n (?:x86_64-unknown-elf)\n )$"""', 're.X'], {}), '(\n """^(?:\n (?:i[3-7]86-unknown-elf)|\n (?:moxie.*-none-elf)|\n (?:sparc-unknown-elf)|\n (?:x86_64-unknown-elf)\n )$"""\n , re.X)\n', (25888, 26069), False, 'import re\n'), ((27301, 27376), 'shutil.copytree', 'shutil.copytree', (['f"""{HOME}/test/cpp-helloworld"""', 'testdir'], {'dirs_exist_ok': '(True)'}), "(f'{HOME}/test/cpp-helloworld', testdir, dirs_exist_ok=True)\n", (27316, 27376), False, 'import shutil\n'), ((28480, 28549), 'shutil.copytree', 'shutil.copytree', (['f"""{HOME}/test/cpp-atoi"""', 'testdir'], {'dirs_exist_ok': '(True)'}), "(f'{HOME}/test/cpp-atoi', testdir, dirs_exist_ok=True)\n", (28495, 28549), False, 'import shutil\n'), ((29557, 29579), 'shutil.which', 'shutil.which', (['"""docker"""'], {}), "('docker')\n", (29569, 29579), False, 'import shutil\n'), ((71716, 71769), 'os.makedirs', 'os.makedirs', (['f"""{HOME}/cmake/toolchain"""'], {'exist_ok': '(True)'}), "(f'{HOME}/cmake/toolchain', exist_ok=True)\n", (71727, 71769), False, 'import os\n'), ((71778, 71829), 'os.makedirs', 'os.makedirs', (['f"""{HOME}/docker/images"""'], {'exist_ok': '(True)'}), "(f'{HOME}/docker/images', exist_ok=True)\n", (71789, 71829), False, 'import os\n'), ((71838, 71892), 'os.makedirs', 'os.makedirs', (['f"""{HOME}/docker/pkgimages"""'], {'exist_ok': '(True)'}), "(f'{HOME}/docker/pkgimages', exist_ok=True)\n", (71849, 71892), False, 'import os\n'), ((71901, 71950), 'os.makedirs', 'os.makedirs', (['f"""{HOME}/musl/config"""'], {'exist_ok': 
'(True)'}), "(f'{HOME}/musl/config', exist_ok=True)\n", (71912, 71950), False, 'import os\n'), ((71959, 72014), 'os.makedirs', 'os.makedirs', (['f"""{HOME}/symlink/toolchain"""'], {'exist_ok': '(True)'}), "(f'{HOME}/symlink/toolchain', exist_ok=True)\n", (71970, 72014), False, 'import os\n'), ((9732, 9747), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (9741, 9747), False, 'import os\n'), ((13868, 13936), 'subprocess.call', 'subprocess.call', (["['git', 'tag', tag]"], {'stdout': 'devnull', 'stderr': 'devnull'}), "(['git', 'tag', tag], stdout=devnull, stderr=devnull)\n", (13883, 13936), False, 'import subprocess\n'), ((14752, 14763), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14760, 14763), False, 'import sys\n'), ((16086, 16130), 'subprocess.call', 'subprocess.call', (["[docker, 'tag', image, tag]"], {}), "([docker, 'tag', image, tag])\n", (16101, 16130), False, 'import subprocess\n'), ((17799, 17861), 'os.path.exists', 'os.path.exists', (['f"""{HOME}/docker/pkgimages/Dockerfile.{target}"""'], {}), "(f'{HOME}/docker/pkgimages/Dockerfile.{target}')\n", (17813, 17861), False, 'import os\n'), ((18217, 18228), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (18225, 18228), False, 'import sys\n'), ((20082, 20122), 'subprocess.call', 'subprocess.call', (["[docker, 'push', image]"], {}), "([docker, 'push', image])\n", (20097, 20122), False, 'import subprocess\n'), ((21232, 21294), 'os.path.exists', 'os.path.exists', (['f"""{HOME}/docker/pkgimages/Dockerfile.{target}"""'], {}), "(f'{HOME}/docker/pkgimages/Dockerfile.{target}')\n", (21246, 21294), False, 'import os\n'), ((22295, 22319), 'subprocess.call', 'subprocess.call', (['command'], {}), '(command)\n', (22310, 22319), False, 'import subprocess\n'), ((22718, 22748), 'subprocess.call', 'subprocess.call', (["['tox', HOME]"], {}), "(['tox', HOME])\n", (22733, 22748), False, 'import subprocess\n'), ((23183, 23216), 'subprocess.call', 'subprocess.call', (["['flake8', HOME]"], {}), "(['flake8', HOME])\n", 
(23198, 23216), False, 'import subprocess\n'), ((24141, 24206), 'subprocess.call', 'subprocess.call', (["[git, 'clone', repository, f'{HOME}/buildtests']"], {}), "([git, 'clone', repository, f'{HOME}/buildtests'])\n", (24156, 24206), False, 'import subprocess\n'), ((28357, 28399), 'shutil.rmtree', 'shutil.rmtree', (['testdir'], {'ignore_errors': '(True)'}), '(testdir, ignore_errors=True)\n', (28370, 28399), False, 'import shutil\n'), ((28942, 28984), 'shutil.rmtree', 'shutil.rmtree', (['testdir'], {'ignore_errors': '(True)'}), '(testdir, ignore_errors=True)\n', (28955, 28984), False, 'import shutil\n'), ((70836, 70860), 'os.path.exists', 'os.path.exists', (['template'], {}), '(template)\n', (70850, 70860), False, 'import os\n'), ((13682, 13756), 'subprocess.call', 'subprocess.call', (["['git', 'tag', '-d', tag]"], {'stdout': 'devnull', 'stderr': 'devnull'}), "(['git', 'tag', '-d', tag], stdout=devnull, stderr=devnull)\n", (13697, 13756), False, 'import subprocess\n'), ((17313, 17324), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (17321, 17324), False, 'import sys\n'), ((42807, 42842), 'itertools.combinations', 'itertools.combinations', (['diff', 'count'], {}), '(diff, count)\n', (42829, 42842), False, 'import itertools\n'), ((63518, 63558), 're.match', 're.match', (['"""^(\\\\d+).*$"""', 'compiler_version'], {}), "('^(\\\\d+).*$', compiler_version)\n", (63526, 63558), False, 'import re\n'), ((65426, 65454), 'glob.glob', 'glob.glob', (['f"""diff/{patch}.*"""'], {}), "(f'diff/{patch}.*')\n", (65435, 65454), False, 'import glob\n'), ((40199, 40240), 're.match', 're.match', (['"""^riscv(\\\\d+)$"""', 'self.processor'], {}), "('^riscv(\\\\d+)$', self.processor)\n", (40207, 40240), False, 'import re\n'), ((12751, 12780), 'textwrap.dedent', 'textwrap.dedent', (['version_info'], {}), '(version_info)\n', (12766, 12780), False, 'import textwrap\n'), ((42056, 42070), 'os.to_triple', 'os.to_triple', ([], {}), '()\n', (42068, 42070), False, 'import os\n')] |
from typing import Any
from unittest import TestCase
from unittest.mock import patch, MagicMock
import yaml
from github import GithubException
from reconcile.utils.openshift_resource import ResourceInventory
from reconcile.utils.saasherder import SaasHerder
from reconcile.utils.jjb_client import JJB
from reconcile.utils.saasherder import TARGET_CONFIG_HASH
from .fixtures import Fixtures
class MockJJB:
    """Lightweight stand-in for the JJB client used by saasherder tests.

    Wraps a fixed job mapping and forwards the static URL/ref helpers to
    the real ``JJB`` implementation so parsing behavior stays authentic.
    """

    def __init__(self, data):
        # Mapping of instance name -> list of job definitions.
        self.jobs = data

    @staticmethod
    def get_repo_url(job):
        """Delegate repository-URL extraction to the real JJB client."""
        return JJB.get_repo_url(job)

    @staticmethod
    def get_ref(job):
        """Delegate git-ref extraction to the real JJB client."""
        return JJB.get_ref(job)

    def get_all_jobs(self, job_types):
        """Return every configured job; the fake ignores ``job_types``."""
        return self.jobs
class TestSaasFileValid(TestCase):
    """Validation rules exercised through SaasHerder(..., validate=True).

    setUp builds one saas file with a single resource template and two
    targets (env1 on branch "main", env2 on branch "master"), plus a
    MockJJB whose jobs match the fixture's upstream definitions.
    Individual tests mutate the fixture in place before constructing a
    SaasHerder and then check the resulting ``valid`` flag.
    """

    def setUp(self):
        # Base fixture; tests mutate this dict before building a SaasHerder.
        self.saas_files = [
            {
                "path": "path1",
                "name": "a1",
                "managedResourceTypes": [],
                "resourceTemplates": [
                    {
                        "name": "rt",
                        "url": "url",
                        "targets": [
                            {
                                "namespace": {
                                    "name": "ns",
                                    "environment": {"name": "env1", "parameters": "{}"},
                                    "cluster": {"name": "cluster"},
                                },
                                "ref": "main",
                                "upstream": {"instance": {"name": "ci"}, "name": "job"},
                                "parameters": {},
                            },
                            {
                                "namespace": {
                                    "name": "ns",
                                    "environment": {"name": "env2", "parameters": "{}"},
                                    "cluster": {"name": "cluster"},
                                },
                                "ref": "master",
                                "upstream": {"instance": {"name": "ci"}, "name": "job"},
                                "parameters": {},
                            },
                        ],
                    }
                ],
                "roles": [{"users": [{"org_username": "myname"}]}],
            }
        ]
        # One "job" entry per branch referenced by the targets above, so
        # validate_upstream_jobs can match both targets.
        jjb_mock_data = {
            "ci": [
                {
                    "name": "job",
                    "properties": [{"github": {"url": "url"}}],
                    "scm": [{"git": {"branches": ["main"]}}],
                },
                {
                    "name": "job",
                    "properties": [{"github": {"url": "url"}}],
                    "scm": [{"git": {"branches": ["master"]}}],
                },
            ]
        }
        self.jjb = MockJJB(jjb_mock_data)

    def test_check_saas_file_env_combo_unique(self):
        """The unmodified fixture produces unique saas-file/env combos."""
        saasherder = SaasHerder(
            self.saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
            validate=True,
        )
        self.assertTrue(saasherder.valid)

    def test_check_saas_file_env_combo_not_unique(self):
        """An over-long saas-file name prevents unique combo generation."""
        self.saas_files[0][
            "name"
        ] = "long-name-which-is-too-long-to-produce-unique-combo"
        saasherder = SaasHerder(
            self.saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
            validate=True,
        )
        self.assertFalse(saasherder.valid)

    def test_check_saas_file_upstream_not_used_with_commit_sha(self):
        """Upstream with branch refs (the fixture default) is valid."""
        saasherder = SaasHerder(
            self.saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
            validate=True,
        )
        self.assertTrue(saasherder.valid)

    def test_check_saas_file_upstream_used_with_commit_sha(self):
        """Combining upstream with a commit-sha ref is invalid."""
        self.saas_files[0]["resourceTemplates"][0]["targets"][0][
            "ref"
        ] = "2637b6c41bda7731b1bcaaf18b4a50d7c5e63e30"
        saasherder = SaasHerder(
            self.saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
            validate=True,
        )
        self.assertFalse(saasherder.valid)

    def test_validate_image_tag_not_equals_ref_valid(self):
        """IMAGE_TAG parameter with a branch ref is valid."""
        self.saas_files[0]["resourceTemplates"][0]["targets"][0][
            "parameters"
        ] = '{"IMAGE_TAG": "2637b6c"}'
        saasherder = SaasHerder(
            self.saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
            validate=True,
        )
        self.assertTrue(saasherder.valid)

    def test_validate_image_tag_not_equals_ref_invalid(self):
        """IMAGE_TAG that is a prefix of the commit-sha ref is invalid."""
        self.saas_files[0]["resourceTemplates"][0]["targets"][0][
            "ref"
        ] = "2637b6c41bda7731b1bcaaf18b4a50d7c5e63e30"
        self.saas_files[0]["resourceTemplates"][0]["targets"][0][
            "parameters"
        ] = '{"IMAGE_TAG": "2637b6c"}'
        saasherder = SaasHerder(
            self.saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
            validate=True,
        )
        self.assertFalse(saasherder.valid)

    def test_validate_upstream_jobs_valid(self):
        """Upstream jobs present in the JJB data keep the herder valid."""
        saasherder = SaasHerder(
            self.saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
            validate=True,
        )
        saasherder.validate_upstream_jobs(self.jjb)
        self.assertTrue(saasherder.valid)

    def test_validate_upstream_jobs_invalid(self):
        """An empty JJB job list invalidates the upstream references."""
        saasherder = SaasHerder(
            self.saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
            validate=True,
        )
        jjb = MockJJB({"ci": []})
        saasherder.validate_upstream_jobs(jjb)
        self.assertFalse(saasherder.valid)

    def test_check_saas_file_promotion_same_source(self):
        """Publisher and subscriber of a channel must share the same repo
        URL; here the URLs differ, so validation must fail."""
        rts = [
            {
                "name": "rt_publisher",
                "url": "repo_publisher",
                "targets": [
                    {
                        "namespace": {
                            "name": "ns",
                            "environment": {"name": "env1"},
                            "cluster": {"name": "cluster"},
                        },
                        "parameters": {},
                        "ref": "0000000000000",
                        "promotion": {
                            "publish": ["channel-1"],
                        },
                    }
                ],
            },
            {
                "name": "rt_subscriber",
                "url": "this-repo-will-not-match-the-publisher",
                "targets": [
                    {
                        "namespace": {
                            "name": "ns2",
                            "environment": {"name": "env1"},
                            "cluster": {"name": "cluster"},
                        },
                        "parameters": {},
                        "ref": "0000000000000",
                        "promotion": {
                            "auto": "true",
                            "subscribe": ["channel-1"],
                        },
                    }
                ],
            },
        ]
        self.saas_files[0]["resourceTemplates"] = rts
        saasherder = SaasHerder(
            self.saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
            validate=True,
        )
        self.assertFalse(saasherder.valid)
class TestGetMovingCommitsDiffSaasFile(TestCase):
    """Tests for SaasHerder.get_moving_commits_diff_saas_file.

    GitHub interaction, pipeline-provider lookup and commit-sha
    resolution are all patched out on SaasHerder; each test injects its
    own return values / side effects into the started patcher mocks.
    """

    def setUp(self):
        # One saas file, one resource template, two targets on different
        # branches ("main" / "secondary") and clusters.
        self.saas_files = [
            {
                "path": "path1",
                "name": "a1",
                "managedResourceTypes": [],
                "resourceTemplates": [
                    {
                        "name": "rt",
                        "url": "http://github.com/user/repo",
                        "targets": [
                            {
                                "namespace": {
                                    "name": "ns",
                                    "environment": {"name": "env1"},
                                    "cluster": {"name": "cluster1"},
                                },
                                "parameters": {},
                                "ref": "main",
                            },
                            {
                                "namespace": {
                                    "name": "ns",
                                    "environment": {"name": "env2"},
                                    "cluster": {"name": "cluster2"},
                                },
                                "parameters": {},
                                "ref": "secondary",
                            },
                        ],
                    }
                ],
                "roles": [{"users": [{"org_username": "myname"}]}],
            }
        ]
        # Patch out all external interaction; patchers are started here
        # and stopped in tearDown (order mirrors creation order).
        self.initiate_gh_patcher = patch.object(
            SaasHerder, "_initiate_github", autospec=True
        )
        self.get_pipelines_provider_patcher = patch.object(
            SaasHerder, "_get_pipelines_provider"
        )
        self.get_commit_sha_patcher = patch.object(
            SaasHerder, "_get_commit_sha", autospec=True
        )
        self.initiate_gh = self.initiate_gh_patcher.start()
        self.get_pipelines_provider = self.get_pipelines_provider_patcher.start()
        self.get_commit_sha = self.get_commit_sha_patcher.start()
        # Show full diffs on assertion failures (the expected list is large).
        self.maxDiff = None

    def tearDown(self):
        # Stop every patcher started in setUp.
        for p in (
            self.initiate_gh_patcher,
            self.get_pipelines_provider_patcher,
            self.get_commit_sha_patcher,
        ):
            p.stop()

    def test_get_moving_commits_diff_saas_file_all_fine(self):
        """Both targets moved: one diff entry per target, with the
        commit shas returned (in order) by the mocked _get_commit_sha."""
        saasherder = SaasHerder(
            self.saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
            validate=False,
        )
        saasherder.state = MagicMock()
        saasherder.state.get.return_value = "asha"
        # One sha per target, consumed in target order.
        self.get_commit_sha.side_effect = ("abcd4242", "4242efg")
        self.get_pipelines_provider.return_value = "apipelineprovider"
        expected = [
            {
                "saas_file_name": self.saas_files[0]["name"],
                "env_name": "env1",
                "timeout": None,
                "ref": "main",
                "commit_sha": "abcd4242",
                "cluster_name": "cluster1",
                "pipelines_provider": "apipelineprovider",
                "namespace_name": "ns",
                "rt_name": "rt",
            },
            {
                "saas_file_name": self.saas_files[0]["name"],
                "env_name": "env2",
                "timeout": None,
                "ref": "secondary",
                "commit_sha": "4242efg",
                "cluster_name": "cluster2",
                "pipelines_provider": "apipelineprovider",
                "namespace_name": "ns",
                "rt_name": "rt",
            },
        ]
        self.assertEqual(
            saasherder.get_moving_commits_diff_saas_file(self.saas_files[0], True),
            expected,
        )

    def test_get_moving_commits_diff_saas_file_bad_sha1(self):
        """A GithubException while resolving shas yields an empty diff
        instead of propagating the error."""
        saasherder = SaasHerder(
            self.saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
            validate=False,
        )
        saasherder.state = MagicMock()
        saasherder.state.get.return_value = "asha"
        self.get_pipelines_provider.return_value = "apipelineprovider"
        self.get_commit_sha.side_effect = GithubException(
            401, "somedata", {"aheader": "avalue"}
        )
        # At least we don't crash!
        self.assertEqual(
            saasherder.get_moving_commits_diff_saas_file(self.saas_files[0], True), []
        )
class TestPopulateDesiredState(TestCase):
    """Tests for SaasHerder.populate_desired_state using fixture files.

    GitHub access and image checking are patched out; file contents are
    served from the ``saasherder_populate_desired`` fixtures via
    fake_get_file_contents.
    """

    def setUp(self):
        # Load the saas-file fixture (exactly one expected).
        saas_files = []
        self.fxts = Fixtures("saasherder_populate_desired")
        for file in [self.fxts.get("saas_remote_openshift_template.yaml")]:
            saas_files.append(yaml.safe_load(file))
        self.assertEqual(1, len(saas_files))
        self.saasherder = SaasHerder(
            saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={"hashLength": 7},
        )
        # Mock GitHub interactions.
        self.initiate_gh_patcher = patch.object(
            SaasHerder,
            "_initiate_github",
            autospec=True,
            return_value=None,
        )
        # _get_file_contents is wrapped (not replaced) so the fake below
        # can assert on its arguments and serve fixture content.
        self.get_file_contents_patcher = patch.object(
            SaasHerder,
            "_get_file_contents",
            wraps=self.fake_get_file_contents,
        )
        self.initiate_gh_patcher.start()
        self.get_file_contents_patcher.start()
        # Mock image checking.
        self.get_check_images_patcher = patch.object(
            SaasHerder,
            "_check_images",
            autospec=True,
            return_value=None,
        )
        self.get_check_images_patcher.start()

    def fake_get_file_contents(self, options):
        """Serve file contents from fixtures keyed by ref + path.

        Returns the (parsed content, html_url, commit_sha) triple the
        real _get_file_contents produces.
        """
        self.assertEqual("https://github.com/rhobs/configuration", options["url"])
        content = self.fxts.get(options["ref"] + (options["path"].replace("/", "_")))
        return yaml.safe_load(content), "yolo", options["ref"]

    def tearDown(self):
        # Stop every patcher started in setUp.
        for p in (
            self.initiate_gh_patcher,
            self.get_file_contents_patcher,
            self.get_check_images_patcher,
        ):
            p.stop()

    def test_populate_desired_state_saas_file_delete(self):
        """A spec marked for deletion produces no desired state."""
        spec = {"delete": True}
        desired_state = self.saasherder.populate_desired_state_saas_file(spec, None)
        self.assertIsNone(desired_state)

    def test_populate_desired_state_cases(self):
        """Every populated desired resource matches its expected-JSON
        fixture; exactly 5 resources are produced overall."""
        ri = ResourceInventory()
        for resource_type in (
            "Deployment",
            "Service",
            "ConfigMap",
        ):
            ri.initialize_resource_type("stage-1", "yolo-stage", resource_type)
            ri.initialize_resource_type("prod-1", "yolo", resource_type)
        self.saasherder.populate_desired_state(ri)
        cnt = 0
        for (cluster, namespace, resource_type, data) in ri:
            for _, d_item in data["desired"].items():
                expected = yaml.safe_load(
                    self.fxts.get(
                        f"expected_{cluster}_{namespace}_{resource_type}.json",
                    )
                )
                self.assertEqual(expected, d_item.body)
                cnt += 1
        self.assertEqual(5, cnt, "expected 5 resources, found less")
class TestCollectRepoUrls(TestCase):
    """SaasHerder.repo_urls should collect every resource-template URL."""

    def test_collect_repo_urls(self):
        """A single template URL ends up as the one-element repo_urls set."""
        expected_url = "git-repo"
        files = [
            {
                "path": "path1",
                "name": "name1",
                "managedResourceTypes": [],
                "resourceTemplates": [
                    {"name": "name", "url": expected_url, "targets": []}
                ],
            }
        ]
        herder = SaasHerder(
            files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
        )
        self.assertEqual(herder.repo_urls, {expected_url})
class TestGetSaasFileAttribute(TestCase):
    """Behaviour of SaasHerder._get_saas_file_feature_enabled.

    The feature flag is read across all saas files: with a single file
    the attribute value (or the default) is returned; with multiple
    files the feature is only enabled when it cannot conflict.
    """

    @staticmethod
    def _make_herder(saas_files):
        # Shared construction boilerplate: every test builds the herder
        # with identical arguments apart from the saas files themselves.
        return SaasHerder(
            saas_files,
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            settings={},
        )

    @staticmethod
    def _saas_file(path, name, **extra):
        # Minimal saas-file dict; extra keyword args become attributes.
        base = {
            "path": path,
            "name": name,
            "managedResourceTypes": [],
            "resourceTemplates": [],
        }
        base.update(extra)
        return base

    def test_attribute_none(self):
        """A missing attribute reads as None."""
        herder = self._make_herder([self._saas_file("path1", "name1")])
        att = herder._get_saas_file_feature_enabled("no_such_attribute")
        self.assertEqual(att, None)

    def test_attribute_not_none(self):
        """A present attribute is returned as-is."""
        herder = self._make_herder(
            [self._saas_file("path1", "name1", attrib=True)]
        )
        att = herder._get_saas_file_feature_enabled("attrib")
        self.assertEqual(att, True)

    def test_attribute_none_with_default(self):
        """A missing attribute falls back to the supplied default."""
        herder = self._make_herder([self._saas_file("path1", "name1")])
        att = herder._get_saas_file_feature_enabled("no_such_att", default=True)
        self.assertEqual(att, True)

    def test_attribute_not_none_with_default(self):
        """A present attribute wins over the supplied default."""
        herder = self._make_herder(
            [self._saas_file("path1", "name1", attrib=True)]
        )
        att = herder._get_saas_file_feature_enabled("attrib", default=False)
        self.assertEqual(att, True)

    def test_attribute_multiple_saas_files_return_false(self):
        """With several saas files the feature is disabled when only
        some of them set it."""
        herder = self._make_herder(
            [
                self._saas_file("path1", "name1", attrib=True),
                self._saas_file("path2", "name2"),
            ]
        )
        self.assertFalse(herder._get_saas_file_feature_enabled("attrib"))

    def test_attribute_multiple_saas_files_with_default_return_false(self):
        """With several saas files even a truthy default yields False."""
        herder = self._make_herder(
            [
                self._saas_file("path1", "name1", attrib=True),
                self._saas_file("path2", "name2", attrib=True),
            ]
        )
        att = herder._get_saas_file_feature_enabled("attrib", default=True)
        self.assertFalse(att)
class TestConfigHashPromotionsValidation(TestCase):
    """TestCase to test SaasHerder promotions validation. SaasHerder is
    initialized with ResourceInventory population. Like is done in
    openshift-saas-deploy"""

    cluster: str
    namespace: str
    fxt: Any
    template: Any

    @classmethod
    def setUpClass(cls):
        # Fixture data shared (read-only) by all tests in this class.
        cls.fxt = Fixtures("saasherder")
        cls.cluster = "test-cluster"
        cls.template = cls.fxt.get_anymarkup("template_1.yml")

    def setUp(self) -> None:
        self.all_saas_files = [self.fxt.get_anymarkup("saas.gql.yml")]
        # Patch out State and all GitHub/image interactions on SaasHerder.
        self.state_patcher = patch("reconcile.utils.saasherder.State", autospec=True)
        self.state_mock = self.state_patcher.start().return_value
        self.ig_patcher = patch.object(SaasHerder, "_initiate_github", autospec=True)
        self.ig_patcher.start()
        self.image_auth_patcher = patch.object(SaasHerder, "_initiate_image_auth")
        self.image_auth_patcher.start()
        self.gfc_patcher = patch.object(SaasHerder, "_get_file_contents", autospec=True)
        gfc_mock = self.gfc_patcher.start()
        self.saas_file = self.fxt.get_anymarkup("saas.gql.yml")
        # ApiVersion is set in the saas gql query method in queries module
        self.saas_file["apiVersion"] = "v2"
        gfc_mock.return_value = (self.template, "url", "ahash")
        self.deploy_current_state_fxt = self.fxt.get_anymarkup("saas_deploy.state.json")
        self.post_deploy_current_state_fxt = self.fxt.get_anymarkup(
            "saas_post_deploy.state.json"
        )
        self.saasherder = SaasHerder(
            [self.saas_file],
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            accounts={"name": "test-account"},  # Initiates State in SaasHerder
            settings={"hashLength": 24},
        )
        # IMPORTANT: Populating desired state modify self.saas_files within
        # saasherder object.
        self.ri = ResourceInventory()
        for ns in ["test-ns-publisher", "test-ns-subscriber"]:
            for kind in ["Service", "Deployment"]:
                self.ri.initialize_resource_type(self.cluster, ns, kind)
        self.saasherder.populate_desired_state(self.ri)
        if self.ri.has_error_registered():
            raise Exception("Errors registered in Resourceinventory")

    def tearDown(self):
        # Stop every patcher started in setUp.
        self.state_patcher.stop()
        self.ig_patcher.stop()
        # Bugfix: this patcher was started in setUp but never stopped,
        # leaking the _initiate_image_auth patch into subsequent tests.
        self.image_auth_patcher.stop()
        self.gfc_patcher.stop()

    def test_config_hash_is_filled(self):
        """Ensures the get_config_diff_saas_file fills the promotion data
        on the publisher target. This data is used in publish_promotions
        method to add the hash to subscribed targets.
        IMPORTANT: This is not the promotion_data within promotion. This
        fields are set by _process_template method in saasherder
        """
        job_spec = self.saasherder.get_configs_diff_saas_file(self.saas_file)[0]
        promotion = job_spec["target_config"]["promotion"]
        self.assertIsNotNone(promotion[TARGET_CONFIG_HASH])

    def test_promotion_state_config_hash_match_validates(self):
        """A promotion is valid if the parent target config_hash set in
        the state is equal to the one set in the subscriber target
        promotion data. This is the happy path.
        """
        publisher_state = {
            "success": True,
            "saas_file": self.saas_file["name"],
            TARGET_CONFIG_HASH: "ed2af38cf21f268c",
        }
        self.state_mock.get.return_value = publisher_state
        result = self.saasherder.validate_promotions()
        self.assertTrue(result)

    def test_promotion_state_config_hash_not_match_no_validates(self):
        """Promotion is not valid if the parent target config hash set in
        the state does not match with the one set in the subsriber target
        promotion_data. This could happen if the parent target has run again
        with the same ref before before the subscriber target promotion MR is
        merged.
        """
        publisher_state = {
            "success": True,
            "saas_file": self.saas_file["name"],
            TARGET_CONFIG_HASH: "will_not_match",
        }
        self.state_mock.get.return_value = publisher_state
        result = self.saasherder.validate_promotions()
        self.assertFalse(result)

    def test_promotion_without_state_config_hash_validates(self):
        """Existent states won't have promotion data. If there is an ongoing
        promotion, this ensures it will happen.
        """
        publisher_state = {
            "success": True,
        }
        self.state_mock.get.return_value = publisher_state
        result = self.saasherder.validate_promotions()
        self.assertTrue(result)

    def test_promotion_without_promotion_data_validates(self):
        """A manual promotion might be required, subsribed targets without
        promotion_data should validate if the parent target job has succed
        with the same ref.
        """
        publisher_state = {
            "success": True,
            "saas_file": self.saas_file["name"],
            TARGET_CONFIG_HASH: "whatever",
        }
        # Remove promotion_data on the promoted target
        self.saasherder.promotions[1]["promotion_data"] = None
        self.state_mock.get.return_value = publisher_state
        result = self.saasherder.validate_promotions()
        self.assertTrue(result)
class TestConfigHashTrigger(TestCase):
    """TestCase to test Openshift SAAS deploy configs trigger. SaasHerder is
    initialized WITHOUT ResourceInventory population. Like is done in the
    config changes trigger"""

    cluster: str
    namespace: str
    fxt: Any
    template: Any

    @classmethod
    def setUpClass(cls):
        cls.fxt = Fixtures("saasherder")
        cls.cluster = "test-cluster"

    def setUp(self) -> None:
        self.all_saas_files = [self.fxt.get_anymarkup("saas.gql.yml")]
        self.state_patcher = patch("reconcile.utils.saasherder.State", autospec=True)
        self.state_mock = self.state_patcher.start().return_value
        self.saas_file = self.fxt.get_anymarkup("saas.gql.yml")
        # ApiVersion is set in the saas gql query method in queries module
        self.saas_file["apiVersion"] = "v2"
        self.deploy_current_state_fxt = self.fxt.get_anymarkup("saas_deploy.state.json")
        self.post_deploy_current_state_fxt = self.fxt.get_anymarkup(
            "saas_post_deploy.state.json"
        )
        # State.get is consumed in order: first the deploy state, then the
        # post-deploy state — matching the two targets in the saas file.
        self.state_mock.get.side_effect = [
            self.deploy_current_state_fxt,
            self.post_deploy_current_state_fxt,
        ]
        self.saasherder = SaasHerder(
            [self.saas_file],
            thread_pool_size=1,
            gitlab=None,
            integration="",
            integration_version="",
            accounts={"name": "test-account"},  # Initiates State in SaasHerder
            settings={"hashLength": 24},
        )

    def tearDown(self):
        self.state_patcher.stop()

    def test_same_configs_do_not_trigger(self):
        """Ensures that if the same config is found, no job is triggered
        current Config is fetched from the state
        """
        job_specs = self.saasherder.get_configs_diff_saas_file(self.saas_file)
        self.assertListEqual(job_specs, [])

    def test_config_hash_change_do_trigger(self):
        """Ensures a new job is triggered if the parent config hash changes"""
        configs = self.saasherder.get_saas_targets_config(self.saas_file)
        desired_tc = list(configs.values())[1]
        desired_promo_data = desired_tc["promotion"]["promotion_data"]
        desired_promo_data[0]["data"][0][TARGET_CONFIG_HASH] = "Changed"
        job_specs = self.saasherder.get_configs_diff_saas_file(self.saas_file)
        self.assertEqual(len(job_specs), 1)

    def test_non_existent_config_triggers(self):
        """A target with no stored state must trigger a job."""
        # Override setUp's side_effect: second target has no stored state.
        self.state_mock.get.side_effect = [self.deploy_current_state_fxt, None]
        job_specs = self.saasherder.get_configs_diff_saas_file(self.saas_file)
        self.assertEqual(len(job_specs), 1)
class TestRemoveNoneAttributes(TestCase):
    """Tests for SaasHerder.remove_none_values (recursive None pruning)."""

    def testSimpleDict(self):
        """None-valued keys are dropped at every nesting level; empty
        dicts are preserved."""
        # Renamed from `input`: avoid shadowing the `input` builtin.
        data = {"a": 1, "b": {}, "d": None, "e": {"aa": "aa", "bb": None}}
        expected = {"a": 1, "b": {}, "e": {"aa": "aa"}}
        res = SaasHerder.remove_none_values(data)
        self.assertEqual(res, expected)

    def testNoneValue(self):
        """A None root value normalizes to an empty dict."""
        data = None
        expected = {}
        res = SaasHerder.remove_none_values(data)
        self.assertEqual(res, expected)
| [
"reconcile.utils.openshift_resource.ResourceInventory",
"reconcile.utils.saasherder.SaasHerder.remove_none_values",
"reconcile.utils.saasherder.SaasHerder",
"reconcile.utils.jjb_client.JJB.get_ref",
"unittest.mock.MagicMock",
"reconcile.utils.jjb_client.JJB.get_repo_url",
"yaml.safe_load",
"github.Git... | [((591, 612), 'reconcile.utils.jjb_client.JJB.get_repo_url', 'JJB.get_repo_url', (['job'], {}), '(job)\n', (607, 612), False, 'from reconcile.utils.jjb_client import JJB\n'), ((669, 685), 'reconcile.utils.jjb_client.JJB.get_ref', 'JJB.get_ref', (['job'], {}), '(job)\n', (680, 685), False, 'from reconcile.utils.jjb_client import JJB\n'), ((2886, 3018), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['self.saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}', 'validate': '(True)'}), "(self.saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={}, validate=True)\n", (2896, 3018), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((3345, 3477), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['self.saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}', 'validate': '(True)'}), "(self.saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={}, validate=True)\n", (3355, 3477), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((3705, 3837), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['self.saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}', 'validate': '(True)'}), "(self.saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={}, validate=True)\n", (3715, 3837), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((4199, 4331), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['self.saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}', 'validate': '(True)'}), "(self.saas_files, thread_pool_size=1, gitlab=None, integration='',\n 
integration_version='', settings={}, validate=True)\n", (4209, 4331), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((4679, 4811), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['self.saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}', 'validate': '(True)'}), "(self.saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={}, validate=True)\n", (4689, 4811), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((5299, 5431), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['self.saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}', 'validate': '(True)'}), "(self.saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={}, validate=True)\n", (5309, 5431), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((5638, 5770), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['self.saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}', 'validate': '(True)'}), "(self.saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={}, validate=True)\n", (5648, 5770), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((6029, 6161), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['self.saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}', 'validate': '(True)'}), "(self.saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={}, validate=True)\n", (6039, 6161), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((7891, 8023), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['self.saas_files'], 
{'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}', 'validate': '(True)'}), "(self.saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={}, validate=True)\n", (7901, 8023), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((9634, 9693), 'unittest.mock.patch.object', 'patch.object', (['SaasHerder', '"""_initiate_github"""'], {'autospec': '(True)'}), "(SaasHerder, '_initiate_github', autospec=True)\n", (9646, 9693), False, 'from unittest.mock import patch, MagicMock\n'), ((9762, 9813), 'unittest.mock.patch.object', 'patch.object', (['SaasHerder', '"""_get_pipelines_provider"""'], {}), "(SaasHerder, '_get_pipelines_provider')\n", (9774, 9813), False, 'from unittest.mock import patch, MagicMock\n'), ((9874, 9932), 'unittest.mock.patch.object', 'patch.object', (['SaasHerder', '"""_get_commit_sha"""'], {'autospec': '(True)'}), "(SaasHerder, '_get_commit_sha', autospec=True)\n", (9886, 9932), False, 'from unittest.mock import patch, MagicMock\n'), ((10480, 10613), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['self.saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}', 'validate': '(False)'}), "(self.saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={}, validate=False)\n", (10490, 10613), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((10732, 10743), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (10741, 10743), False, 'from unittest.mock import patch, MagicMock\n'), ((12013, 12146), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['self.saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}', 'validate': '(False)'}), "(self.saas_files, thread_pool_size=1, gitlab=None, integration='',\n 
integration_version='', settings={}, validate=False)\n", (12023, 12146), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((12265, 12276), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (12274, 12276), False, 'from unittest.mock import patch, MagicMock\n'), ((12441, 12496), 'github.GithubException', 'GithubException', (['(401)', '"""somedata"""', "{'aheader': 'avalue'}"], {}), "(401, 'somedata', {'aheader': 'avalue'})\n", (12456, 12496), False, 'from github import GithubException\n'), ((13026, 13153), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': "{'hashLength': 7}"}), "(saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={'hashLength': 7})\n", (13036, 13153), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((13305, 13383), 'unittest.mock.patch.object', 'patch.object', (['SaasHerder', '"""_initiate_github"""'], {'autospec': '(True)', 'return_value': 'None'}), "(SaasHerder, '_initiate_github', autospec=True, return_value=None)\n", (13317, 13383), False, 'from unittest.mock import patch, MagicMock\n'), ((13484, 13570), 'unittest.mock.patch.object', 'patch.object', (['SaasHerder', '"""_get_file_contents"""'], {'wraps': 'self.fake_get_file_contents'}), "(SaasHerder, '_get_file_contents', wraps=self.\n fake_get_file_contents)\n", (13496, 13570), False, 'from unittest.mock import patch, MagicMock\n'), ((13773, 13848), 'unittest.mock.patch.object', 'patch.object', (['SaasHerder', '"""_check_images"""'], {'autospec': '(True)', 'return_value': 'None'}), "(SaasHerder, '_check_images', autospec=True, return_value=None)\n", (13785, 13848), False, 'from unittest.mock import patch, MagicMock\n'), ((14719, 14738), 'reconcile.utils.openshift_resource.ResourceInventory', 'ResourceInventory', ([], {}), '()\n', (14736, 14738), False, 'from 
reconcile.utils.openshift_resource import ResourceInventory\n'), ((15929, 16041), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}'}), "(saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={})\n", (15939, 16041), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((16493, 16605), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}'}), "(saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={})\n", (16503, 16605), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((17104, 17216), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}'}), "(saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={})\n", (17114, 17216), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((17681, 17793), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}'}), "(saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={})\n", (17691, 17793), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((18313, 18425), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}'}), "(saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={})\n", (18323, 18425), False, 
'from reconcile.utils.saasherder import SaasHerder\n'), ((19133, 19245), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}'}), "(saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={})\n", (19143, 19245), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((19959, 20071), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['saas_files'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'settings': '{}'}), "(saas_files, thread_pool_size=1, gitlab=None, integration='',\n integration_version='', settings={})\n", (19969, 20071), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((20866, 20922), 'unittest.mock.patch', 'patch', (['"""reconcile.utils.saasherder.State"""'], {'autospec': '(True)'}), "('reconcile.utils.saasherder.State', autospec=True)\n", (20871, 20922), False, 'from unittest.mock import patch, MagicMock\n'), ((21016, 21075), 'unittest.mock.patch.object', 'patch.object', (['SaasHerder', '"""_initiate_github"""'], {'autospec': '(True)'}), "(SaasHerder, '_initiate_github', autospec=True)\n", (21028, 21075), False, 'from unittest.mock import patch, MagicMock\n'), ((21143, 21191), 'unittest.mock.patch.object', 'patch.object', (['SaasHerder', '"""_initiate_image_auth"""'], {}), "(SaasHerder, '_initiate_image_auth')\n", (21155, 21191), False, 'from unittest.mock import patch, MagicMock\n'), ((21260, 21321), 'unittest.mock.patch.object', 'patch.object', (['SaasHerder', '"""_get_file_contents"""'], {'autospec': '(True)'}), "(SaasHerder, '_get_file_contents', autospec=True)\n", (21272, 21321), False, 'from unittest.mock import patch, MagicMock\n'), ((21854, 22029), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['[self.saas_file]'], {'thread_pool_size': '(1)', 'gitlab': 'None', 
'integration': '""""""', 'integration_version': '""""""', 'accounts': "{'name': 'test-account'}", 'settings': "{'hashLength': 24}"}), "([self.saas_file], thread_pool_size=1, gitlab=None, integration=\n '', integration_version='', accounts={'name': 'test-account'}, settings\n ={'hashLength': 24})\n", (21864, 22029), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((22272, 22291), 'reconcile.utils.openshift_resource.ResourceInventory', 'ResourceInventory', ([], {}), '()\n', (22289, 22291), False, 'from reconcile.utils.openshift_resource import ResourceInventory\n'), ((26297, 26353), 'unittest.mock.patch', 'patch', (['"""reconcile.utils.saasherder.State"""'], {'autospec': '(True)'}), "('reconcile.utils.saasherder.State', autospec=True)\n", (26302, 26353), False, 'from unittest.mock import patch, MagicMock\n'), ((26989, 27164), 'reconcile.utils.saasherder.SaasHerder', 'SaasHerder', (['[self.saas_file]'], {'thread_pool_size': '(1)', 'gitlab': 'None', 'integration': '""""""', 'integration_version': '""""""', 'accounts': "{'name': 'test-account'}", 'settings': "{'hashLength': 24}"}), "([self.saas_file], thread_pool_size=1, gitlab=None, integration=\n '', integration_version='', accounts={'name': 'test-account'}, settings\n ={'hashLength': 24})\n", (26999, 27164), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((28641, 28677), 'reconcile.utils.saasherder.SaasHerder.remove_none_values', 'SaasHerder.remove_none_values', (['input'], {}), '(input)\n', (28670, 28677), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((28805, 28841), 'reconcile.utils.saasherder.SaasHerder.remove_none_values', 'SaasHerder.remove_none_values', (['input'], {}), '(input)\n', (28834, 28841), False, 'from reconcile.utils.saasherder import SaasHerder\n'), ((14187, 14210), 'yaml.safe_load', 'yaml.safe_load', (['content'], {}), '(content)\n', (14201, 14210), False, 'import yaml\n'), ((12932, 12952), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), 
'(file)\n', (12946, 12952), False, 'import yaml\n')] |
import unittest
import time
import copy
from unittest.mock import patch, MagicMock, call
from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties
from kafka import KafkaProducer
class TestPostalService(unittest.TestCase):
def setUp(self):
self.mock_delivery_service = MagicMock()
def test_init_without_delivery_service_throws_error(self):
with self.assertRaises(ValueError) as context:
PostalService()
self.assertEqual(str(context.exception), 'delivery_service argument not provided')
def test_post_sends_envelope_to_delivery_service(self):
postal_service = PostalService(delivery_service=self.mock_delivery_service)
test_envelope = Envelope('test', Message('test message'))
postal_service.post(test_envelope)
self.mock_delivery_service.deliver.assert_called_once_with(test_envelope)
def test_post_throws_error_when_envelope_is_none(self):
postal_service = PostalService(delivery_service=self.mock_delivery_service)
with self.assertRaises(ValueError) as context:
postal_service.post(None)
self.assertEqual(str(context.exception), 'An envelope must be passed to post a message')
class TestKafkaDeliveryService(unittest.TestCase):
def setUp(self):
self.messaging_properties = MessagingProperties()
self.messaging_properties.connection_address='test:9092'
self.messaging_properties.config={'api_version_auto_timeout_ms': 5000}
def test_init_without_messaging_config_throws_error(self):
with self.assertRaises(ValueError) as context:
KafkaDeliveryService()
self.assertEqual(str(context.exception), 'messaging_properties argument not provided')
def test_init_without_bootstrap_servers_throws_error(self):
messaging_properties = MessagingProperties()
messaging_properties.connection_address=None
with self.assertRaises(ValueError) as context:
KafkaDeliveryService(messaging_properties=messaging_properties)
self.assertEqual(str(context.exception), 'connection_address not set on messaging_properties')
@patch('ignition.service.messaging.KafkaProducer')
def test_deliver(self, mock_kafka_producer_init):
# need to set this explicitly because we've patched KafkaProducer
mock_kafka_producer_init.DEFAULT_CONFIG = KafkaProducer.DEFAULT_CONFIG
delivery_service = KafkaDeliveryService(messaging_properties=self.messaging_properties)
test_envelope = Envelope('test_topic', Message('test message'))
delivery_service.deliver(test_envelope)
mock_kafka_producer_init.assert_called_once_with(bootstrap_servers='test:9092', api_version_auto_timeout_ms=5000, client_id='ignition')
self.assertEqual(delivery_service.producer, mock_kafka_producer_init.return_value)
mock_kafka_producer = mock_kafka_producer_init.return_value
mock_kafka_producer.send.assert_called_once_with('test_topic', b'test message')
@patch('ignition.service.messaging.KafkaProducer')
def test_deliver_throws_error_when_envelope_is_none(self, mock_kafka_producer_init):
delivery_service = KafkaDeliveryService(messaging_properties=self.messaging_properties)
with self.assertRaises(ValueError) as context:
delivery_service.deliver(None)
self.assertEqual(str(context.exception), 'An envelope must be passed to deliver a message')
class TestKafkaInboxService(unittest.TestCase):
def setUp(self):
self.messaging_properties = MessagingProperties()
self.messaging_properties.connection_address='test:9092'
self.messaging_properties.config={'api_version_auto_timeout_ms':5000}
def test_init_without_messaging_config_throws_error(self):
with self.assertRaises(ValueError) as context:
KafkaInboxService()
self.assertEqual(str(context.exception), 'messaging_properties argument not provided')
def test_init_without_bootstrap_servers_throws_error(self):
messaging_properties = MessagingProperties()
messaging_properties.connection_address=None
with self.assertRaises(ValueError) as context:
KafkaInboxService(messaging_properties=messaging_properties)
self.assertEqual(str(context.exception), 'connection_address not set on messaging_properties')
@patch('ignition.service.messaging.KafkaInboxThread')
def test_watch_inbox_starts_thread(self, mock_kafka_inbox_thread_init):
inbox_service = KafkaInboxService(messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
mock_kafka_inbox_thread_init.assert_called_once_with('test:9092', 'test_group', 'test_topic', mock_read_inbox_func, inbox_service._KafkaInboxService__thread_exit_func, self.messaging_properties.config)
mock_kafka_inbox_thread_init.return_value.start.assert_called_once()
@patch('ignition.service.messaging.KafkaConsumer')
def test_watch_inbox_thread_inits_consumer(self, mock_kafka_consumer_init):
inbox_service = KafkaInboxService(messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
mock_kafka_consumer_init.assert_called_once_with('test_topic', bootstrap_servers='test:9092', group_id='test_group', enable_auto_commit=False)
@patch('ignition.service.messaging.KafkaConsumer')
def test_watch_inbox_thread_inits_consumer(self, mock_kafka_consumer_init):
mock_kafka_consumer = mock_kafka_consumer_init.return_value
mock_record_1 = MagicMock()
mock_record_2 = MagicMock()
infinite_iter_stop = False
infinite_iter_has_stopped = False
ready_for_second_message = False
second_message_sent = False
def build_iter():
def iter():
yield mock_record_1
while not infinite_iter_stop:
if ready_for_second_message:
yield mock_record_2
break
while not infinite_iter_stop:
time.sleep(0.001)
infinite_iter_has_stopped = True
return iter
mock_kafka_consumer.__iter__.side_effect = build_iter()
inbox_service = KafkaInboxService(messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
time.sleep(0.01)
try:
self.assertEqual(len(inbox_service.active_threads), 1)
expected_config = copy.copy(self.messaging_properties.config)
expected_config = {
'bootstrap_servers': 'test:9092',
'group_id': 'test_group',
'enable_auto_commit': False,
'client_id': 'ignition'
}
mock_kafka_consumer_init.assert_called_once_with('test_topic', **expected_config)
mock_kafka_consumer.__iter__.assert_called_once()
mock_record_1.value.decode.assert_called_once_with('utf-8')
mock_record_2.value.decode.assert_not_called()
mock_read_inbox_func.assert_called_once_with(mock_record_1.value.decode.return_value)
mock_kafka_consumer.commit.assert_called_once()
ready_for_second_message = True
time.sleep(1)
mock_record_2.value.decode.assert_called_once_with('utf-8')
mock_read_inbox_func.assert_called_with(mock_record_2.value.decode.return_value)
mock_kafka_consumer.commit.assert_has_calls([call(), call()])
finally:
infinite_iter_stop = True
time.sleep(1)
mock_kafka_consumer.close.assert_called_once()
self.assertEqual(len(inbox_service.active_threads), 0)
@patch('ignition.service.messaging._thread')
@patch('ignition.service.messaging.KafkaConsumer')
def test_watch_inbox_thread_calls_exit_func_on_error(self, mock_kafka_consumer_init, mock_thread):
mock_kafka_consumer = mock_kafka_consumer_init.return_value
mock_record_1 = MagicMock()
infinite_iter_stop = False
ready_for_message = True
def build_iter():
def iter():
while not infinite_iter_stop:
if ready_for_message:
yield mock_record_1
break
return iter
mock_kafka_consumer.__iter__.side_effect = build_iter()
inbox_service = KafkaInboxService(test_mode=True, messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
mock_read_inbox_func.side_effect = ValueError('Test error')
self.assertFalse(inbox_service.exited)
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
ready_for_message = True
time.sleep(0.03)
## Indicates the exit func on inbox_service was called when in "test_mode"
self.assertTrue(inbox_service.exited)
mock_kafka_consumer.commit.assert_not_called() | [
"ignition.service.messaging.KafkaDeliveryService",
"ignition.service.messaging.KafkaInboxService",
"ignition.service.messaging.MessagingProperties",
"unittest.mock.MagicMock",
"ignition.service.messaging.PostalService",
"unittest.mock.call",
"time.sleep",
"ignition.service.messaging.Message",
"copy.... | [((2218, 2267), 'unittest.mock.patch', 'patch', (['"""ignition.service.messaging.KafkaProducer"""'], {}), "('ignition.service.messaging.KafkaProducer')\n", (2223, 2267), False, 'from unittest.mock import patch, MagicMock, call\n'), ((3088, 3137), 'unittest.mock.patch', 'patch', (['"""ignition.service.messaging.KafkaProducer"""'], {}), "('ignition.service.messaging.KafkaProducer')\n", (3093, 3137), False, 'from unittest.mock import patch, MagicMock, call\n'), ((4448, 4500), 'unittest.mock.patch', 'patch', (['"""ignition.service.messaging.KafkaInboxThread"""'], {}), "('ignition.service.messaging.KafkaInboxThread')\n", (4453, 4500), False, 'from unittest.mock import patch, MagicMock, call\n'), ((5087, 5136), 'unittest.mock.patch', 'patch', (['"""ignition.service.messaging.KafkaConsumer"""'], {}), "('ignition.service.messaging.KafkaConsumer')\n", (5092, 5136), False, 'from unittest.mock import patch, MagicMock, call\n'), ((5591, 5640), 'unittest.mock.patch', 'patch', (['"""ignition.service.messaging.KafkaConsumer"""'], {}), "('ignition.service.messaging.KafkaConsumer')\n", (5596, 5640), False, 'from unittest.mock import patch, MagicMock, call\n'), ((8066, 8109), 'unittest.mock.patch', 'patch', (['"""ignition.service.messaging._thread"""'], {}), "('ignition.service.messaging._thread')\n", (8071, 8109), False, 'from unittest.mock import patch, MagicMock, call\n'), ((8115, 8164), 'unittest.mock.patch', 'patch', (['"""ignition.service.messaging.KafkaConsumer"""'], {}), "('ignition.service.messaging.KafkaConsumer')\n", (8120, 8164), False, 'from unittest.mock import patch, MagicMock, call\n'), ((360, 371), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (369, 371), False, 'from unittest.mock import patch, MagicMock, call\n'), ((696, 754), 'ignition.service.messaging.PostalService', 'PostalService', ([], {'delivery_service': 'self.mock_delivery_service'}), '(delivery_service=self.mock_delivery_service)\n', (709, 754), False, 'from 
ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((1032, 1090), 'ignition.service.messaging.PostalService', 'PostalService', ([], {'delivery_service': 'self.mock_delivery_service'}), '(delivery_service=self.mock_delivery_service)\n', (1045, 1090), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((1392, 1413), 'ignition.service.messaging.MessagingProperties', 'MessagingProperties', ([], {}), '()\n', (1411, 1413), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((1903, 1924), 'ignition.service.messaging.MessagingProperties', 'MessagingProperties', ([], {}), '()\n', (1922, 1924), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((2502, 2570), 'ignition.service.messaging.KafkaDeliveryService', 'KafkaDeliveryService', ([], {'messaging_properties': 'self.messaging_properties'}), '(messaging_properties=self.messaging_properties)\n', (2522, 2570), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((3254, 3322), 'ignition.service.messaging.KafkaDeliveryService', 'KafkaDeliveryService', ([], {'messaging_properties': 'self.messaging_properties'}), '(messaging_properties=self.messaging_properties)\n', (3274, 3322), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((3629, 3650), 'ignition.service.messaging.MessagingProperties', 'MessagingProperties', ([], {}), '()\n', (3648, 3650), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, 
MessagingProperties\n'), ((4136, 4157), 'ignition.service.messaging.MessagingProperties', 'MessagingProperties', ([], {}), '()\n', (4155, 4157), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((4601, 4666), 'ignition.service.messaging.KafkaInboxService', 'KafkaInboxService', ([], {'messaging_properties': 'self.messaging_properties'}), '(messaging_properties=self.messaging_properties)\n', (4618, 4666), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((4698, 4709), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4707, 4709), False, 'from unittest.mock import patch, MagicMock, call\n'), ((5241, 5306), 'ignition.service.messaging.KafkaInboxService', 'KafkaInboxService', ([], {'messaging_properties': 'self.messaging_properties'}), '(messaging_properties=self.messaging_properties)\n', (5258, 5306), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((5338, 5349), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (5347, 5349), False, 'from unittest.mock import patch, MagicMock, call\n'), ((5813, 5824), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (5822, 5824), False, 'from unittest.mock import patch, MagicMock, call\n'), ((5849, 5860), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (5858, 5860), False, 'from unittest.mock import patch, MagicMock, call\n'), ((6516, 6581), 'ignition.service.messaging.KafkaInboxService', 'KafkaInboxService', ([], {'messaging_properties': 'self.messaging_properties'}), '(messaging_properties=self.messaging_properties)\n', (6533, 6581), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((6613, 6624), 
'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6622, 6624), False, 'from unittest.mock import patch, MagicMock, call\n'), ((6717, 6733), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (6727, 6733), False, 'import time\n'), ((7928, 7941), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7938, 7941), False, 'import time\n'), ((8360, 8371), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (8369, 8371), False, 'from unittest.mock import patch, MagicMock, call\n'), ((8764, 8850), 'ignition.service.messaging.KafkaInboxService', 'KafkaInboxService', ([], {'test_mode': '(True)', 'messaging_properties': 'self.messaging_properties'}), '(test_mode=True, messaging_properties=self.\n messaging_properties)\n', (8781, 8850), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((8877, 8888), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (8886, 8888), False, 'from unittest.mock import patch, MagicMock, call\n'), ((9129, 9145), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (9139, 9145), False, 'import time\n'), ((503, 518), 'ignition.service.messaging.PostalService', 'PostalService', ([], {}), '()\n', (516, 518), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((796, 819), 'ignition.service.messaging.Message', 'Message', (['"""test message"""'], {}), "('test message')\n", (803, 819), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((1689, 1711), 'ignition.service.messaging.KafkaDeliveryService', 'KafkaDeliveryService', ([], {}), '()\n', (1709, 1711), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((2045, 2108), 
'ignition.service.messaging.KafkaDeliveryService', 'KafkaDeliveryService', ([], {'messaging_properties': 'messaging_properties'}), '(messaging_properties=messaging_properties)\n', (2065, 2108), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((2618, 2641), 'ignition.service.messaging.Message', 'Message', (['"""test message"""'], {}), "('test message')\n", (2625, 2641), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((3925, 3944), 'ignition.service.messaging.KafkaInboxService', 'KafkaInboxService', ([], {}), '()\n', (3942, 3944), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((4278, 4338), 'ignition.service.messaging.KafkaInboxService', 'KafkaInboxService', ([], {'messaging_properties': 'messaging_properties'}), '(messaging_properties=messaging_properties)\n', (4295, 4338), False, 'from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties\n'), ((6844, 6887), 'copy.copy', 'copy.copy', (['self.messaging_properties.config'], {}), '(self.messaging_properties.config)\n', (6853, 6887), False, 'import copy\n'), ((7612, 7625), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7622, 7625), False, 'import time\n'), ((6337, 6354), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (6347, 6354), False, 'import time\n'), ((7848, 7854), 'unittest.mock.call', 'call', ([], {}), '()\n', (7852, 7854), False, 'from unittest.mock import patch, MagicMock, call\n'), ((7856, 7862), 'unittest.mock.call', 'call', ([], {}), '()\n', (7860, 7862), False, 'from unittest.mock import patch, MagicMock, call\n')] |
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
# This is bullshit! Problem was not solved!
# The first connection is successful, but after that postgres closes the connection and
# reconnects. At the moment, this script has already worked, so the application container crashes.
class Command(BaseCommand):
'''Django command to pause execution until database is avaliable'''
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_connection = None
while not db_connection:
try:
time.sleep(2) # ha-ha, yes! it's just a delay for the database to start up
db_connection = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database is up!'))
| [
"time.sleep"
] | [((666, 679), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (676, 679), False, 'import time\n'), ((931, 944), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (941, 944), False, 'import time\n')] |
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import libraries and modules
import tensorflow as tf
# Set logging verbosity to INFO for richer output
tf.logging.set_verbosity(tf.logging.INFO)
# The number of video classes
NUM_CLASSES = 4716
# Create an input function to read our training and validation data
# Then provide the results to the Estimator API
def read_dataset_video(file_pattern, mode, batch_size):
def _input_fn():
print("\nread_dataset_video: _input_fn: file_pattern = {}".format(file_pattern))
print("read_dataset_video: _input_fn: mode = {}".format(mode))
print("read_dataset_video: _input_fn: batch_size = {}".format(batch_size))
# This function will decode frame examples from the frame level TF Records
def decode_example(serialized_examples):
# Create feature map
feature_map = {
'video_id': tf.FixedLenFeature(shape = [], dtype = tf.string),
'labels': tf.VarLenFeature(dtype = tf.int64),
'mean_rgb': tf.FixedLenFeature(shape = [1024], dtype = tf.float32),
'mean_audio': tf.FixedLenFeature(shape = [128], dtype = tf.float32)
}
# Parse TF Records into our features
features = tf.parse_single_example(serialized = serialized_examples, features = feature_map)
print("\nread_dataset_video: _input_fn: decode_example: features = {}".format(features)) # shape = video_id = (), mean_rgb = (1024,), mean_audio = (128,), labels = SparseTensor object
# Extract and format labels
sparse_labels = features.pop("labels") # SparseTensor object
print("read_dataset_video: _input_fn: decode_example: sparse_labels = {}\n".format(sparse_labels))
labels = tf.cast(x = tf.sparse_to_dense(sparse_indices = sparse_labels.values, output_shape = (NUM_CLASSES,), sparse_values = 1, validate_indices = False), dtype = tf.float32)
print("read_dataset_video: _input_fn: decode_example: labels = {}\n".format(labels)) # shape = (NUM_CLASSES,)
return features, labels
# Create list of files from file pattern
file_list = tf.gfile.Glob(filename = file_pattern)
#print("read_dataset_video: _input_fn: file_list = {}".format(file_list))
# Create dataset from file list
dataset = tf.data.TFRecordDataset(filenames = file_list)
print("read_dataset_video: _input_fn: dataset.TFRecordDataset = {}".format(dataset))
# Decode TF Record dataset examples
dataset = dataset.map(map_func = lambda x: decode_example(serialized_examples = x))
print("read_dataset_video: _input_fn: dataset.map = {}".format(dataset))
# Determine amount of times to repeat file and if we should shuffle based on if we are training or evaluating
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # read files forever
# Shuffle the dataset within a buffer
dataset = dataset.shuffle(buffer_size = batch_size * 10, seed = None)
print("read_dataset_video: _input_fn: dataset.shuffle = {}".format(dataset))
else:
num_epochs = 1 # read files only once
# Repeat files num_epoch times
dataset = dataset.repeat(count = num_epochs)
print("read_dataset_video: _input_fn: dataset.repeat = {}".format(dataset))
# Group the data into batches
dataset = dataset.batch(batch_size = batch_size)
print("read_dataset_video: _input_fn: dataset.batch = {}".format(dataset))
# Create a iterator and then pull the next batch of features and labels from the example queue
batch_features, batch_labels = dataset.make_one_shot_iterator().get_next()
print("read_dataset_video: _input_fn: batch_features = {}".format(batch_features))
print("read_dataset_video: _input_fn: batch_labels = {}\n".format(batch_labels))
return batch_features, batch_labels
return _input_fn
# Create our model function to be used in our custom estimator
def video_level_model(features, labels, mode, params):
print("\nvideo_level_model: features = {}".format(features))
print("video_level_model: labels = {}".format(labels))
print("video_level_model: mode = {}".format(mode))
# 0. Configure network
# Get dynamic batch size
current_batch_size = tf.shape(features['mean_rgb'])[0]
print("video_level_model: current_batch_size = {}".format(current_batch_size))
# Stack all of the features into a 3-D tensor
combined_features = tf.concat(values = [features['mean_rgb'], features['mean_audio']], axis = 1) # shape = (current_batch_size, 1024 + 128)
print("video_level_model: combined_features = {}".format(combined_features))
# 1. Create the DNN structure now
# Create the input layer to our frame DNN
network = combined_features # shape = (current_batch_size, 1024 + 128)
print("video_level_model: network = combined_features = {}".format(network))
# Add hidden layers with the given number of units/neurons per layer
for units in params['hidden_units']:
network = tf.layers.dense(inputs = network, units = units, activation = tf.nn.relu) # shape = (current_batch_size, units)
print("video_level_model: network = {}, units = {}".format(network, units))
# Connect the final hidden layer to a dense layer with no activation to get the logits
logits = tf.layers.dense(inputs = network, units = NUM_CLASSES, activation = None) # shape = (current_batch_size, NUM_CLASSES)
print("video_level_model: logits = {}".format(logits))
# Select the top k logits in descending order
top_k_logits = tf.nn.top_k(input = logits, k = params['top_k'], sorted = True) # shape = (current_batch_size, top_k)
print("video_level_model: top_k_logits = {}".format(top_k_logits))
# Since this is a multi-class, multi-label problem we will apply a sigmoid, not a softmax, to each logit to get its own probability
probabilities = tf.sigmoid(logits) # shape = (current_batch_size, NUM_CLASSES)
print("video_level_model: probabilities = {}".format(probabilities))
# Select the top k probabilities in descending order
top_k_probabilities = tf.sigmoid(top_k_logits.values) # shape = (current_batch_size, top_k)
print("video_level_model: top_k_probabilities = {}".format(top_k_probabilities))
# Select the top k classes in descending order of likelihood
top_k_classes = top_k_logits.indices # shape = (current_batch_size, top_k)
print("video_level_model: top_k_classes = {}".format(top_k_classes))
# The 0/1 predictions based on a threshold, in this case the threshold is if the probability it greater than random chance
predictions = tf.where(
condition = probabilities > 1.0 / NUM_CLASSES, # shape = (current_batch_size, NUM_CLASSES)
x = tf.ones_like(tensor = probabilities),
y = tf.zeros_like(tensor = probabilities))
print("video_level_model: predictions = {}".format(predictions))
top_k_predictions = tf.where(
condition = top_k_probabilities > 1.0 / NUM_CLASSES, # shape = (current_batch_size, top_k)
x = tf.ones_like(tensor = top_k_probabilities),
y = tf.zeros_like(tensor = top_k_probabilities))
print("video_level_model: top_k_predictions = {}\n".format(top_k_predictions))
# 2. Loss function, training/eval ops
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
# Since this is a multi-class, multi-label problem, we will use sigmoid activation and cross entropy loss
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels = labels, logits = logits)
train_op = tf.contrib.layers.optimize_loss(
loss = loss,
global_step = tf.train.get_global_step(),
learning_rate = 0.01,
optimizer = "Adam")
eval_metric_ops = {
"accuracy": tf.metrics.mean_per_class_accuracy(labels = labels, predictions = predictions, num_classes = NUM_CLASSES)
}
else:
loss = None
train_op = None
eval_metric_ops = None
# 3. Create predictions
predictions_dict = {"logits": top_k_logits.values,
"probabilities": top_k_probabilities,
"predictions": top_k_predictions,
"classes": top_k_classes}
# 4. Create export outputs
export_outputs = {"predict_export_outputs": tf.estimator.export.PredictOutput(outputs = predictions_dict)}
# 5. Return EstimatorSpec
return tf.estimator.EstimatorSpec(
mode = mode,
predictions = predictions_dict,
loss = loss,
train_op = train_op,
eval_metric_ops = eval_metric_ops,
export_outputs = export_outputs)
# Create our serving input function to accept the data at serving and send it in the right format to our custom estimator
def serving_input_fn():
# This function fixes the shape and type of our input strings
def fix_shape_and_type_for_serving(placeholder):
# String split each string in the batch and output the values from the resulting SparseTensors
split_string = tf.map_fn(
fn = lambda x: tf.string_split(source = [placeholder[x]], delimiter=',').values,
elems = tf.range(start = 0, limit = tf.shape(input = placeholder)[0]),
dtype = tf.string) # shape = (batch_size, input_sequence_length)
print("serving_input_fn: fix_shape_and_type_for_serving: split_string = {}".format(split_string))
# Convert each string in the split tensor to float
feature_tensor = tf.string_to_number(string_tensor = split_string, out_type = tf.float32) # shape = (batch_size, input_sequence_length)
print("serving_input_fn: fix_shape_and_type_for_serving: feature_tensor = {}".format(feature_tensor))
return feature_tensor
# This function fixes dynamic shape ambiguity of last dimension so that we will be able to use it in our DNN (since tf.layers.dense require the last dimension to be known)
def get_shape_and_set_modified_shape_2D(tensor, additional_dimension_sizes):
# Get static shape for tensor and convert it to list
shape = tensor.get_shape().as_list()
# Set outer shape to additional_dimension_sizes[0] since we know that this is the correct size
shape[1] = additional_dimension_sizes[0]
# Set the shape of tensor to our modified shape
tensor.set_shape(shape = shape) # shape = (batch_size, additional_dimension_sizes[0])
print("serving_input_fn: get_shape_and_set_modified_shape_2D: tensor = {}, additional_dimension_sizes = {}".format(tensor, additional_dimension_sizes))
return tensor
# Create placeholders to accept the data sent to the model at serving time
feature_placeholders = { # all features come in as a batch of strings, shape = (batch_size,), this was so because of passing the arrays to online ml-engine prediction
'video_id': tf.placeholder(dtype = tf.string, shape = [None]),
'mean_rgb': tf.placeholder(dtype = tf.string, shape = [None]),
'mean_audio': tf.placeholder(dtype = tf.string, shape = [None])
}
print("\nserving_input_fn: feature_placeholders = {}".format(feature_placeholders))
# Create feature tensors
features = {
"video_id": feature_placeholders["video_id"],
"mean_rgb": fix_shape_and_type_for_serving(placeholder = feature_placeholders["mean_rgb"]),
"mean_audio": fix_shape_and_type_for_serving(placeholder = feature_placeholders["mean_audio"])
}
print("serving_input_fn: features = {}".format(features))
# Fix dynamic shape ambiguity of feature tensors for our DNN
features["mean_rgb"] = get_shape_and_set_modified_shape_2D(tensor = features["mean_rgb"], additional_dimension_sizes = [1024])
features["mean_audio"] = get_shape_and_set_modified_shape_2D(tensor = features["mean_audio"], additional_dimension_sizes = [128])
print("serving_input_fn: features = {}\n".format(features))
return tf.estimator.export.ServingInputReceiver(features = features, receiver_tensors = feature_placeholders)
# Create custom estimator's train and evaluate function
def train_and_evaluate(args):
# Create custom estimator's train and evaluate function
estimator = tf.estimator.Estimator(
model_fn = video_level_model,
model_dir = args['output_dir'],
params = {'hidden_units': args['hidden_units'], 'top_k': args['top_k']})
# Create train spec to read in our training data
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset_video(
file_pattern = args['train_file_pattern'],
mode = tf.estimator.ModeKeys.TRAIN,
batch_size = args['batch_size']),
max_steps = args['train_steps'])
# Create exporter to save out the complete model to disk
exporter = tf.estimator.LatestExporter(name = 'exporter', serving_input_receiver_fn = serving_input_fn)
# Create eval spec to read in our validation data and export our model
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset_video(
file_pattern = args['eval_file_pattern'],
mode = tf.estimator.ModeKeys.EVAL,
batch_size = args['batch_size']),
steps = None,
exporters = exporter,
start_delay_secs = args['start_delay_secs'],
throttle_secs = args['throttle_secs'])
# Create train and evaluate loop to train and evaluate our estimator
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) | [
"tensorflow.shape",
"tensorflow.logging.set_verbosity",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.string_split",
"tensorflow.estimator.export.PredictOutput",
"tensorflow.ones_like",
"tensorflow.estimator.LatestExporter",
"tensorflow.estimator.train_and_evaluate",
"tensorflow.placeholder",
... | [((747, 788), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (771, 788), True, 'import tensorflow as tf\n'), ((5278, 5350), 'tensorflow.concat', 'tf.concat', ([], {'values': "[features['mean_rgb'], features['mean_audio']]", 'axis': '(1)'}), "(values=[features['mean_rgb'], features['mean_audio']], axis=1)\n", (5287, 5350), True, 'import tensorflow as tf\n'), ((6184, 6251), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'network', 'units': 'NUM_CLASSES', 'activation': 'None'}), '(inputs=network, units=NUM_CLASSES, activation=None)\n', (6199, 6251), True, 'import tensorflow as tf\n'), ((6439, 6496), 'tensorflow.nn.top_k', 'tf.nn.top_k', ([], {'input': 'logits', 'k': "params['top_k']", 'sorted': '(True)'}), "(input=logits, k=params['top_k'], sorted=True)\n", (6450, 6496), True, 'import tensorflow as tf\n'), ((6773, 6791), 'tensorflow.sigmoid', 'tf.sigmoid', (['logits'], {}), '(logits)\n', (6783, 6791), True, 'import tensorflow as tf\n'), ((7001, 7032), 'tensorflow.sigmoid', 'tf.sigmoid', (['top_k_logits.values'], {}), '(top_k_logits.values)\n', (7011, 7032), True, 'import tensorflow as tf\n'), ((9418, 9588), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions_dict', 'loss': 'loss', 'train_op': 'train_op', 'eval_metric_ops': 'eval_metric_ops', 'export_outputs': 'export_outputs'}), '(mode=mode, predictions=predictions_dict, loss=\n loss, train_op=train_op, eval_metric_ops=eval_metric_ops,\n export_outputs=export_outputs)\n', (9444, 9588), True, 'import tensorflow as tf\n'), ((12991, 13093), 'tensorflow.estimator.export.ServingInputReceiver', 'tf.estimator.export.ServingInputReceiver', ([], {'features': 'features', 'receiver_tensors': 'feature_placeholders'}), '(features=features,\n receiver_tensors=feature_placeholders)\n', (13031, 13093), True, 'import tensorflow as tf\n'), ((13262, 13422), 
'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'model_fn': 'video_level_model', 'model_dir': "args['output_dir']", 'params': "{'hidden_units': args['hidden_units'], 'top_k': args['top_k']}"}), "(model_fn=video_level_model, model_dir=args[\n 'output_dir'], params={'hidden_units': args['hidden_units'], 'top_k':\n args['top_k']})\n", (13284, 13422), True, 'import tensorflow as tf\n'), ((13859, 13952), 'tensorflow.estimator.LatestExporter', 'tf.estimator.LatestExporter', ([], {'name': '"""exporter"""', 'serving_input_receiver_fn': 'serving_input_fn'}), "(name='exporter', serving_input_receiver_fn=\n serving_input_fn)\n", (13886, 13952), True, 'import tensorflow as tf\n'), ((14495, 14560), 'tensorflow.estimator.train_and_evaluate', 'tf.estimator.train_and_evaluate', (['estimator', 'train_spec', 'eval_spec'], {}), '(estimator, train_spec, eval_spec)\n', (14526, 14560), True, 'import tensorflow as tf\n'), ((2822, 2858), 'tensorflow.gfile.Glob', 'tf.gfile.Glob', ([], {'filename': 'file_pattern'}), '(filename=file_pattern)\n', (2835, 2858), True, 'import tensorflow as tf\n'), ((3006, 3050), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', ([], {'filenames': 'file_list'}), '(filenames=file_list)\n', (3029, 3050), True, 'import tensorflow as tf\n'), ((5082, 5112), 'tensorflow.shape', 'tf.shape', (["features['mean_rgb']"], {}), "(features['mean_rgb'])\n", (5090, 5112), True, 'import tensorflow as tf\n'), ((5871, 5938), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'network', 'units': 'units', 'activation': 'tf.nn.relu'}), '(inputs=network, units=units, activation=tf.nn.relu)\n', (5886, 5938), True, 'import tensorflow as tf\n'), ((8417, 8490), 'tensorflow.losses.sigmoid_cross_entropy', 'tf.losses.sigmoid_cross_entropy', ([], {'multi_class_labels': 'labels', 'logits': 'logits'}), '(multi_class_labels=labels, logits=logits)\n', (8448, 8490), True, 'import tensorflow as tf\n'), ((9310, 9369), 
'tensorflow.estimator.export.PredictOutput', 'tf.estimator.export.PredictOutput', ([], {'outputs': 'predictions_dict'}), '(outputs=predictions_dict)\n', (9343, 9369), True, 'import tensorflow as tf\n'), ((10510, 10578), 'tensorflow.string_to_number', 'tf.string_to_number', ([], {'string_tensor': 'split_string', 'out_type': 'tf.float32'}), '(string_tensor=split_string, out_type=tf.float32)\n', (10529, 10578), True, 'import tensorflow as tf\n'), ((11905, 11950), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string', 'shape': '[None]'}), '(dtype=tf.string, shape=[None])\n', (11919, 11950), True, 'import tensorflow as tf\n'), ((11977, 12022), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string', 'shape': '[None]'}), '(dtype=tf.string, shape=[None])\n', (11991, 12022), True, 'import tensorflow as tf\n'), ((12051, 12096), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string', 'shape': '[None]'}), '(dtype=tf.string, shape=[None])\n', (12065, 12096), True, 'import tensorflow as tf\n'), ((1890, 1967), 'tensorflow.parse_single_example', 'tf.parse_single_example', ([], {'serialized': 'serialized_examples', 'features': 'feature_map'}), '(serialized=serialized_examples, features=feature_map)\n', (1913, 1967), True, 'import tensorflow as tf\n'), ((7659, 7693), 'tensorflow.ones_like', 'tf.ones_like', ([], {'tensor': 'probabilities'}), '(tensor=probabilities)\n', (7671, 7693), True, 'import tensorflow as tf\n'), ((7711, 7746), 'tensorflow.zeros_like', 'tf.zeros_like', ([], {'tensor': 'probabilities'}), '(tensor=probabilities)\n', (7724, 7746), True, 'import tensorflow as tf\n'), ((7970, 8010), 'tensorflow.ones_like', 'tf.ones_like', ([], {'tensor': 'top_k_probabilities'}), '(tensor=top_k_probabilities)\n', (7982, 8010), True, 'import tensorflow as tf\n'), ((8028, 8069), 'tensorflow.zeros_like', 'tf.zeros_like', ([], {'tensor': 'top_k_probabilities'}), '(tensor=top_k_probabilities)\n', (8041, 8069), True, 'import tensorflow as 
tf\n'), ((8757, 8860), 'tensorflow.metrics.mean_per_class_accuracy', 'tf.metrics.mean_per_class_accuracy', ([], {'labels': 'labels', 'predictions': 'predictions', 'num_classes': 'NUM_CLASSES'}), '(labels=labels, predictions=predictions,\n num_classes=NUM_CLASSES)\n', (8791, 8860), True, 'import tensorflow as tf\n'), ((1515, 1560), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', ([], {'shape': '[]', 'dtype': 'tf.string'}), '(shape=[], dtype=tf.string)\n', (1533, 1560), True, 'import tensorflow as tf\n'), ((1593, 1625), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.int64'}), '(dtype=tf.int64)\n', (1609, 1625), True, 'import tensorflow as tf\n'), ((1658, 1708), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', ([], {'shape': '[1024]', 'dtype': 'tf.float32'}), '(shape=[1024], dtype=tf.float32)\n', (1676, 1708), True, 'import tensorflow as tf\n'), ((1745, 1794), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', ([], {'shape': '[128]', 'dtype': 'tf.float32'}), '(shape=[128], dtype=tf.float32)\n', (1763, 1794), True, 'import tensorflow as tf\n'), ((8607, 8633), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (8631, 8633), True, 'import tensorflow as tf\n'), ((2432, 2562), 'tensorflow.sparse_to_dense', 'tf.sparse_to_dense', ([], {'sparse_indices': 'sparse_labels.values', 'output_shape': '(NUM_CLASSES,)', 'sparse_values': '(1)', 'validate_indices': '(False)'}), '(sparse_indices=sparse_labels.values, output_shape=(\n NUM_CLASSES,), sparse_values=1, validate_indices=False)\n', (2450, 2562), True, 'import tensorflow as tf\n'), ((10085, 10140), 'tensorflow.string_split', 'tf.string_split', ([], {'source': '[placeholder[x]]', 'delimiter': '""","""'}), "(source=[placeholder[x]], delimiter=',')\n", (10100, 10140), True, 'import tensorflow as tf\n'), ((10201, 10228), 'tensorflow.shape', 'tf.shape', ([], {'input': 'placeholder'}), '(input=placeholder)\n', (10209, 10228), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and 'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
'''
from copy import deepcopy
import logging
from lxml import etree
try:
from PIL import Image
except ImportError:
import Image
import zipfile
import shutil
import distutils.dir_util
import re
import time
import os
from os.path import join
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
TEMPLATE_DIR = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(TEMPLATE_DIR):
    TEMPLATE_DIR = join(os.path.dirname(__file__), 'template') # dev
# Name of the scratch package directory assembled inside a caller-supplied
# temp directory (see picture(), which copies images under it).
_DOCX_DIR_NAME = 'docx-template'
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
# Keys are the conventional OOXML prefixes; values are the full namespace
# URIs looked up by makeelement() when building qualified tag names.
nsprefixes = {
    # Text Content
    'mv':'urn:schemas-microsoft-com:mac:vml',
    'mo':'http://schemas.microsoft.com/office/mac/office/2008/main',
    've':'http://schemas.openxmlformats.org/markup-compatibility/2006',
    'o':'urn:schemas-microsoft-com:office:office',
    'r':'http://schemas.openxmlformats.org/officeDocument/2006/relationships',
    'm':'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'v':'urn:schemas-microsoft-com:vml',
    'w':'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10':'urn:schemas-microsoft-com:office:word',
    'wne':'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'wp':'http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing',
    'a':'http://schemas.openxmlformats.org/drawingml/2006/main',
    'pic':'http://schemas.openxmlformats.org/drawingml/2006/picture',
    # Properties (core and extended)
    'cp':"http://schemas.openxmlformats.org/package/2006/metadata/core-properties",
    'dc':"http://purl.org/dc/elements/1.1/",
    'dcterms':"http://purl.org/dc/terms/",
    'dcmitype':"http://purl.org/dc/dcmitype/",
    'xsi':"http://www.w3.org/2001/XMLSchema-instance",
    'ep':'http://schemas.openxmlformats.org/officeDocument/2006/extended-properties',
    # Content Types (we're just making up our own namespaces here to save time)
    'ct':'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships (we're just making up our own namespaces here to save time)
    'pr':'http://schemas.openxmlformats.org/package/2006/relationships'
    }
def opendocx(file):
    '''Open a docx file, return a document XML tree.

    @param file: path (or file-like object) of the .docx package to read.
    @return lxml element tree parsed from the package's word/document.xml.
    '''
    # Close the zip handle deterministically; the previous version left the
    # archive open for the lifetime of the process (resource leak).
    with zipfile.ZipFile(file) as mydoc:
        xmlcontent = mydoc.read('word/document.xml')
    document = etree.fromstring(xmlcontent)
    return document
def newdocument():
    '''Build the skeleton of a fresh document: a w:document root element
    holding one empty w:body. Return the root element.'''
    root = makeelement('document')
    root.append(makeelement('body'))
    return root
def makeelement(tagname,tagtext=None,nsprefix='w',attributes=None,attrnsprefix=None):
    '''Create and return a namespaced lxml element.

    @param string tagname: local name of the element to create.
    @param tagtext: optional text content for the element.
    @param nsprefix: key into nsprefixes (or a list of keys; the first one
                     qualifies the tag, all become the element's nsmap).
                     Pass None for an element with no namespace.
    @param dict attributes: attribute name -> value pairs to set.
    @param attrnsprefix: optional nsprefixes key used to qualify attributes.
    '''
    nsmap = None
    if isinstance(nsprefix, list):
        # Build a prefix->URI map for every requested namespace.
        nsmap = {prefix: nsprefixes[prefix] for prefix in nsprefix}
        nsprefix = nsprefix[0] # FIXME: rest of code below expects a single prefix
    namespace = '{'+nsprefixes[nsprefix]+'}' if nsprefix else ''
    element = etree.Element(namespace+tagname, nsmap=nsmap)
    if attributes:
        # Pick the namespace that qualifies attribute names. When the caller
        # gave none, 'w'-namespaced tags conventionally use 'w'-namespaced
        # attributes too; everything else gets unqualified attributes.
        if attrnsprefix:
            attr_ns = '{'+nsprefixes[attrnsprefix]+'}'
        elif nsprefix == 'w':
            attr_ns = namespace
        else:
            attr_ns = ''
        for name, value in attributes.items():
            element.set(attr_ns+name, value)
    if tagtext:
        element.text = tagtext
    return element
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.

    @param string type: 'page' for a plain page break, 'section' for a
                        section break carrying page-size properties.
    @param string orient: page orientation for section breaks, one of
                          'portrait' or 'landscape'; ignored for type='page'.
    @raise ValueError: when type or orient is not a supported value.
    '''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        raise ValueError('Page break style "%s" not implemented. Valid styles: %s.' % (type, validtypes))
    # Validate orientation up front: previously an unknown orient slipped
    # past both branches below and crashed with UnboundLocalError on pgSz.
    validorients = ['portrait', 'landscape']
    if type == 'section' and orient not in validorients:
        raise ValueError('Page orientation "%s" not implemented. Valid orientations: %s.' % (orient, validorients))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br',attributes={'type':type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz',attributes={'w':'12240','h':'15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz',attributes={'h':'12240','w':'15840', 'orient':'landscape'})
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    '''Make a new paragraph element, containing a run, and some text.
    Return the paragraph element.

    @param paratext: a plain string, or a list whose items are strings or
                     (text, style) pairs; each item becomes its own run.
    @param string style: paragraph style name placed in w:pStyle/@val.
    @param bool breakbefore: when True, emit a lastRenderedPageBreak marker
                             inside each run (hint for assistive technologies).
    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list
    If paratext is a list, spawn multiple run/text elements.
    Support text styles (paratext must then be a list of lists in the form
    <text> / <style>. Style is a string containing a combination of 'bui' chars
    example
    paratext = [
        ('some bold text', 'b'),
        ('some normal text', ''),
        ('some italic underlined text', 'iu'),
    ]
    '''
    # Make our elements
    paragraph = makeelement('p')
    # Normalise paratext into a list of [w:t element, style spec] pairs.
    if isinstance(paratext, list):
        text = []
        for pt in paratext:
            if isinstance(pt, (list,tuple)):
                text.append([makeelement('t',tagtext=pt[0]), pt[1]])
            else:
                text.append([makeelement('t',tagtext=pt), ''])
    else:
        text = [[makeelement('t',tagtext=paratext),''],]
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle',attributes={'val':style})
    pJc = makeelement('jc',attributes={'val':jc})
    pPr.append(pStyle)
    pPr.append(pJc)
    # Add the text the run, and the run to the paragraph
    paragraph.append(pPr)
    for t in text:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # A list-valued style spec is treated as pre-built run-property
        # elements; a string spec is scanned for the 'b'/'u'/'i' flag chars.
        if isinstance(t[1], list):
            for prop in t[1]: # custom properties
                rPr.append(prop)
        else:
            # Apply styles
            if t[1].find('b') > -1:
                b = makeelement('b')
                rPr.append(b)
            if t[1].find('u') > -1:
                u = makeelement('u',attributes={'val':'single'})
                rPr.append(u)
            if t[1].find('i') > -1:
                i = makeelement('i')
                rPr.append(i)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(t[0])
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
    '''Build and return the [Content_Types].xml tree declaring every part
    and file extension that the generated package contains.'''
    # FIXME - doesn't quite work...read from string as temp hack...
    #types = makeelement('Types',nsprefix='ct')
    types = etree.fromstring('''<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types"></Types>''')
    # Explicit content types: one Override element per package part.
    overrides = {
        '/word/theme/theme1.xml':'application/vnd.openxmlformats-officedocument.theme+xml',
        '/word/fontTable.xml':'application/vnd.openxmlformats-officedocument.wordprocessingml.fontTable+xml',
        '/docProps/core.xml':'application/vnd.openxmlformats-package.core-properties+xml',
        '/docProps/app.xml':'application/vnd.openxmlformats-officedocument.extended-properties+xml',
        '/word/document.xml':'application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml',
        '/word/settings.xml':'application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml',
        '/word/numbering.xml':'application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml',
        '/word/styles.xml':'application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml',
        '/word/webSettings.xml':'application/vnd.openxmlformats-officedocument.wordprocessingml.webSettings+xml',
    }
    for partname, contenttype in overrides.items():
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': partname, 'ContentType': contenttype}))
    # Fallback content types keyed by file extension.
    defaults = {
        'rels':'application/vnd.openxmlformats-package.relationships+xml',
        'xml':'application/xml',
        'jpeg':'image/jpeg',
        'gif':'image/gif',
        'png':'image/png',
    }
    for extension, contenttype in defaults.items():
        types.append(makeelement('Default', nsprefix=None,
                                 attributes={'Extension': extension, 'ContentType': contenttype}))
    return types
def heading(headingtext,headinglevel,lang='en'):
    '''Build a heading paragraph: a w:p styled "<Heading|Titolo><level>"
    containing a single run with the given text. Return the paragraph.

    @param headingtext: the text of the heading.
    @param headinglevel: numeric heading level appended to the style name.
    @param lang: 'en' or 'it'; selects the localised heading style name.
    '''
    style_names = {
        'en': 'Heading',
        'it': 'Titolo',
    }
    para = makeelement('p')
    props = makeelement('pPr')
    props.append(makeelement('pStyle',attributes={'val':style_names[lang]+str(headinglevel)}))
    run = makeelement('r')
    run.append(makeelement('t',tagtext=headingtext))
    # Properties first, then the run, per the OOXML paragraph content model.
    para.append(props)
    para.append(run)
    return para
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0, twunit='auto', borders={}, celstyle=None, rowstyle=None, table_props=None):
    '''Get a list of lists, return a table
    @param list contents: A list of lists describing contents
                          Every item in the list can be a string or a valid
                          XML element itself. It can also be a list. In that case
                          all the listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be threated as heading
                          or not
    @param list colw:     A list of interger. The list must have same element
                          count of content lines. Specify column Widths in
                          wunitS
    @param string cwunit: Unit user for column width:
                            'pct': fifties of a percent
                            'dxa': twenties of a point
                            'nil': no width
                            'auto': automagically determined
    @param int tblw:      Table width
    @param int twunit:    Unit used for table width. Same as cwunit
    @param dict borders:  Dictionary defining table border. Supported keys are:
                          'top', 'left', 'bottom', 'right', 'insideH', 'insideV', 'all'
                          When specified, the 'all' key has precedence over others.
                          Each key must define a dict of border attributes:
                            color: The color of the border, in hex or 'auto'
                            space: The space, measured in points
                            sz: The size of the border, in eights of a point
                            val: The style of the border, see http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align': specify the alignment, see paragraph documentation,
    @param dict rowstyle: Optional row style; a 'height' key sets an exact
                          trHeight on every content row.
    @param dict table_props: Optional extra w:tblPr children, either as
                          pre-built etree elements or tag->attributes dicts.
    @return lxml.etree:   Generated XML etree element
    '''
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle',attributes={'val':''})
    tableprops.append(tablestyle)
    if not table_props:
        table_props = {}
    # Caller-supplied tblPr children: ready-made elements pass through,
    # anything else is treated as tag -> attribute-dict.
    for k, attr in table_props.items():
        if isinstance(attr, etree._Element):
            tableprops.append(attr)
        else:
            prop = makeelement(k, attributes=attr)
            tableprops.append(prop)
    tablewidth = makeelement('tblW',attributes={'w':str(tblw),'type':str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        # 'all' overrides any individually-specified side.
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = str(borders[k][a])
                borderelem = makeelement(b,attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook',attributes={'val':'0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid: one gridCol per column; 2390 is the fallback width.
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        tablegrid.append(makeelement('gridCol',attributes={'w':str(colw[i]) if colw else '2390'}))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle',attributes={'val':'000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w':str(colw[i]),'type':cwunit}
            else:
                wattr = {'w':'0','type':'auto'}
            cellwidth = makeelement('tcW',attributes=wattr)
            cellstyle = makeelement('shd',attributes={'val':'clear','color':'auto','fill':'FFFFFF','themeFill':'text2','themeFillTint':'99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading, (list, tuple)):
                heading = [heading,]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h,jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        if rowstyle:
            rowprops = makeelement('trPr')
            if 'height' in rowstyle:
                rowHeight = makeelement('trHeight', attributes={'val': str(rowstyle['height']),
                                                                'hRule': 'exact'})
                rowprops.append(rowHeight)
            row.append(rowprops)
        i = 0
        for content_cell in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w':str(colw[i]),'type':cwunit}
            else:
                wattr = {'w':'0','type':'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            align = 'left'
            cell_spec_style = {}
            if celstyle:
                cell_spec_style = deepcopy(celstyle[i])
            # A dict cell carries its own per-cell style overrides.
            if isinstance(content_cell, dict):
                cell_spec_style.update(content_cell['style'])
                content_cell = content_cell['content']
            # spec. align property
            SPEC_PROPS = ['align',]
            if 'align' in cell_spec_style:
                # NOTE(review): this reads celstyle[i], not cell_spec_style —
                # an 'align' supplied only via content_cell['style'] is ignored
                # (and crashes if celstyle is None); confirm intended behavior.
                align = celstyle[i]['align']
            # any property for cell, by OOXML specification
            for cs, attrs in cell_spec_style.items():
                if cs in SPEC_PROPS:
                    continue
                cell_prop = makeelement(cs, attributes=attrs)
                cellprops.append(cell_prop)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content_cell, (list, tuple)):
                content_cell = [content_cell,]
            for c in content_cell:
                # cell.append(cellprops)
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(relationshiplist, picname, picdescription, pixelwidth=None,
            pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
            temp_dir=None):
    '''Take a relationshiplist, picture file name, and return a paragraph containing the image
    and an updated relationshiplist

    @param list relationshiplist: existing package relationships; an image
                                  relationship is appended in place.
    @param picname: file name of the image to embed (copied into the
                    package's word/media directory).
    @param picdescription: alt-text placed on the drawing's docPr element.
    @param pixelwidth/pixelheight: optional display size in pixels; when
                                   omitted, read from the image file itself.
    @param nochangeaspect/nochangearrowheads: lock flags on the picture.
    @param temp_dir: required scratch directory the package is assembled in.
    @return (relationshiplist, paragraph) tuple.
    '''
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture'''
    # Copy the file into the media dir
    assert temp_dir
    media_dir = join(temp_dir, _DOCX_DIR_NAME, 'word', 'media')
    if not os.path.isdir(media_dir):
        os.makedirs(media_dir)
    shutil.copyfile(picname, join(media_dir,picname))
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth,pixelheight = Image.open(picname).size[0:2]
    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    # NOTE(review): 12667 EMU per pixel is the historical python-docx value;
    # it does not correspond to an exact standard DPI — confirm before reuse.
    emuperpixel = 12667
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # Set relationship ID to the first available
    picid = '2'
    picrelid = 'rId'+str(len(relationshiplist)+1)
    relationshiplist.append([
        'http://schemas.openxmlformats.org/officeDocument/2006/relationships/image',
        'media/'+picname])
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area (stretch, tile, etc.)
    blipfill = makeelement('blipFill',nsprefix='pic')
    blipfill.append(makeelement('blip',nsprefix='a',attrnsprefix='r',attributes={'embed':picrelid}))
    stretch = makeelement('stretch',nsprefix='a')
    stretch.append(makeelement('fillRect',nsprefix='a'))
    blipfill.append(makeelement('srcRect',nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr',nsprefix='pic')
    cnvpr = makeelement('cNvPr',nsprefix='pic',
                        attributes={'id':'0','name':'Picture 1','descr':picname})
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr',nsprefix='pic')
    cnvpicpr.append(makeelement('picLocks', nsprefix='a',
                    attributes={'noChangeAspect':str(int(nochangeaspect)),
                                'noChangeArrowheads':str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties
    sppr = makeelement('spPr',nsprefix='pic',attributes={'bwMode':'auto'})
    xfrm = makeelement('xfrm',nsprefix='a')
    xfrm.append(makeelement('off',nsprefix='a',attributes={'x':'0','y':'0'}))
    xfrm.append(makeelement('ext',nsprefix='a',attributes={'cx':width,'cy':height}))
    prstgeom = makeelement('prstGeom',nsprefix='a',attributes={'prst':'rect'})
    prstgeom.append(makeelement('avLst',nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic',nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement('graphicData',nsprefix='a',
                              attributes={'uri':'http://schemas.openxmlformats.org/drawingml/2006/picture'})
    graphicdata.append(pic)
    graphic = makeelement('graphic',nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks',nsprefix='a',attributes={'noChangeAspect':'1'})
    framepr = makeelement('cNvGraphicFramePr',nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr',nsprefix='wp',
                        attributes={'id':picid,'name':'Picture 1','descr':picdescription})
    effectextent = makeelement('effectExtent',nsprefix='wp',
                               attributes={'l':'25400','t':'0','r':'0','b':'0'})
    extent = makeelement('extent',nsprefix='wp',attributes={'cx':width,'cy':height})
    inline = makeelement('inline',
                         attributes={'distT':"0",'distB':"0",'distL':"0",'distR':"0"},nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    return relationshiplist,paragraph
def search(document,search):
    '''Search a document for a regex, return success / fail result.

    @param document: etree document whose w:t elements are scanned.
    @param string search: regular expression to look for.
    @return bool: True as soon as any text element matches, else False.
    '''
    searchre = re.compile(search)
    # Hoist the qualified tag name out of the loop; it is loop-invariant.
    t_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        if element.tag == t_tag: # t (text) elements
            if element.text and searchre.search(element.text):
                # First hit decides the result; no need to scan the rest
                # of the document (the old code always walked it all).
                return True
    return False
def replace(document,search,replace):
    '''Replace all occurrences of string with a different string, return updated document.

    @param document: etree document, modified in place.
    @param string search: regular expression to look for in each w:t element.
    @param replace: replacement text (or pattern template for re.sub).
    @return the same document object with substitutions applied.
    '''
    newdocument = document
    searchre = re.compile(search)
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
            if element.text:
                if searchre.search(element.text):
                    # Reuse the compiled pattern instead of recompiling the
                    # raw string via re.sub() on every matching element.
                    element.text = searchre.sub(replace,element.text)
    return newdocument
def clean(document):
    """ Perform misc cleaning operations on documents.
    Removes w:t and w:r elements that carry neither text nor children.
    Returns cleaned document.
    """
    # Process 't' first, then 'r': stripping empty text nodes may leave
    # their run elements empty, so the second pass can pick those up.
    for tagname in ('t', 'r'):
        target = '{%s}%s' % (nsprefixes['w'], tagname)
        doomed = [el for el in document.iter()
                  if el.tag == target and not el.text and not len(el)]
        # Collect first, then remove, so we never mutate while iterating.
        for el in doomed:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type

    @param object element: etree element (the element itself is not checked,
                           only its ancestors)
    @param string tag: the tag parent to search for
    @return object element: the found parent or None when not found
    """
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Not found. The old loop never reached this point: past the root,
    # getparent() yields None and `p.tag` raised AttributeError instead.
    return None
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches
    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.
    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output blocks : [ 'Hello,' ]
    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output blocks : [ '__name__' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: Maximum number of consecutive text elements considered
                   as one window when matching
    @return set All occurences of search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # Will match against searchels. Searchels is a sliding window that
    # contains the last n text elements found in the document. 1 < n < bs
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length (number of elements in the window)
                # s = search start (index of the first element)
                # e = element IDs to merge
                found = False
                for l in range(1,len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s,s+l)
                            # Concatenate the window's text and search it
                            # as one string, so matches may span elements.
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document,search,replace,bs=3):
    '''Replace all occurrences of string with a different string, return updated document
    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.
    What it does:
    It searches the entire document body for text blocks.
    Then scan those text blocks for replace.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    '''
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1,len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s,s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s", searchre.pattern)
                                    log.debug("Requested replacement: %s", replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug( "Matched text (splitted): %s", map(lambda i:i.text,searchels))
                                    log.debug("Matched at position: %s", match.start())
                                    log.debug( "matched in elements: %s", e)
                                    # BUGFIX: was `isinstance(replace (list, tuple))` —
                                    # a missing comma that *called* replace instead of
                                    # type-checking it.
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF ELEMENTS")
                                    else:
                                        # BUGFIX: added the %s placeholder; logging's lazy
                                        # formatting choked on the bare extra argument.
                                        log.debug("Will replace with: %s", re.sub(search,replace,txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element. Put in the
                                        # whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process it later
                                            replace = [ replace, ]
                                        if isinstance(replace, (list,tuple)):
                                            # I'm replacing with a list of etree elements
                                            # clear the text in the tag and append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have childs)
                                            p = findTypeParent(searchels[i], '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(search,'',txtsearch)
                                            insindex = p.getparent().index(p) + 1
                                            for r in replace:
                                                p.getparent().insert(insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(search,replace,txtsearch)
                                        replaced = True
                                        log.debug("Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    w_ns = nsprefixes['w']
    para_tag = '{' + w_ns + '}p'
    text_tag = '{' + w_ns + '}t'
    tab_tag = '{' + w_ns + '}tab'
    # Collect every paragraph (p) element in document order.
    paragraphs = [el for el in document.iter() if el.tag == para_tag]
    paratextlist = []
    for para in paragraphs:
        # A sentence may be spread over several t children; stitch the
        # pieces together, mapping tab elements to literal tab characters.
        pieces = []
        for child in para.iter():
            if child.tag == text_tag:
                if child.text:
                    pieces.append(child.text)
            elif child.tag == tab_tag:
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that contained no text at all.
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    '''Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.

    @param str title:           document title
    @param str subject:         document subject
    @param str creator:         document author; also the default for
                                lastModifiedBy when that is not given
    @param iterable keywords:   keyword strings, joined with commas
    @param str lastmodifiedby:  optional; falls back to creator
    @return instance            cp:coreProperties etree element
    '''
    coreprops = makeelement('coreProperties', nsprefix='cp')
    coreprops.append(makeelement('title', tagtext=title, nsprefix='dc'))
    coreprops.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
    coreprops.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
    coreprops.append(makeelement('keywords', tagtext=','.join(keywords), nsprefix='cp'))
    if not lastmodifiedby:
        lastmodifiedby = creator
    coreprops.append(makeelement('lastModifiedBy', tagtext=lastmodifiedby, nsprefix='cp'))
    coreprops.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    coreprops.append(makeelement('category', tagtext='Examples', nsprefix='cp'))
    coreprops.append(makeelement('description', tagtext='Examples', nsprefix='dc'))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modification times.
    # Problem here: the attribute name uses one namespace while the
    # attribute's value uses another, so the element is created from a
    # string as a workaround.  (A dead `pass` after the append was removed.)
    for doctime in ['created', 'modified']:
        coreprops.append(etree.fromstring(
            '<dcterms:' + doctime
            + ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
            ' xmlns:dcterms="http://purl.org/dc/terms/"'
            ' xsi:type="dcterms:W3CDTF">'
            + currenttime + '</dcterms:' + doctime + '>'))
    return coreprops
def appproperties():
    '''Create app-specific (extended) document properties.

    See coreproperties() for the more common document properties.

    @return instance  Properties etree element pre-filled with static
                      application metadata
    '''
    # The root element carries two namespace declarations (default + vt),
    # so it is built from a literal string rather than via makeelement().
    # (The original also built a throwaway element with makeelement() that
    # was immediately overwritten; that dead assignment was removed.)
    appprops = etree.fromstring(
        b'''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
        <Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes"></Properties>''')
    # Static defaults as emitted by Word 12 for a small sample document.
    props = {
        'Template': 'Normal.dotm',
        'TotalTime': '6',
        'Pages': '1',
        'Words': '83',
        'Characters': '475',
        'Application': 'Microsoft Word 12.0.0',
        'DocSecurity': '0',
        'Lines': '12',
        'Paragraphs': '8',
        'ScaleCrop': 'false',
        'LinksUpToDate': 'false',
        'CharactersWithSpaces': '583',
        'SharedDoc': 'false',
        'HyperlinksChanged': 'false',
        'AppVersion': '12.0000',
    }
    for prop, value in props.items():
        appprops.append(makeelement(prop, tagtext=value, nsprefix=None))
    return appprops
def websettings():
    '''Generate a default webSettings part.'''
    web = makeelement('webSettings')
    for child_name in ('allowPNG', 'doNotSaveAsSingleFile'):
        web.append(makeelement(child_name))
    return web
def relationshiplist():
    '''Return the default package relationships as [type, target] pairs.'''
    base = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/'
    targets = [
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    ]
    # Each entry is a two-element list: [relationship type URI, target part].
    return [[base + reltype, target] for reltype, target in targets]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships file'''
    # Build the root element from a literal string because it carries the
    # package-relationships default namespace.
    # FIXME: using string hack instead of making element
    relationships = etree.fromstring(
        '''<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">
        </Relationships>'''
    )
    # Relationship IDs (rId) are numbered starting at 1.
    for rid, relationship in enumerate(relationshiplist, start=1):
        relationships.append(makeelement(
            'Relationship',
            attributes={'Id': 'rId' + str(rid),
                        'Type': relationship[0],
                        'Target': relationship[1]},
            nsprefix=None))
    return relationships
def savedocx(document, coreprops, appprops, contenttypes, websettings, wordrelationships, output,
             temp_dir=None):
    '''Save a modified document.

    Serializes the given etree parts into a .docx (zip) archive at *output*,
    together with the static support files copied from the template.

    @param document:          word/document.xml tree
    @param coreprops:         docProps/core.xml tree
    @param appprops:          docProps/app.xml tree
    @param contenttypes:      [Content_Types].xml tree
    @param websettings:       word/webSettings.xml tree
    @param wordrelationships: word/_rels/document.xml.rels tree
    @param output:            path (or file object) for the resulting zip
    @param str temp_dir:      existing scratch directory (required)
    '''
    assert temp_dir
    assert os.path.isdir(temp_dir)
    docx_dir = join(temp_dir, _DOCX_DIR_NAME)
    # Copy whole template to temporary directory
    distutils.dir_util.copy_tree(TEMPLATE_DIR, docx_dir)  # directory can already exist
    docxfile = zipfile.ZipFile(output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(docx_dir)
    try:
        # Serialize our trees into the zip file
        treesandfiles = {document: 'word/document.xml',
                         coreprops: 'docProps/core.xml',
                         appprops: 'docProps/app.xml',
                         contenttypes: '[Content_Types].xml',
                         websettings: 'word/webSettings.xml',
                         wordrelationships: 'word/_rels/document.xml.rels'}
        for tree in treesandfiles:
            log.info('Saving: ' + treesandfiles[tree])
            treestring = etree.tostring(tree, pretty_print=True)
            docxfile.writestr(treesandfiles[tree], treestring)
        # Add & compress support files
        files_to_ignore = ['.DS_Store']  # nuisance from some os's
        for dirpath, dirnames, filenames in os.walk('.'):
            for filename in filenames:
                if filename in files_to_ignore:
                    continue
                templatefile = join(dirpath, filename)
                archivename = templatefile[2:]
                log.info('Saving: %s', archivename)
                docxfile.write(templatefile, archivename)
        log.info('Saved new file to: %r', output)
    finally:
        # BUG FIX: always close the archive and restore the previous working
        # directory, even if serialization fails part-way through; the
        # original leaked both on any exception.
        docxfile.close()
        os.chdir(prev_dir)
    return
| [
"logging.getLogger",
"lxml.etree.Element",
"zipfile.ZipFile",
"re.compile",
"os.makedirs",
"time.strftime",
"os.path.join",
"os.chdir",
"os.path.dirname",
"Image.open",
"os.path.isdir",
"lxml.etree.fromstring",
"copy.deepcopy",
"os.path.abspath",
"re.sub",
"os.walk",
"lxml.etree.tost... | [((519, 546), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (536, 546), False, 'import logging\n'), ((715, 740), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (730, 740), False, 'import os\n'), ((778, 805), 'os.path.isdir', 'os.path.isdir', (['TEMPLATE_DIR'], {}), '(TEMPLATE_DIR)\n', (791, 805), False, 'import os\n'), ((2796, 2817), 'zipfile.ZipFile', 'zipfile.ZipFile', (['file'], {}), '(file)\n', (2811, 2817), False, 'import zipfile\n'), ((2882, 2910), 'lxml.etree.fromstring', 'etree.fromstring', (['xmlcontent'], {}), '(xmlcontent)\n', (2898, 2910), False, 'from lxml import etree\n'), ((3640, 3694), 'lxml.etree.Element', 'etree.Element', (['(namespace + tagname)'], {'nsmap': 'namespacemap'}), '(namespace + tagname, nsmap=namespacemap)\n', (3653, 3694), False, 'from lxml import etree\n'), ((8123, 8237), 'lxml.etree.fromstring', 'etree.fromstring', (['"""<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types"></Types>"""'], {}), '(\n \'<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types"></Types>\'\n )\n', (8139, 8237), False, 'from lxml import etree\n'), ((17993, 18040), 'os.path.join', 'join', (['temp_dir', '_DOCX_DIR_NAME', '"""word"""', '"""media"""'], {}), "(temp_dir, _DOCX_DIR_NAME, 'word', 'media')\n", (17997, 18040), False, 'from os.path import join\n'), ((21997, 22015), 're.compile', 're.compile', (['search'], {}), '(search)\n', (22007, 22015), False, 'import re\n'), ((22429, 22447), 're.compile', 're.compile', (['search'], {}), '(search)\n', (22439, 22447), False, 'import re\n'), ((24890, 24908), 're.compile', 're.compile', (['search'], {}), '(search)\n', (24900, 24908), False, 'import re\n'), ((28210, 28228), 're.compile', 're.compile', (['search'], {}), '(search)\n', (28220, 28228), False, 'import re\n'), ((35174, 35209), 'time.strftime', 'time.strftime', (['"""%Y-%m-%dT%H:%M:%SZ"""'], {}), "('%Y-%m-%dT%H:%M:%SZ')\n", (35187, 
35209), False, 'import time\n'), ((35951, 36230), 'lxml.etree.fromstring', 'etree.fromstring', (['b\'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\\n <Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes"></Properties>\''], {}), '(\n b\'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\\n <Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes"></Properties>\'\n )\n', (35967, 36230), False, 'from lxml import etree\n'), ((38045, 38188), 'lxml.etree.fromstring', 'etree.fromstring', (['"""<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">\n </Relationships>"""'], {}), '(\n """<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">\n </Relationships>"""\n )\n', (38061, 38188), False, 'from lxml import etree\n'), ((38694, 38717), 'os.path.isdir', 'os.path.isdir', (['temp_dir'], {}), '(temp_dir)\n', (38707, 38717), False, 'import os\n'), ((38733, 38763), 'os.path.join', 'join', (['temp_dir', '_DOCX_DIR_NAME'], {}), '(temp_dir, _DOCX_DIR_NAME)\n', (38737, 38763), False, 'from os.path import join\n'), ((38915, 38982), 'zipfile.ZipFile', 'zipfile.ZipFile', (['output'], {'mode': '"""w"""', 'compression': 'zipfile.ZIP_DEFLATED'}), "(output, mode='w', compression=zipfile.ZIP_DEFLATED)\n", (38930, 38982), False, 'import zipfile\n'), ((39034, 39054), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (39049, 39054), False, 'import os\n'), ((39087, 39105), 'os.chdir', 'os.chdir', (['docx_dir'], {}), '(docx_dir)\n', (39095, 39105), False, 'import os\n'), ((39828, 39840), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (39835, 39840), False, 'import os\n'), ((40213, 40231), 'os.chdir', 'os.chdir', (['prev_dir'], {}), '(prev_dir)\n', (40221, 40231), 
False, 'import os\n'), ((831, 856), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (846, 856), False, 'import os\n'), ((18052, 18076), 'os.path.isdir', 'os.path.isdir', (['media_dir'], {}), '(media_dir)\n', (18065, 18076), False, 'import os\n'), ((18086, 18108), 'os.makedirs', 'os.makedirs', (['media_dir'], {}), '(media_dir)\n', (18097, 18108), False, 'import os\n'), ((18138, 18162), 'os.path.join', 'join', (['media_dir', 'picname'], {}), '(media_dir, picname)\n', (18142, 18162), False, 'from os.path import join\n'), ((39594, 39633), 'lxml.etree.tostring', 'etree.tostring', (['tree'], {'pretty_print': '(True)'}), '(tree, pretty_print=True)\n', (39608, 39633), False, 'from lxml import etree\n'), ((35508, 35729), 'lxml.etree.fromstring', 'etree.fromstring', (['(\'<dcterms:\' + doctime +\n \' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:W3CDTF">\'\n + currenttime + \'</dcterms:\' + doctime + \'>\')'], {}), '(\'<dcterms:\' + doctime +\n \' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:W3CDTF">\'\n + currenttime + \'</dcterms:\' + doctime + \'>\')\n', (35524, 35729), False, 'from lxml import etree\n'), ((39973, 39996), 'os.path.join', 'join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (39977, 39996), False, 'from os.path import join\n'), ((16253, 16274), 'copy.deepcopy', 'deepcopy', (['celstyle[i]'], {}), '(celstyle[i])\n', (16261, 16274), False, 'from copy import deepcopy\n'), ((18335, 18354), 'Image.open', 'Image.open', (['picname'], {}), '(picname)\n', (18345, 18354), False, 'import Image\n'), ((22674, 22711), 're.sub', 're.sub', (['search', 'replace', 'element.text'], {}), '(search, replace, element.text)\n', (22680, 22711), False, 'import re\n'), ((32211, 32240), 're.sub', 're.sub', (['search', '""""""', 'txtsearch'], {}), "(search, '', txtsearch)\n", (32217, 32240), False, 'import re\n'), 
((32708, 32742), 're.sub', 're.sub', (['search', 'replace', 'txtsearch'], {}), '(search, replace, txtsearch)\n', (32714, 32742), False, 'import re\n'), ((30917, 30951), 're.sub', 're.sub', (['search', 'replace', 'txtsearch'], {}), '(search, replace, txtsearch)\n', (30923, 30951), False, 'import re\n')] |
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
from functools import cmp_to_key
from typing import Dict, Iterable, Optional
import tortuga.objects.admin
import tortuga.objects.component
import tortuga.objects.hardwareProfile
import tortuga.objects.kitSource
import tortuga.objects.nic
import tortuga.objects.node
import tortuga.objects.osInfo
import tortuga.objects.partition
from tortuga.objects.tortugaObject import TortugaObject, TortugaObjectList
from tortuga.utility.helper import str2bool
from .validators import RegexValidator
class SoftwareProfile(TortugaObject): \
    # pylint: disable=too-many-public-methods
    """
    Value object describing a Tortuga software profile.

    Thin accessor wrapper around the dict-backed TortugaObject store:
    each get*/set* pair reads or writes one key of that store.
    """
    ROOT_TAG = 'softwareprofile'
    # Profile names are restricted to alphanumerics, dashes and underscores.
    validators = {
        'name': RegexValidator(pattern='[a-zA-Z0-9-_]+')
    }
    def __init__(self, name=None):
        # Seed the backing dict; list-valued keys start as empty
        # TortugaObjectList collections.
        TortugaObject.__init__(
            self, {
                'name': name,
                'admins': TortugaObjectList(),
                'partitions': TortugaObjectList(),
                'components': TortugaObjectList(),
                'nodes': TortugaObjectList(),
                'kitsources': TortugaObjectList(),
            }, ['name', 'id'], SoftwareProfile.ROOT_TAG)
    def __repr__(self):
        return self.getName()
    def setId(self, id_):
        """ Set software profile id."""
        self['id'] = id_
    def getId(self):
        """ Return software profile id. """
        return self.get('id')
    def setName(self, name):
        """ Set software profile name."""
        self['name'] = name
    def getName(self):
        """ Return software profile name. """
        return self.get('name')
    def setDescription(self, description):
        """ Set description."""
        self['description'] = description
    def getDescription(self):
        """ Return description. """
        return self.get('description')
    def setKernel(self, kernel):
        """ Set kernel."""
        self['kernel'] = kernel
    def getKernel(self):
        """ Return kernel. """
        return self.get('kernel')
    def setKernelParams(self, kernelParams):
        """ Set kernel params."""
        self['kernelParams'] = kernelParams
    def getKernelParams(self):
        """ Return kernel params. """
        return self.get('kernelParams')
    def setInitrd(self, initrd):
        """ Set initrd."""
        self['initrd'] = initrd
    def getInitrd(self):
        """ Return initird. """
        return self.get('initrd')
    def setOsId(self, osId):
        """ Set OS id."""
        self['osId'] = osId
    def getOsId(self):
        """ Return OS id. """
        return self.get('osId')
    def setType(self, type_):
        """ Set type."""
        self['type'] = type_
    def getType(self):
        """ Return type. """
        return self.get('type')
    def setMinNodes(self, val):
        """ Set minimum number of nodes. """
        self['minNodes'] = val
    def getMinNodes(self):
        """ Return minimum number of nodes. """
        return self.get('minNodes')
    def setMaxNodes(self, value):
        """ Set maximum number of nodes. """
        self['maxNodes'] = value
    def getMaxNodes(self):
        """ Return maximum number of nodes. """
        return self.get('maxNodes')
    def setLockedState(self, val):
        """ Set locked state. """
        self['lockedState'] = val
    def getLockedState(self):
        """ Return locked state. """
        return self.get('lockedState')
    def setOsInfo(self, osInfo):
        """ Set OS info. """
        self['os'] = osInfo
    def getOsInfo(self):
        """ Get OS info. """
        return self.get('os')
    def setComponents(self, comp):
        """ Set components. """
        self['components'] = comp
    def getComponents(self):
        """ Get Components """
        return self.get('components')
    def setAdmins(self, admins):
        """ set Admins """
        self['admins'] = admins
    def getAdmins(self):
        """ Get Admins """
        return self.get('admins')
    def setPartitions(self, val):
        """ Set partitions. """
        self['partitions'] = val
    def getPartitions(self):
        """ We want to always return the partitions sorted by
            device and partition number """
        partitions = self.get('partitions')
        if partitions:
            # cmp-style comparator wrapped for Python 3's key-based sort.
            partitions.sort(key=cmp_to_key(_partition_compare))
        return partitions
    def setNodes(self, val):
        """ Set nodes. """
        self['nodes'] = val
    def getNodes(self):
        """ Return nodes. """
        return self.get('nodes')
    def setUsableHardwareProfiles(self, val):
        """ Set hardware profiles usable with this software profile. """
        self['hardwareprofiles'] = val
    def getUsableHardwareProfiles(self):
        """ Return usable hardware profiles. """
        return self.get('hardwareprofiles')
    def getKitSources(self):
        """ Return kit sources. """
        return self.get('kitsources')
    def setKitSources(self, kitsources):
        """ Set kit sources. """
        self['kitsources'] = kitsources
    def getTags(self) -> Dict[str, str]:
        """
        Gets all the tags for this software profile.

        :return Dict[str, str]: the tags

        """
        return self.get('tags')
    def setTags(self, tags: Dict[str, str]):
        """
        Sets the tags for this hardware profile.

        :param Dict[str, str] tags: the tags to set for this hardware profile

        """
        self['tags'] = tags
    def getMetadata(self):
        """ Return profile metadata. """
        return self.get('metadata')
    def setMetadata(self, value):
        """ Set profile metadata. """
        self['metadata'] = value
    def getDataRoot(self):
        """ Return the data root path. """
        return self.get('dataRoot')
    def setDataRoot(self, value):
        """ Set the data root path. """
        self['dataRoot'] = value
    def getDataRsync(self):
        """ Return the data rsync setting. """
        return self.get('dataRsync')
    def setDataRsync(self, value):
        """ Set the data rsync setting. """
        self['dataRsync'] = value
    @staticmethod
    def getKeys():
        """ Return the list of serializable scalar keys. """
        return [
            'id',
            'name',
            'osId',
            'description',
            'kernel',
            'initrd',
            'kernelParams',
            'type',
            'minNodes',
            'maxNodes',
            'lockedState',
            'isIdle',
            'metadata',
            'tags',
            'dataRoot',
            'dataRsync',
        ]
    @classmethod
    def getFromDict(cls, _dict, ignore: Optional[Iterable[str]] = None):
        """ Get software profile from _dict. """
        # NOTE(review): 'ignore' is accepted here but not forwarded to the
        # superclass (unlike getFromDbDict) -- confirm this is intended.
        softwareProfile = super(SoftwareProfile, cls).getFromDict(_dict)
        softwareProfile.setAdmins(
            tortuga.objects.admin.Admin.getListFromDict(_dict))
        softwareProfile.setComponents(
            tortuga.objects.component.Component.getListFromDict(_dict))
        softwareProfile.setNodes(
            tortuga.objects.node.Node.getListFromDict(_dict))
        osDict = _dict.get(tortuga.objects.osInfo.OsInfo.ROOT_TAG)
        if osDict:
            softwareProfile.setOsInfo(
                tortuga.objects.osInfo.OsInfo.getFromDict(osDict))
        softwareProfile.setPartitions(
            tortuga.objects.partition.Partition.getListFromDict(_dict))
        softwareProfile.\
            setUsableHardwareProfiles(
                tortuga.objects.hardwareProfile.HardwareProfile.
                getListFromDict(_dict))
        # kitsources
        softwareProfile.setKitSources(
            tortuga.objects.kitSource.KitSource.getListFromDict(_dict))
        return softwareProfile
    @classmethod
    def getFromDbDict(cls, _dict, ignore: Optional[Iterable[str]] = None):
        """ Build a software profile from a database-layer dict. """
        softwareProfile = super(SoftwareProfile, cls).getFromDict(
            _dict, ignore=ignore)
        softwareProfile.setAdmins(
            tortuga.objects.admin.Admin.getListFromDbDict(_dict))
        softwareProfile.setComponents(
            tortuga.objects.component.Component.getListFromDbDict(_dict))
        # Nodes can be expensive to materialize; honor the 'ignore' filter.
        if not ignore or 'nodes' not in ignore:
            softwareProfile.setNodes(
                tortuga.objects.node.Node.getListFromDbDict(_dict))
        osDict = _dict.get(tortuga.objects.osInfo.OsInfo.ROOT_TAG)
        if osDict:
            softwareProfile.setOsInfo(
                tortuga.objects.osInfo.OsInfo.getFromDbDict(
                    osDict.__dict__))
        softwareProfile.setPartitions(
            tortuga.objects.partition.Partition.getListFromDbDict(_dict))
        softwareProfile.setUsableHardwareProfiles(
            tortuga.objects.hardwareProfile.HardwareProfile.
            getListFromDbDict(_dict))
        # Flatten tag ORM rows into a plain name -> value dict.
        tags = {tag.name: tag.value for tag in _dict.get('tags', [])}
        softwareProfile.setTags(tags)
        return softwareProfile
def _partition_compare(x, y):
deviceDiff = x.getDeviceTuple()[0] - y.getDeviceTuple()[0]
if deviceDiff == 0:
deviceDiff = x.getDeviceTuple()[1] - y.getDeviceTuple()[1]
return deviceDiff
| [
"tortuga.objects.tortugaObject.TortugaObjectList",
"functools.cmp_to_key"
] | [((1457, 1476), 'tortuga.objects.tortugaObject.TortugaObjectList', 'TortugaObjectList', ([], {}), '()\n', (1474, 1476), False, 'from tortuga.objects.tortugaObject import TortugaObject, TortugaObjectList\n'), ((1508, 1527), 'tortuga.objects.tortugaObject.TortugaObjectList', 'TortugaObjectList', ([], {}), '()\n', (1525, 1527), False, 'from tortuga.objects.tortugaObject import TortugaObject, TortugaObjectList\n'), ((1559, 1578), 'tortuga.objects.tortugaObject.TortugaObjectList', 'TortugaObjectList', ([], {}), '()\n', (1576, 1578), False, 'from tortuga.objects.tortugaObject import TortugaObject, TortugaObjectList\n'), ((1605, 1624), 'tortuga.objects.tortugaObject.TortugaObjectList', 'TortugaObjectList', ([], {}), '()\n', (1622, 1624), False, 'from tortuga.objects.tortugaObject import TortugaObject, TortugaObjectList\n'), ((1656, 1675), 'tortuga.objects.tortugaObject.TortugaObjectList', 'TortugaObjectList', ([], {}), '()\n', (1673, 1675), False, 'from tortuga.objects.tortugaObject import TortugaObject, TortugaObjectList\n'), ((4607, 4637), 'functools.cmp_to_key', 'cmp_to_key', (['_partition_compare'], {}), '(_partition_compare)\n', (4617, 4637), False, 'from functools import cmp_to_key\n')] |
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models
from django.utils import timezone
from django.db.models import BooleanField
class UserManager(BaseUserManager):
    """Manager that creates users keyed by email instead of username.

    usertype encoding: 1 = candidate, 0 = recruiter, None = staff.
    """
    def _create_user(self, email, password, usertype, is_staff, is_superuser, **extra_fields):
        """Create, password-hash and persist a user.

        Raises ValueError when no email address is supplied.
        """
        if not email:
            raise ValueError('Users must have an email address')
        now = timezone.now()
        email = self.normalize_email(email)
        user = self.model(
            email=email,
            usertype=usertype,
            is_staff=is_staff,
            is_active=True,
            is_superuser=is_superuser,
            last_login=now,
            date_joined=now,
            **extra_fields
        )
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_candidate(self, email, password, **extra_fields):
        """Create a candidate account (usertype=1)."""
        return self._create_user(email, password, 1, False, False, **extra_fields)
    def create_recruiter(self, email, password, **extra_fields):
        """Create a recruiter account (usertype=0)."""
        return self._create_user(email, password, 0, False, False, **extra_fields)
    def create_staff(self, email, password, **extra_fields):
        """Create a non-superuser staff account (usertype=None)."""
        return self._create_user(email, password, None, True, False, **extra_fields)
    def create_superuser(self, email, password, **extra_fields):
        """Create a superuser (staff + superuser flags set).

        BUG FIX: _create_user() already saves the instance, so the
        original's second user.save() call was redundant and is removed.
        """
        return self._create_user(email, password, None, True, True, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
    '''
    Custom user model authenticated by email address.

    Usertype can have 3 values:
        Candidate - 1
        Recruiter - 0
        Staff - None
    '''
    # (value, label) choices for the usertype field below; None marks staff.
    USERTYPES = [
        (1, 'Candidates'),
        (0, 'Recruiters'),
        (None, 'Staff'),
    ]
    email = models.EmailField(max_length=254, unique=True)
    name = models.CharField(max_length=254, null=True, blank=True)
    # None is a legal stored value (staff), hence null=True.
    usertype = models.PositiveSmallIntegerField(choices=USERTYPES, null=True)
    is_staff = models.BooleanField(default=False)
    # NOTE(review): redefines fields also provided by PermissionsMixin
    # (is_superuser) and AbstractBaseUser (last_login) -- presumably to
    # control defaults/nullability; confirm no migration conflicts.
    is_superuser = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    last_login = models.DateTimeField(null=True, blank=True)
    date_joined = models.DateTimeField(auto_now_add=True)
    # Authenticate with the email address; no separate username field.
    USERNAME_FIELD = 'email'
    EMAIL_FIELD = 'email'
    REQUIRED_FIELDS = []
    objects = UserManager()
    def get_absolute_url(self):
        # Canonical URL for this user, e.g. /users/42/
        return "/users/%i/" % (self.pk)
| [
"django.db.models.EmailField",
"django.db.models.BooleanField",
"django.utils.timezone.now",
"django.db.models.DateTimeField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.CharField"
] | [((1785, 1831), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(254)', 'unique': '(True)'}), '(max_length=254, unique=True)\n', (1802, 1831), False, 'from django.db import models\n'), ((1843, 1898), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'null': '(True)', 'blank': '(True)'}), '(max_length=254, null=True, blank=True)\n', (1859, 1898), False, 'from django.db import models\n'), ((1914, 1976), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': 'USERTYPES', 'null': '(True)'}), '(choices=USERTYPES, null=True)\n', (1946, 1976), False, 'from django.db import models\n'), ((1992, 2026), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2011, 2026), False, 'from django.db import models\n'), ((2046, 2080), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2065, 2080), False, 'from django.db import models\n'), ((2097, 2130), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (2116, 2130), False, 'from django.db import models\n'), ((2148, 2191), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (2168, 2191), False, 'from django.db import models\n'), ((2210, 2249), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2230, 2249), False, 'from django.db import models\n'), ((433, 447), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (445, 447), False, 'from django.utils import timezone\n')] |
import iota_client

# Connect with the client library's default node configuration.
client = iota_client.Client()
# Fetch and print a single output record by its hex output id
# (presumably transaction id + output index -- confirm against the
# iota_client API docs for the installed version).
print(
    client.get_output("a22cba0667c922cbb1f8bdcaf970b2a881ccd6e88e2fcce50374de2aac7c37720000")
)
"iota_client.Client"
] | [((28, 48), 'iota_client.Client', 'iota_client.Client', ([], {}), '()\n', (46, 48), False, 'import iota_client\n')] |
import sys
sys.path.insert(0, "Modelos/Mapa")
from Map import *
from Item import Item
"""
Define a classe que manipula a logica do jogo.
"""
class Game:
    """Handles the game logic: players, rooms, items and movement.

    All player-facing messages are returned (not printed) so the caller
    can relay them over the network; they are intentionally kept in
    Portuguese, unchanged from the original protocol.
    """

    class Player:
        """A single connected player."""

        def __init__(self, name, addr, map):
            """Create a new player.

            @param name: unique player identifier
            @param addr: network address (IP) of the player
            @param map:  textual map handed to the player as a starting item
            """
            self.Name = name
            self.Addr = addr  # IP
            self.Room = 1  # players always start in room 1
            self.Inventario = []
            self.Inventario.append(Item("Mapa", map))

    def __init__(self, map):
        """Create a new game over the given map."""
        self.Map = map
        self.Players = []

    def CriaJogador(self, playerId, addr):
        """Create a new player.

        Returns "FAIL" if the player already exists, "OK" otherwise.
        """
        if self.getPlayer(playerId) is not None:
            return "FAIL"
        self.Players.append(self.Player(playerId, addr, self.Map.showMap()))
        return "OK"

    def Examina(self, playerId):
        """Describe the room the player is currently in."""
        player = self.getPlayer(playerId)
        if player is None:
            return "Player nao encontrado"
        room = self.Map.getRoom(player.Room)
        return room.ToString()

    def Move(self, playerId, direction):
        """Move the player to the adjacent room in *direction*, if any
        exists there and its door is open."""
        player = self.getPlayer(playerId)
        if player is None:
            return "Player nao encontrado"
        room = self.Map.getRoom(player.Room)
        roomInDirection = room.GetRoomInDirection(direction)
        if roomInDirection is None:
            return "Nao ha sala nesta direcao."
        if not room.CanMoveTo(direction):
            return "A porta esta fechada."
        player.Room = roomInDirection
        # Refresh the player's map item so it shows the new location.
        for item in player.Inventario:
            if item.Name == "Mapa":
                item.Description = self.Map.showMap(roomInDirection)
        return "O jogador se moveu para a sala " + str(roomInDirection) + "."

    def Inventario(self, playerId):
        """List the names of every item in the player's inventory."""
        player = self.getPlayer(playerId)
        if player is None:
            return "Player nao encontrado"
        # Same " ; " separator the clients expect, without the manual loop.
        return " ; ".join(item.Name for item in player.Inventario)

    def UsaItem(self, playerId, itemName, target=None):
        """Use an inventory item.

        Notes and the map return their description; keys open a door
        (identified by *target*, the door index as a string); the final
        object ends the game when used in room 1.

        BUG FIX: the original attached its "not in inventory" branch to the
        per-item name check, so any request whose item was not the FIRST
        inventory entry failed without scanning the rest. The whole
        inventory is now searched before giving up. An unreachable return
        after the key-handling if/else was also removed.
        """
        player = self.getPlayer(playerId)
        if player is None:
            return "Player nao encontrado"
        salaAtual = self.Map.getRoom(player.Room)
        for item in player.Inventario:
            if item.Name != itemName:
                continue
            if "Nota" in str(item.Name):
                return item.Description
            if item.Name == "Mapa":
                return item.Description
            if item.Name == "ObjetoFinal":
                if salaAtual.ID == 1:
                    return "Fim"
                return "Voce precisa estar na sala inicial para utilizar este objeto"
            if "Chave" in str(item.Name):
                if target is None:
                    return "Escolha uma porta para abrir"
                abriuPorta = False
                for x in range(len(salaAtual.Doors)):
                    if str(x) == target:
                        abriuPorta = True
                        self.Map.getRoom(player.Room).Doors[x].OpenDoor()
                if abriuPorta:
                    return "Porta " + target + " foi aberta"
                return "Nao foi possivel abrir a porta " + target
        return "Item nao existente no inventario"

    def Pegar(self, playerId, objeto):
        """Pick up an object lying in the player's current room."""
        player = self.getPlayer(playerId)
        if player is None:
            return "Player nao encontrado"
        salaAtual = self.Map.getRoom(player.Room)
        if salaAtual is None:
            return "Sala nao encontrada"
        for x in range(len(salaAtual.Objects)):
            objetoEncontrado = salaAtual.Objects[x]
            if str(objeto) == str(objetoEncontrado.Name):
                # Move the object from the room into the inventory.
                del salaAtual.Objects[x]
                player.Inventario.append(
                    Item(objetoEncontrado.Name, objetoEncontrado.Description))
                return "Objeto " + objeto + " adicionado ao inventario"
        return "Objeto " + objeto + " nao foi encontrado nesta sala"

    def Largar(self, playerId, objeto):
        """Drop an inventory object into the current room.

        BUG FIX: the original kept iterating over the shrunken list after
        the del, which raised IndexError once the loop reached the old
        length; we now stop at the first match (mirroring Pegar).
        """
        player = self.getPlayer(playerId)
        if player is None:
            return "Player nao encontrado"
        salaAtual = self.Map.getRoom(player.Room)
        for x in range(len(player.Inventario)):
            itemPlayer = player.Inventario[x]
            if itemPlayer.Name == str(objeto):
                del player.Inventario[x]
                salaAtual.Objects.append(
                    Item(itemPlayer.Name, itemPlayer.Description))
                return "Objeto " + objeto + " adicionado a sala"
        return "Objeto " + objeto + " nao foi encontrado no inventario"

    def Cochichar(self, playerSource, text, playerTarget):
        """Return (addr, text) for a private message to *playerTarget*.

        Returns None when the target player does not exist.  (An unused
        lookup of the sending player was removed.)
        """
        for other in self.Players:
            if other.Name == str(playerTarget):
                return (other.Addr, text)
        return None

    def getPlayersInRoom(self, room):
        """Return the addresses of every player currently in *room*.

        NOTE: returns an error *string* (not a list) for an unknown room;
        kept as-is for backward compatibility with existing callers.
        """
        sala = self.Map.getRoom(room)
        if sala is None:
            return "Sala nao encontrada"
        return [p.Addr for p in self.Players if p.Room == room]

    def getPlayer(self, playerName):
        """Return the connected player with the given name, or None."""
        for player in self.Players:
            if player.Name == playerName:
                return player
        return None
| [
"Item.Item",
"sys.path.insert"
] | [((11, 45), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""Modelos/Mapa"""'], {}), "(0, 'Modelos/Mapa')\n", (26, 45), False, 'import sys\n'), ((445, 462), 'Item.Item', 'Item', (['"""Mapa"""', 'map'], {}), "('Mapa', map)\n", (449, 462), False, 'from Item import Item\n'), ((3737, 3794), 'Item.Item', 'Item', (['objetoEncontrado.Name', 'objetoEncontrado.Description'], {}), '(objetoEncontrado.Name, objetoEncontrado.Description)\n', (3741, 3794), False, 'from Item import Item\n'), ((4426, 4471), 'Item.Item', 'Item', (['itemPlayer.Name', 'itemPlayer.Description'], {}), '(itemPlayer.Name, itemPlayer.Description)\n', (4430, 4471), False, 'from Item import Item\n')] |
from datetime import datetime
# Bare dict literal mapping week-start timestamps (Mondays, midnight) to an
# integer count for that week, covering 2019-12-30 through 2020-12-28.
# NOTE(review): presumably weekly event/case totals for 2020 -- confirm with
# the producer of this data. The expression is not bound to a name, so
# evaluating this block builds the dict and immediately discards it.
{
    datetime(2019, 12, 30, 0, 0): 35,
    datetime(2020, 1, 6, 0, 0): 27,
    datetime(2020, 1, 13, 0, 0): 39,
    datetime(2020, 1, 20, 0, 0): 120,
    datetime(2020, 1, 27, 0, 0): 73,
    datetime(2020, 2, 3, 0, 0): 48,
    datetime(2020, 2, 10, 0, 0): 35,
    datetime(2020, 2, 17, 0, 0): 89,
    datetime(2020, 2, 24, 0, 0): 81,
    datetime(2020, 3, 2, 0, 0): 116,
    datetime(2020, 3, 9, 0, 0): 90,
    datetime(2020, 3, 16, 0, 0): 195,
    datetime(2020, 3, 23, 0, 0): 406,
    datetime(2020, 3, 30, 0, 0): 642,
    datetime(2020, 4, 6, 0, 0): 652,
    datetime(2020, 4, 13, 0, 0): 684,
    datetime(2020, 4, 20, 0, 0): 1393,
    datetime(2020, 4, 27, 0, 0): 1755,
    datetime(2020, 5, 4, 0, 0): 1251,
    datetime(2020, 5, 11, 0, 0): 1566,
    datetime(2020, 5, 18, 0, 0): 1986,
    datetime(2020, 5, 25, 0, 0): 2141,
    datetime(2020, 6, 1, 0, 0): 1581,
    datetime(2020, 6, 8, 0, 0): 1640,
    datetime(2020, 6, 15, 0, 0): 1406,
    datetime(2020, 6, 22, 0, 0): 1902,
    datetime(2020, 6, 29, 0, 0): 2078,
    datetime(2020, 7, 6, 0, 0): 1821,
    datetime(2020, 7, 13, 0, 0): 1854,
    datetime(2020, 7, 20, 0, 0): 2308,
    datetime(2020, 7, 27, 0, 0): 2637,
    datetime(2020, 8, 3, 0, 0): 2275,
    datetime(2020, 8, 10, 0, 0): 1717,
    datetime(2020, 8, 17, 0, 0): 1474,
    datetime(2020, 8, 24, 0, 0): 2234,
    datetime(2020, 8, 31, 0, 0): 2275,
    datetime(2020, 9, 7, 0, 0): 2180,
    datetime(2020, 9, 14, 0, 0): 1824,
    datetime(2020, 9, 21, 0, 0): 1609,
    datetime(2020, 9, 28, 0, 0): 1714,
    datetime(2020, 10, 5, 0, 0): 2849,
    datetime(2020, 10, 12, 0, 0): 1425,
    datetime(2020, 10, 19, 0, 0): 569,
    datetime(2020, 10, 26, 0, 0): 210,
    datetime(2020, 11, 2, 0, 0): 331,
    datetime(2020, 11, 9, 0, 0): 229,
    datetime(2020, 11, 16, 0, 0): 162,
    datetime(2020, 11, 23, 0, 0): 164,
    datetime(2020, 11, 30, 0, 0): 102,
    datetime(2020, 12, 7, 0, 0): 75,
    datetime(2020, 12, 14, 0, 0): 55,
    datetime(2020, 12, 21, 0, 0): 150,
    datetime(2020, 12, 28, 0, 0): 11,
}
| [
"datetime.datetime"
] | [((37, 65), 'datetime.datetime', 'datetime', (['(2019)', '(12)', '(30)', '(0)', '(0)'], {}), '(2019, 12, 30, 0, 0)\n', (45, 65), False, 'from datetime import datetime\n'), ((75, 101), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(6)', '(0)', '(0)'], {}), '(2020, 1, 6, 0, 0)\n', (83, 101), False, 'from datetime import datetime\n'), ((111, 138), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(13)', '(0)', '(0)'], {}), '(2020, 1, 13, 0, 0)\n', (119, 138), False, 'from datetime import datetime\n'), ((148, 175), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(20)', '(0)', '(0)'], {}), '(2020, 1, 20, 0, 0)\n', (156, 175), False, 'from datetime import datetime\n'), ((186, 213), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(27)', '(0)', '(0)'], {}), '(2020, 1, 27, 0, 0)\n', (194, 213), False, 'from datetime import datetime\n'), ((223, 249), 'datetime.datetime', 'datetime', (['(2020)', '(2)', '(3)', '(0)', '(0)'], {}), '(2020, 2, 3, 0, 0)\n', (231, 249), False, 'from datetime import datetime\n'), ((259, 286), 'datetime.datetime', 'datetime', (['(2020)', '(2)', '(10)', '(0)', '(0)'], {}), '(2020, 2, 10, 0, 0)\n', (267, 286), False, 'from datetime import datetime\n'), ((296, 323), 'datetime.datetime', 'datetime', (['(2020)', '(2)', '(17)', '(0)', '(0)'], {}), '(2020, 2, 17, 0, 0)\n', (304, 323), False, 'from datetime import datetime\n'), ((333, 360), 'datetime.datetime', 'datetime', (['(2020)', '(2)', '(24)', '(0)', '(0)'], {}), '(2020, 2, 24, 0, 0)\n', (341, 360), False, 'from datetime import datetime\n'), ((370, 396), 'datetime.datetime', 'datetime', (['(2020)', '(3)', '(2)', '(0)', '(0)'], {}), '(2020, 3, 2, 0, 0)\n', (378, 396), False, 'from datetime import datetime\n'), ((407, 433), 'datetime.datetime', 'datetime', (['(2020)', '(3)', '(9)', '(0)', '(0)'], {}), '(2020, 3, 9, 0, 0)\n', (415, 433), False, 'from datetime import datetime\n'), ((443, 470), 'datetime.datetime', 'datetime', (['(2020)', '(3)', '(16)', '(0)', '(0)'], {}), '(2020, 3, 
16, 0, 0)\n', (451, 470), False, 'from datetime import datetime\n'), ((481, 508), 'datetime.datetime', 'datetime', (['(2020)', '(3)', '(23)', '(0)', '(0)'], {}), '(2020, 3, 23, 0, 0)\n', (489, 508), False, 'from datetime import datetime\n'), ((519, 546), 'datetime.datetime', 'datetime', (['(2020)', '(3)', '(30)', '(0)', '(0)'], {}), '(2020, 3, 30, 0, 0)\n', (527, 546), False, 'from datetime import datetime\n'), ((557, 583), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(6)', '(0)', '(0)'], {}), '(2020, 4, 6, 0, 0)\n', (565, 583), False, 'from datetime import datetime\n'), ((594, 621), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(13)', '(0)', '(0)'], {}), '(2020, 4, 13, 0, 0)\n', (602, 621), False, 'from datetime import datetime\n'), ((632, 659), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(20)', '(0)', '(0)'], {}), '(2020, 4, 20, 0, 0)\n', (640, 659), False, 'from datetime import datetime\n'), ((671, 698), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(27)', '(0)', '(0)'], {}), '(2020, 4, 27, 0, 0)\n', (679, 698), False, 'from datetime import datetime\n'), ((710, 736), 'datetime.datetime', 'datetime', (['(2020)', '(5)', '(4)', '(0)', '(0)'], {}), '(2020, 5, 4, 0, 0)\n', (718, 736), False, 'from datetime import datetime\n'), ((748, 775), 'datetime.datetime', 'datetime', (['(2020)', '(5)', '(11)', '(0)', '(0)'], {}), '(2020, 5, 11, 0, 0)\n', (756, 775), False, 'from datetime import datetime\n'), ((787, 814), 'datetime.datetime', 'datetime', (['(2020)', '(5)', '(18)', '(0)', '(0)'], {}), '(2020, 5, 18, 0, 0)\n', (795, 814), False, 'from datetime import datetime\n'), ((826, 853), 'datetime.datetime', 'datetime', (['(2020)', '(5)', '(25)', '(0)', '(0)'], {}), '(2020, 5, 25, 0, 0)\n', (834, 853), False, 'from datetime import datetime\n'), ((865, 891), 'datetime.datetime', 'datetime', (['(2020)', '(6)', '(1)', '(0)', '(0)'], {}), '(2020, 6, 1, 0, 0)\n', (873, 891), False, 'from datetime import datetime\n'), ((903, 929), 'datetime.datetime', 
'datetime', (['(2020)', '(6)', '(8)', '(0)', '(0)'], {}), '(2020, 6, 8, 0, 0)\n', (911, 929), False, 'from datetime import datetime\n'), ((941, 968), 'datetime.datetime', 'datetime', (['(2020)', '(6)', '(15)', '(0)', '(0)'], {}), '(2020, 6, 15, 0, 0)\n', (949, 968), False, 'from datetime import datetime\n'), ((980, 1007), 'datetime.datetime', 'datetime', (['(2020)', '(6)', '(22)', '(0)', '(0)'], {}), '(2020, 6, 22, 0, 0)\n', (988, 1007), False, 'from datetime import datetime\n'), ((1019, 1046), 'datetime.datetime', 'datetime', (['(2020)', '(6)', '(29)', '(0)', '(0)'], {}), '(2020, 6, 29, 0, 0)\n', (1027, 1046), False, 'from datetime import datetime\n'), ((1058, 1084), 'datetime.datetime', 'datetime', (['(2020)', '(7)', '(6)', '(0)', '(0)'], {}), '(2020, 7, 6, 0, 0)\n', (1066, 1084), False, 'from datetime import datetime\n'), ((1096, 1123), 'datetime.datetime', 'datetime', (['(2020)', '(7)', '(13)', '(0)', '(0)'], {}), '(2020, 7, 13, 0, 0)\n', (1104, 1123), False, 'from datetime import datetime\n'), ((1135, 1162), 'datetime.datetime', 'datetime', (['(2020)', '(7)', '(20)', '(0)', '(0)'], {}), '(2020, 7, 20, 0, 0)\n', (1143, 1162), False, 'from datetime import datetime\n'), ((1174, 1201), 'datetime.datetime', 'datetime', (['(2020)', '(7)', '(27)', '(0)', '(0)'], {}), '(2020, 7, 27, 0, 0)\n', (1182, 1201), False, 'from datetime import datetime\n'), ((1213, 1239), 'datetime.datetime', 'datetime', (['(2020)', '(8)', '(3)', '(0)', '(0)'], {}), '(2020, 8, 3, 0, 0)\n', (1221, 1239), False, 'from datetime import datetime\n'), ((1251, 1278), 'datetime.datetime', 'datetime', (['(2020)', '(8)', '(10)', '(0)', '(0)'], {}), '(2020, 8, 10, 0, 0)\n', (1259, 1278), False, 'from datetime import datetime\n'), ((1290, 1317), 'datetime.datetime', 'datetime', (['(2020)', '(8)', '(17)', '(0)', '(0)'], {}), '(2020, 8, 17, 0, 0)\n', (1298, 1317), False, 'from datetime import datetime\n'), ((1329, 1356), 'datetime.datetime', 'datetime', (['(2020)', '(8)', '(24)', '(0)', '(0)'], {}), '(2020, 
8, 24, 0, 0)\n', (1337, 1356), False, 'from datetime import datetime\n'), ((1368, 1395), 'datetime.datetime', 'datetime', (['(2020)', '(8)', '(31)', '(0)', '(0)'], {}), '(2020, 8, 31, 0, 0)\n', (1376, 1395), False, 'from datetime import datetime\n'), ((1407, 1433), 'datetime.datetime', 'datetime', (['(2020)', '(9)', '(7)', '(0)', '(0)'], {}), '(2020, 9, 7, 0, 0)\n', (1415, 1433), False, 'from datetime import datetime\n'), ((1445, 1472), 'datetime.datetime', 'datetime', (['(2020)', '(9)', '(14)', '(0)', '(0)'], {}), '(2020, 9, 14, 0, 0)\n', (1453, 1472), False, 'from datetime import datetime\n'), ((1484, 1511), 'datetime.datetime', 'datetime', (['(2020)', '(9)', '(21)', '(0)', '(0)'], {}), '(2020, 9, 21, 0, 0)\n', (1492, 1511), False, 'from datetime import datetime\n'), ((1523, 1550), 'datetime.datetime', 'datetime', (['(2020)', '(9)', '(28)', '(0)', '(0)'], {}), '(2020, 9, 28, 0, 0)\n', (1531, 1550), False, 'from datetime import datetime\n'), ((1562, 1589), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(5)', '(0)', '(0)'], {}), '(2020, 10, 5, 0, 0)\n', (1570, 1589), False, 'from datetime import datetime\n'), ((1601, 1629), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(12)', '(0)', '(0)'], {}), '(2020, 10, 12, 0, 0)\n', (1609, 1629), False, 'from datetime import datetime\n'), ((1641, 1669), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(19)', '(0)', '(0)'], {}), '(2020, 10, 19, 0, 0)\n', (1649, 1669), False, 'from datetime import datetime\n'), ((1680, 1708), 'datetime.datetime', 'datetime', (['(2020)', '(10)', '(26)', '(0)', '(0)'], {}), '(2020, 10, 26, 0, 0)\n', (1688, 1708), False, 'from datetime import datetime\n'), ((1719, 1746), 'datetime.datetime', 'datetime', (['(2020)', '(11)', '(2)', '(0)', '(0)'], {}), '(2020, 11, 2, 0, 0)\n', (1727, 1746), False, 'from datetime import datetime\n'), ((1757, 1784), 'datetime.datetime', 'datetime', (['(2020)', '(11)', '(9)', '(0)', '(0)'], {}), '(2020, 11, 9, 0, 0)\n', (1765, 1784), False, 'from 
datetime import datetime\n'), ((1795, 1823), 'datetime.datetime', 'datetime', (['(2020)', '(11)', '(16)', '(0)', '(0)'], {}), '(2020, 11, 16, 0, 0)\n', (1803, 1823), False, 'from datetime import datetime\n'), ((1834, 1862), 'datetime.datetime', 'datetime', (['(2020)', '(11)', '(23)', '(0)', '(0)'], {}), '(2020, 11, 23, 0, 0)\n', (1842, 1862), False, 'from datetime import datetime\n'), ((1873, 1901), 'datetime.datetime', 'datetime', (['(2020)', '(11)', '(30)', '(0)', '(0)'], {}), '(2020, 11, 30, 0, 0)\n', (1881, 1901), False, 'from datetime import datetime\n'), ((1912, 1939), 'datetime.datetime', 'datetime', (['(2020)', '(12)', '(7)', '(0)', '(0)'], {}), '(2020, 12, 7, 0, 0)\n', (1920, 1939), False, 'from datetime import datetime\n'), ((1949, 1977), 'datetime.datetime', 'datetime', (['(2020)', '(12)', '(14)', '(0)', '(0)'], {}), '(2020, 12, 14, 0, 0)\n', (1957, 1977), False, 'from datetime import datetime\n'), ((1987, 2015), 'datetime.datetime', 'datetime', (['(2020)', '(12)', '(21)', '(0)', '(0)'], {}), '(2020, 12, 21, 0, 0)\n', (1995, 2015), False, 'from datetime import datetime\n'), ((2026, 2054), 'datetime.datetime', 'datetime', (['(2020)', '(12)', '(28)', '(0)', '(0)'], {}), '(2020, 12, 28, 0, 0)\n', (2034, 2054), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Migrate some legacy process attributes.
Attribute keys that are renamed:
* `_sealed` -> `sealed`
Attribute keys that are removed entirely:
* `_finished`
* `_failed`
* `_aborted`
* `_do_abort`
Finally, after these first migrations, any remaining process nodes that still do not have a sealed attribute and have
it set to `True`. Excluding the nodes that have a `process_state` attribute of one of the active states `created`,
running` or `waiting`, because those are actual valid active processes that are not yet sealed.
This is identical to migration e734dd5e50d7
Revision ID: django_0040
Revises: django_0039
"""
from alembic import op
import sqlalchemy as sa
revision = 'django_0040'
down_revision = 'django_0039'
branch_labels = None
depends_on = None
def upgrade():
    """Migrations for the upgrade.

    Applies, in a single raw-SQL batch:

    * copy the legacy ``_sealed`` attribute to ``sealed`` and drop ``_sealed``;
    * drop the obsolete ``_finished``/``_failed``/``_aborted``/``_do_abort``
      attributes;
    * set ``sealed=True`` on any remaining process node still missing the
      attribute, except nodes in an active process state
      (``created``/``running``/``waiting``).
    """
    conn = op.get_bind()
    # Every UPDATE is restricted to process nodes (node_type LIKE 'process.%')
    # and guarded with `attributes ? '<key>'` so rows without the legacy key
    # are left untouched.
    statement = sa.text(
        """
        UPDATE db_dbnode
        SET attributes = jsonb_set(attributes, '{"sealed"}', attributes->'_sealed')
        WHERE attributes ? '_sealed' AND node_type LIKE 'process.%';
        -- Copy `_sealed` -> `sealed`
        UPDATE db_dbnode SET attributes = attributes - '_sealed'
        WHERE attributes ? '_sealed' AND node_type LIKE 'process.%';
        -- Delete `_sealed`
        UPDATE db_dbnode SET attributes = attributes - '_finished'
        WHERE attributes ? '_finished' AND node_type LIKE 'process.%';
        -- Delete `_finished`
        UPDATE db_dbnode SET attributes = attributes - '_failed'
        WHERE attributes ? '_failed' AND node_type LIKE 'process.%';
        -- Delete `_failed`
        UPDATE db_dbnode SET attributes = attributes - '_aborted'
        WHERE attributes ? '_aborted' AND node_type LIKE 'process.%';
        -- Delete `_aborted`
        UPDATE db_dbnode SET attributes = attributes - '_do_abort'
        WHERE attributes ? '_do_abort' AND node_type LIKE 'process.%';
        -- Delete `_do_abort`
        UPDATE db_dbnode
        SET attributes = jsonb_set(attributes, '{"sealed"}', to_jsonb(True))
        WHERE
            node_type LIKE 'process.%' AND
            NOT (attributes ? 'sealed') AND
            attributes->>'process_state' NOT IN ('created', 'running', 'waiting');
        -- Set `sealed=True` for process nodes that do not yet have a `sealed` attribute AND are not in an active state
    """
    )
    conn.execute(statement)
def downgrade():
    """Migrations for the downgrade.

    This migration is irreversible: the legacy attribute keys it removes
    cannot be reconstructed, so downgrading always fails.
    """
    message = 'Downgrade of django_0040.'
    raise NotImplementedError(message)
| [
"alembic.op.get_bind",
"sqlalchemy.text"
] | [((1517, 1530), 'alembic.op.get_bind', 'op.get_bind', ([], {}), '()\n', (1528, 1530), False, 'from alembic import op\n'), ((1548, 3035), 'sqlalchemy.text', 'sa.text', (['"""\n UPDATE db_dbnode\n SET attributes = jsonb_set(attributes, \'{"sealed"}\', attributes->\'_sealed\')\n WHERE attributes ? \'_sealed\' AND node_type LIKE \'process.%\';\n -- Copy `_sealed` -> `sealed`\n\n UPDATE db_dbnode SET attributes = attributes - \'_sealed\'\n WHERE attributes ? \'_sealed\' AND node_type LIKE \'process.%\';\n -- Delete `_sealed`\n\n UPDATE db_dbnode SET attributes = attributes - \'_finished\'\n WHERE attributes ? \'_finished\' AND node_type LIKE \'process.%\';\n -- Delete `_finished`\n\n UPDATE db_dbnode SET attributes = attributes - \'_failed\'\n WHERE attributes ? \'_failed\' AND node_type LIKE \'process.%\';\n -- Delete `_failed`\n\n UPDATE db_dbnode SET attributes = attributes - \'_aborted\'\n WHERE attributes ? \'_aborted\' AND node_type LIKE \'process.%\';\n -- Delete `_aborted`\n\n UPDATE db_dbnode SET attributes = attributes - \'_do_abort\'\n WHERE attributes ? \'_do_abort\' AND node_type LIKE \'process.%\';\n -- Delete `_do_abort`\n\n UPDATE db_dbnode\n SET attributes = jsonb_set(attributes, \'{"sealed"}\', to_jsonb(True))\n WHERE\n node_type LIKE \'process.%\' AND\n NOT (attributes ? \'sealed\') AND\n attributes->>\'process_state\' NOT IN (\'created\', \'running\', \'waiting\');\n -- Set `sealed=True` for process nodes that do not yet have a `sealed` attribute AND are not in an active state\n """'], {}), '(\n """\n UPDATE db_dbnode\n SET attributes = jsonb_set(attributes, \'{"sealed"}\', attributes->\'_sealed\')\n WHERE attributes ? \'_sealed\' AND node_type LIKE \'process.%\';\n -- Copy `_sealed` -> `sealed`\n\n UPDATE db_dbnode SET attributes = attributes - \'_sealed\'\n WHERE attributes ? \'_sealed\' AND node_type LIKE \'process.%\';\n -- Delete `_sealed`\n\n UPDATE db_dbnode SET attributes = attributes - \'_finished\'\n WHERE attributes ? 
\'_finished\' AND node_type LIKE \'process.%\';\n -- Delete `_finished`\n\n UPDATE db_dbnode SET attributes = attributes - \'_failed\'\n WHERE attributes ? \'_failed\' AND node_type LIKE \'process.%\';\n -- Delete `_failed`\n\n UPDATE db_dbnode SET attributes = attributes - \'_aborted\'\n WHERE attributes ? \'_aborted\' AND node_type LIKE \'process.%\';\n -- Delete `_aborted`\n\n UPDATE db_dbnode SET attributes = attributes - \'_do_abort\'\n WHERE attributes ? \'_do_abort\' AND node_type LIKE \'process.%\';\n -- Delete `_do_abort`\n\n UPDATE db_dbnode\n SET attributes = jsonb_set(attributes, \'{"sealed"}\', to_jsonb(True))\n WHERE\n node_type LIKE \'process.%\' AND\n NOT (attributes ? \'sealed\') AND\n attributes->>\'process_state\' NOT IN (\'created\', \'running\', \'waiting\');\n -- Set `sealed=True` for process nodes that do not yet have a `sealed` attribute AND are not in an active state\n """\n )\n', (1555, 3035), True, 'import sqlalchemy as sa\n')] |
# stub to allow changing the map without having to alter gta_model.sc
import os
mapPath = 'map.npz'
def setLocalMap(module, relpath):
    """Point the module-level ``mapPath`` at ``relpath`` next to ``module``.

    Parameters
    ----------
    module : str
        Path of the calling module (typically ``__file__``); only its
        directory is used.
    relpath : str
        Map file path relative to ``module``'s directory.

    Returns
    -------
    str
        The new value of ``mapPath``. Returning it (the original returned
        None) is backward compatible and lets callers use the resolved
        path directly.
    """
    global mapPath
    mapPath = os.path.join(os.path.dirname(module), relpath)
    return mapPath
| [
"os.path.dirname",
"os.path.join"
] | [((168, 191), 'os.path.dirname', 'os.path.dirname', (['module'], {}), '(module)\n', (183, 191), False, 'import os\n'), ((206, 233), 'os.path.join', 'os.path.join', (['base', 'relpath'], {}), '(base, relpath)\n', (218, 233), False, 'import os\n')] |
"""Module exposing the `Matrices` and `MatricesMetric` class."""
from functools import reduce
import geomstats.backend as gs
from geomstats.geometry.euclidean import Euclidean
from geomstats.geometry.riemannian_metric import RiemannianMetric
TOLERANCE = 1e-5
class Matrices(Euclidean):
    """Class for the space of matrices (m, n)."""

    def __init__(self, m, n):
        assert isinstance(m, int) and isinstance(n, int) and m > 0 and n > 0
        super(Matrices, self).__init__(dimension=m * n)
        self.m = m
        self.n = n
        self.default_point_type = 'matrix'
        self.metric = MatricesMetric(m, n)

    def belongs(self, point):
        """Check if point belongs to the Matrix space.

        Parameters
        ----------
        point : array-like, shape=[n_samples, mat_dim_1, mat_dim_2]

        Returns
        -------
        belongs : bool
        """
        point = gs.to_ndarray(point, to_ndim=3)
        _, mat_dim_1, mat_dim_2 = point.shape
        # Bugfix: the original expression
        # `mat_dim_1 == self.m & mat_dim_2 == self.n` parsed as the chained
        # comparison `mat_dim_1 == (self.m & mat_dim_2) == self.n`, because
        # bitwise `&` binds tighter than `==`. Both dimensions must match.
        return mat_dim_1 == self.m and mat_dim_2 == self.n

    @staticmethod
    def equal(mat_a, mat_b, atol=TOLERANCE):
        """
        Test if matrices a and b are close.

        Parameters
        ----------
        mat_a : array-like, shape=[n_samples, dim1, dim2]
        mat_b : array-like, shape=[n_samples, dim2, dim3]

        Returns
        -------
        eq : array-like boolean, shape=[n_samples]
        """
        # Vectorized inputs have a leading batch axis; compare over the
        # matrix axes only so one boolean per sample is returned.
        is_vectorized = \
            (gs.ndim(gs.array(mat_a)) == 3) or (gs.ndim(gs.array(mat_b)) == 3)
        axes = (1, 2) if is_vectorized else (0, 1)
        return gs.all(gs.isclose(mat_a, mat_b, atol=atol), axes)

    @staticmethod
    def mul(*args):
        """
        Return the product of matrices a1, ..., an.

        Parameters
        ----------
        a1 : array-like, shape=[n_samples, dim_1, dim_2]
        a2 : array-like, shape=[n_samples, dim_2, dim_3]
        ...
        an : array-like, shape=[n_samples, dim_n-1, dim_n]

        Returns
        -------
        mul : array-like, shape=[n_samples, dim_1, dim_n]
        """
        return reduce(gs.matmul, args)

    @classmethod
    def bracket(cls, mat_a, mat_b):
        """
        Return the commutator of a and b, i.e. `[a, b] = ab - ba`.

        Parameters
        ----------
        mat_a : array-like, shape=[n_samples, dim, dim]
        mat_b : array-like, shape=[n_samples, dim, dim]

        Returns
        -------
        mat_c : array-like, shape=[n_samples, dim, dim]
        """
        return cls.mul(mat_a, mat_b) - cls.mul(mat_b, mat_a)

    @staticmethod
    def transpose(mat):
        """Return the transpose of matrices.

        Parameters
        ----------
        mat : array-like, shape=[n_samples, dim, dim]

        Returns
        -------
        transpose : array-like, shape=[n_samples, dim, dim]
        """
        # Swap only the matrix axes, preserving the batch axis when present.
        is_vectorized = (gs.ndim(gs.array(mat)) == 3)
        axes = (0, 2, 1) if is_vectorized else (1, 0)
        return gs.transpose(mat, axes)

    @classmethod
    def is_symmetric(cls, mat, atol=TOLERANCE):
        """
        Check if a matrix is symmetric.

        Parameters
        ----------
        mat : array-like, shape=[n_samples, n, n]
        atol : float, absolute tolerance. defaults to TOLERANCE

        Returns
        -------
        is_sym : array-like boolean, shape=[n_samples]
        """
        return cls.equal(mat, cls.transpose(mat), atol)

    @classmethod
    def make_symmetric(cls, mat):
        """
        Make a matrix symmetric, by averaging with its transpose.

        Parameters
        ----------
        mat : array-like, shape=[n_samples, n, n]

        Returns
        -------
        sym : array-like, shape=[n_samples, n, n]
        """
        return 1 / 2 * (mat + cls.transpose(mat))

    def random_uniform(self, n_samples=1):
        """Generate n samples from a uniform distribution."""
        point = gs.random.rand(n_samples, self.m, self.n)
        return point

    @classmethod
    def congruent(cls, mat_1, mat_2):
        """Compute the congruent action of mat_2 on mat_1.

        This is :math: `mat_2 mat_1 mat_2^T`.

        Parameters
        ----------
        mat_1 : array-like, shape=[n_samples, n, n]
        mat_2 : array-like, shape=[n_samples, n, n]

        Returns
        -------
        cong : array-like, shape=[n_samples, n, n]
        """
        return cls.mul(mat_2, mat_1, cls.transpose(mat_2))
class MatricesMetric(RiemannianMetric):
    """Euclidean metric on matrices given by Frobenius inner product."""

    def __init__(self, m, n):
        total_dim = m * n
        super(MatricesMetric, self).__init__(
            dimension=total_dim,
            signature=(total_dim, 0, 0))

    def inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None):
        """Compute Frobenius inner product of two tan vecs at `base_point`."""
        # Promote both arguments to batched (n, i, j) form so a single
        # einsum handles vectorized and non-vectorized inputs alike.
        tangent_vec_a = gs.to_ndarray(tangent_vec_a, to_ndim=3)
        tangent_vec_b = gs.to_ndarray(tangent_vec_b, to_ndim=3)
        n_vecs_a = tangent_vec_a.shape[0]
        n_vecs_b = tangent_vec_b.shape[0]
        assert n_vecs_a == n_vecs_b
        frobenius = gs.einsum("nij,nij->n", tangent_vec_a, tangent_vec_b)
        frobenius = gs.to_ndarray(frobenius, to_ndim=1)
        # Return shape [n_samples, 1], matching the metric interface.
        return gs.to_ndarray(frobenius, to_ndim=2, axis=1)
| [
"geomstats.backend.random.rand",
"geomstats.backend.to_ndarray",
"functools.reduce",
"geomstats.backend.einsum",
"geomstats.backend.array",
"geomstats.backend.isclose",
"geomstats.backend.transpose"
] | [((735, 766), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['point'], {'to_ndim': '(3)'}), '(point, to_ndim=3)\n', (748, 766), True, 'import geomstats.backend as gs\n'), ((1907, 1930), 'functools.reduce', 'reduce', (['gs.matmul', 'args'], {}), '(gs.matmul, args)\n', (1913, 1930), False, 'from functools import reduce\n'), ((2786, 2809), 'geomstats.backend.transpose', 'gs.transpose', (['mat', 'axes'], {}), '(mat, axes)\n', (2798, 2809), True, 'import geomstats.backend as gs\n'), ((3723, 3764), 'geomstats.backend.random.rand', 'gs.random.rand', (['n_samples', 'self.m', 'self.n'], {}), '(n_samples, self.m, self.n)\n', (3737, 3764), True, 'import geomstats.backend as gs\n'), ((4718, 4757), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['tangent_vec_a'], {'to_ndim': '(3)'}), '(tangent_vec_a, to_ndim=3)\n', (4731, 4757), True, 'import geomstats.backend as gs\n'), ((4836, 4875), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['tangent_vec_b'], {'to_ndim': '(3)'}), '(tangent_vec_b, to_ndim=3)\n', (4849, 4875), True, 'import geomstats.backend as gs\n'), ((5004, 5057), 'geomstats.backend.einsum', 'gs.einsum', (['"""nij,nij->n"""', 'tangent_vec_a', 'tangent_vec_b'], {}), "('nij,nij->n', tangent_vec_a, tangent_vec_b)\n", (5013, 5057), True, 'import geomstats.backend as gs\n'), ((5079, 5115), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['inner_prod'], {'to_ndim': '(1)'}), '(inner_prod, to_ndim=1)\n', (5092, 5115), True, 'import geomstats.backend as gs\n'), ((5137, 5181), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['inner_prod'], {'to_ndim': '(2)', 'axis': '(1)'}), '(inner_prod, to_ndim=2, axis=1)\n', (5150, 5181), True, 'import geomstats.backend as gs\n'), ((1419, 1454), 'geomstats.backend.isclose', 'gs.isclose', (['mat_a', 'mat_b'], {'atol': 'atol'}), '(mat_a, mat_b, atol=atol)\n', (1429, 1454), True, 'import geomstats.backend as gs\n'), ((2696, 2709), 'geomstats.backend.array', 'gs.array', (['mat'], {}), '(mat)\n', (2704, 2709), True, 'import 
geomstats.backend as gs\n'), ((1288, 1303), 'geomstats.backend.array', 'gs.array', (['mat_a'], {}), '(mat_a)\n', (1296, 1303), True, 'import geomstats.backend as gs\n'), ((1323, 1338), 'geomstats.backend.array', 'gs.array', (['mat_b'], {}), '(mat_b)\n', (1331, 1338), True, 'import geomstats.backend as gs\n')] |
import gpsd
import json
import logging
import socket
import httpx
import paho.mqtt.client as mqtt
class MQTTReporter:
    """Publishes named payloads to an MQTT broker, optionally enriched with
    GPS position (via gpsd) and compass bearing (via an HTTP service)."""

    def __init__(self, name, mqtt_server=None, gps_server=None, compass=False):
        # name: reporter identity, stamped into every published payload.
        # mqtt_server: broker hostname; publish() is a no-op when unset.
        # gps_server: host running gpsd on port 2947 and, when `compass` is
        #   True, a bearing HTTP service on port 8000 -- TODO confirm ports
        #   against deployment.
        self.name = name
        self.mqtt_server = mqtt_server
        self.compass = compass
        self.gps_server = gps_server
        self.mqttc = None  # MQTT client, created lazily on first publish
        self.bearing = 'no bearing'  # last known bearing, as a string

    def connect(self):
        """Connect to the MQTT broker (and to gpsd when configured)."""
        logging.info(f'connecting to {self.mqtt_server}')
        self.mqttc = mqtt.Client()
        self.mqttc.connect(self.mqtt_server)
        # Background network loop so publish() calls do not block.
        self.mqttc.loop_start()
        if self.gps_server:
            gpsd.connect(host=self.gps_server, port=2947)

    def get_bearing(self):
        """Refresh self.bearing from the HTTP bearing service; best-effort."""
        try:
            self.bearing = str(float(httpx.get(f'http://{self.gps_server}:8000/v1/').text))
        except Exception as err:
            # Keep the previous bearing on any failure (network, parse, ...).
            logging.error('could not update bearing: %s', err)

    def add_gps(self, publish_args):
        """Augment `publish_args` (mutated in place) with GPS fields.

        Without a gps_server the dict is returned untouched. Otherwise
        placeholder "no fix" values are written first, then overwritten with
        real readings when gpsd reports a fix.
        """
        if not self.gps_server:
            return publish_args
        # Defaults so consumers always see the full key set, even without a fix.
        publish_args.update({
            'position': [0, 0],
            'altitude': None,
            'gps_time': None,
            'map_url': None,
            'bearing': self.bearing,
            'gps': 'no fix'})
        try:
            if self.compass:
                self.get_bearing()
            packet = gpsd.get_current()
            publish_args.update({
                'position': packet.position(),
                'altitude': packet.altitude(),
                'gps_time': packet.get_time().timestamp(),
                'map_url': packet.map_url(),
                'bearing': self.bearing,
                'gps': 'fix'})
        except (gpsd.NoFixError, AttributeError) as err:
            # No fix yet (or incomplete packet): keep the placeholder values.
            logging.error('could not update with GPS: %s', err)
        return publish_args

    def publish(self, publish_path, publish_args):
        """JSON-publish `publish_args` (plus GPS fields and `name`) to the
        MQTT topic `publish_path`. Silently does nothing without a broker;
        connection/serialization failures are logged, not raised."""
        if not self.mqtt_server:
            return
        try:
            if self.mqttc is None:
                self.connect()
            publish_args = self.add_gps(publish_args)
            publish_args['name'] = self.name
            self.mqttc.publish(publish_path, json.dumps(publish_args))
        except (socket.gaierror, ConnectionRefusedError, mqtt.WebsocketConnectionError, ValueError) as err:
            logging.error(f'failed to publish to MQTT {self.mqtt_server}: {err}')
| [
"paho.mqtt.client.Client",
"gpsd.connect",
"json.dumps",
"httpx.get",
"gpsd.get_current",
"logging.info",
"logging.error"
] | [((428, 477), 'logging.info', 'logging.info', (['f"""connecting to {self.mqtt_server}"""'], {}), "(f'connecting to {self.mqtt_server}')\n", (440, 477), False, 'import logging\n'), ((499, 512), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {}), '()\n', (510, 512), True, 'import paho.mqtt.client as mqtt\n'), ((630, 675), 'gpsd.connect', 'gpsd.connect', ([], {'host': 'self.gps_server', 'port': '(2947)'}), '(host=self.gps_server, port=2947)\n', (642, 675), False, 'import gpsd\n'), ((1323, 1341), 'gpsd.get_current', 'gpsd.get_current', ([], {}), '()\n', (1339, 1341), False, 'import gpsd\n'), ((854, 904), 'logging.error', 'logging.error', (['"""could not update bearing: %s"""', 'err'], {}), "('could not update bearing: %s', err)\n", (867, 904), False, 'import logging\n'), ((1715, 1766), 'logging.error', 'logging.error', (['"""could not update with GPS: %s"""', 'err'], {}), "('could not update with GPS: %s', err)\n", (1728, 1766), False, 'import logging\n'), ((2122, 2146), 'json.dumps', 'json.dumps', (['publish_args'], {}), '(publish_args)\n', (2132, 2146), False, 'import json\n'), ((2268, 2337), 'logging.error', 'logging.error', (['f"""failed to publish to MQTT {self.mqtt_server}: {err}"""'], {}), "(f'failed to publish to MQTT {self.mqtt_server}: {err}')\n", (2281, 2337), False, 'import logging\n'), ((754, 801), 'httpx.get', 'httpx.get', (['f"""http://{self.gps_server}:8000/v1/"""'], {}), "(f'http://{self.gps_server}:8000/v1/')\n", (763, 801), False, 'import httpx\n')] |
import os, sys, time, shutil, argparse
from functools import partial
import pickle
sys.path.append('../')
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import datasets, transforms
#import torchvision.models as models
import torch.optim as optim
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim as optim
import torch.multiprocessing as mp
from collections import OrderedDict
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx as torch_onnx
import onnx
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import lab2rgb
from skimage import io
# import prune_util
# from prune_util import GradualWarmupScheduler
# from prune_util import CrossEntropyLossMaybeSmooth
# from prune_util import mixup_data, mixup_criterion
# from utils import save_checkpoint, AverageMeter, visualize_image, GrayscaleImageFolder
# from model import ColorNet
#from wdsr_b import *
#from args import *
import captioning.utils.opts as opts
import captioning.models as models
import captioning.utils.misc as utils
import onnxruntime
def main():
    """Export the trained FC image-captioning model to ONNX.

    Loads model options and vocabulary from a hard-coded infos pickle,
    builds the model via ``models.setup``, restores the checkpoint weights,
    then traces the network with dummy ``(fc_feats, att_feats, labels,
    masks)`` inputs and writes ``./image_captioning.onnx``.

    Cleanup versus the original: removed the unused ``use_gpu`` flag, the
    four unused ``*_size`` tuples, the dead commented-out code, and the
    pointless binding of ``torch.onnx.export``'s None return value.
    """
    parser = argparse.ArgumentParser()
    # Input paths (kept for CLI compatibility; infos_path is overridden below).
    parser.add_argument('--model', type=str, default='',
                        help='path to model to evaluate')
    parser.add_argument('--cnn_model', type=str, default='resnet101',
                        help='resnet101, resnet152')
    parser.add_argument('--infos_path', type=str, default='',
                        help='path to infos to evaluate')
    parser.add_argument('--only_lang_eval', type=int, default=0,
                        help='lang eval on saved results')
    parser.add_argument('--force', type=int, default=0,
                        help='force to evaluate no matter if there are results available')
    opts.add_eval_options(parser)
    opts.add_diversity_opts(parser)
    opt = parser.parse_args()
    opt.caption_model = 'newfc'
    opt.infos_path = '/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/infos_fc_nsc-best.pkl'
    with open(opt.infos_path, 'rb') as f:
        infos = utils.pickle_load(f)

    # Merge options saved with the checkpoint into `opt`: path/batch options
    # prefer the CLI value when set; everything else (except `start_from`)
    # is copied over when missing.
    replace = ['input_fc_dir', 'input_att_dir', 'input_box_dir', 'input_label_h5', 'input_json', 'batch_size', 'id']
    ignore = ['start_from']
    for k in vars(infos['opt']).keys():
        if k in replace:
            setattr(opt, k, getattr(opt, k) or getattr(infos['opt'], k, ''))
        elif k not in ignore:
            if not k in vars(opt):
                vars(opt).update({k: vars(infos['opt'])[k]})  # copy over options from model

    vocab = infos['vocab']  # ix -> word mapping
    opt.vocab = vocab
    model = models.setup(opt)
    checkpoint = torch.load("/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/model-best.pth")
    model.load_state_dict(checkpoint)

    model_onnx_path = "./image_captioning.onnx"
    model.train(False)  # inference mode before tracing

    # Dummy inputs mirroring a batch of 10 with 5 captions of length 18:
    # fc features (10, 2048), empty attention features, label/mask tensors.
    dummy_cocotest_bu_fc = Variable(torch.randn(10, 2048))
    dummy_cocotest_bu_att = Variable(torch.randn(10, 0, 0))
    dummy_labels = Variable(torch.randint(5200, (10, 5, 18)))
    dummy_masks = Variable(torch.randint(1, (10, 5, 18)))
    # torch.onnx.export returns None; the graph is written to model_onnx_path.
    torch_onnx.export(model,
                      (dummy_cocotest_bu_fc, dummy_cocotest_bu_att, dummy_labels, dummy_masks),
                      model_onnx_path, verbose=False)
    print("Export of torch_model.onnx complete!")
def check():
parser = argparse.ArgumentParser()
# Input paths
parser.add_argument('--model', type=str, default='',
help='path to model to evaluate')
parser.add_argument('--cnn_model', type=str, default='resnet101',
help='resnet101, resnet152')
parser.add_argument('--infos_path', type=str, default='',
help='path to infos to evaluate')
parser.add_argument('--only_lang_eval', type=int, default=0,
help='lang eval on saved results')
parser.add_argument('--force', type=int, default=0,
help='force to evaluate no matter if there are results available')
opts.add_eval_options(parser)
opts.add_diversity_opts(parser)
opt = parser.parse_args()
opt.caption_model = 'newfc'
opt.infos_path = '/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/infos_fc_nsc-best.pkl'
with open(opt.infos_path, 'rb') as f:
infos = utils.pickle_load(f)
replace = ['input_fc_dir', 'input_att_dir', 'input_box_dir', 'input_label_h5', 'input_json', 'batch_size', 'id']
ignore = ['start_from']
for k in vars(infos['opt']).keys():
if k in replace:
setattr(opt, k, getattr(opt, k) or getattr(infos['opt'], k, ''))
elif k not in ignore:
if not k in vars(opt):
vars(opt).update({k: vars(infos['opt'])[k]}) # copy over options from model
vocab = infos['vocab'] # ix -> word mapping
opt.vocab = vocab
model = models.setup(opt)
checkpoint = torch.load("/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/model-best.pth")
model.load_state_dict(checkpoint)
# torch.nn.utils.remove_weight_norm(model.head[0])
# for i in range(2):
# for j in [0,2,3]:
# torch.nn.utils.remove_weight_norm(model.body[i].body[j])
# torch.nn.utils.remove_weight_norm(model.tail[0])
# torch.nn.utils.remove_weight_norm(model.skip[0])
model.eval()
ort_session = onnxruntime.InferenceSession("image_captioning.onnx")
dummy_cocotest_bu_fc = Variable(torch.randn(10, 2048))
dummy_cocotest_bu_att = Variable(torch.randn(10, 0, 0))
dummy_labels = Variable(torch.randint(5200, (10, 5, 18)))
dummy_masks = Variable(torch.randint(1, (10, 5, 18)))
x = (dummy_cocotest_bu_fc, dummy_cocotest_bu_att, dummy_labels, dummy_masks)
#x = torch.randn(1, 3, 392, 392, requires_grad=False)
#torch_out = model(x)
# # Load the ONNX model
# model = onnx.load("wdsr_b.onnx")
# # Check that the IR is well formed
# onnx.checker.check_model(model)
# # Print a human readable representation of the graph
# onnx.helper.printable_graph(model.graph)
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
if __name__ == '__main__':
main()
check()
| [
"argparse.ArgumentParser",
"captioning.utils.opts.add_eval_options",
"captioning.utils.misc.pickle_load",
"torch.load",
"onnxruntime.InferenceSession",
"torch.randn",
"captioning.models.setup",
"torch.randint",
"torch.cuda.is_available",
"sys.path.append",
"captioning.utils.opts.add_diversity_op... | [((83, 105), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (98, 105), False, 'import os, sys, time, shutil, argparse\n'), ((1188, 1213), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1211, 1213), False, 'import torch\n'), ((1678, 1703), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1701, 1703), False, 'import os, sys, time, shutil, argparse\n'), ((2316, 2345), 'captioning.utils.opts.add_eval_options', 'opts.add_eval_options', (['parser'], {}), '(parser)\n', (2337, 2345), True, 'import captioning.utils.opts as opts\n'), ((2350, 2381), 'captioning.utils.opts.add_diversity_opts', 'opts.add_diversity_opts', (['parser'], {}), '(parser)\n', (2373, 2381), True, 'import captioning.utils.opts as opts\n'), ((3154, 3171), 'captioning.models.setup', 'models.setup', (['opt'], {}), '(opt)\n', (3166, 3171), True, 'import captioning.models as models\n'), ((3190, 3279), 'torch.load', 'torch.load', (['"""/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/model-best.pth"""'], {}), "(\n '/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/model-best.pth')\n", (3200, 3279), False, 'import torch\n'), ((4146, 4280), 'torch.onnx.export', 'torch_onnx.export', (['model', '(dummy_cocotest_bu_fc, dummy_cocotest_bu_att, dummy_labels, dummy_masks)', 'model_onnx_path'], {'verbose': '(False)'}), '(model, (dummy_cocotest_bu_fc, dummy_cocotest_bu_att,\n dummy_labels, dummy_masks), model_onnx_path, verbose=False)\n', (4163, 4280), True, 'import torch.onnx as torch_onnx\n'), ((4355, 4380), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4378, 4380), False, 'import os, sys, time, shutil, argparse\n'), ((4993, 5022), 'captioning.utils.opts.add_eval_options', 'opts.add_eval_options', (['parser'], {}), '(parser)\n', (5014, 5022), True, 'import captioning.utils.opts as opts\n'), ((5027, 5058), 'captioning.utils.opts.add_diversity_opts', 
'opts.add_diversity_opts', (['parser'], {}), '(parser)\n', (5050, 5058), True, 'import captioning.utils.opts as opts\n'), ((5831, 5848), 'captioning.models.setup', 'models.setup', (['opt'], {}), '(opt)\n', (5843, 5848), True, 'import captioning.models as models\n'), ((5867, 5956), 'torch.load', 'torch.load', (['"""/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/model-best.pth"""'], {}), "(\n '/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/model-best.pth')\n", (5877, 5956), False, 'import torch\n'), ((6316, 6369), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['"""image_captioning.onnx"""'], {}), "('image_captioning.onnx')\n", (6344, 6369), False, 'import onnxruntime\n'), ((2603, 2623), 'captioning.utils.misc.pickle_load', 'utils.pickle_load', (['f'], {}), '(f)\n', (2620, 2623), True, 'import captioning.utils.misc as utils\n'), ((3846, 3867), 'torch.randn', 'torch.randn', (['(10)', '(2048)'], {}), '(10, 2048)\n', (3857, 3867), False, 'import torch\n'), ((3906, 3927), 'torch.randn', 'torch.randn', (['(10)', '(0)', '(0)'], {}), '(10, 0, 0)\n', (3917, 3927), False, 'import torch\n'), ((3957, 3989), 'torch.randint', 'torch.randint', (['(5200)', '(10, 5, 18)'], {}), '(5200, (10, 5, 18))\n', (3970, 3989), False, 'import torch\n'), ((4018, 4047), 'torch.randint', 'torch.randint', (['(1)', '(10, 5, 18)'], {}), '(1, (10, 5, 18))\n', (4031, 4047), False, 'import torch\n'), ((5280, 5300), 'captioning.utils.misc.pickle_load', 'utils.pickle_load', (['f'], {}), '(f)\n', (5297, 5300), True, 'import captioning.utils.misc as utils\n'), ((6407, 6428), 'torch.randn', 'torch.randn', (['(10)', '(2048)'], {}), '(10, 2048)\n', (6418, 6428), False, 'import torch\n'), ((6467, 6488), 'torch.randn', 'torch.randn', (['(10)', '(0)', '(0)'], {}), '(10, 0, 0)\n', (6478, 6488), False, 'import torch\n'), ((6518, 6550), 'torch.randint', 'torch.randint', (['(5200)', '(10, 5, 18)'], {}), '(5200, (10, 5, 18))\n', (6531, 6550), False, 'import torch\n'), ((6579, 6608), 
'torch.randint', 'torch.randint', (['(1)', '(10, 5, 18)'], {}), '(1, (10, 5, 18))\n', (6592, 6608), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-27 14:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Accession',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, db_index=True, max_length=255, null=True)),
('country', models.CharField(blank=True, max_length=255, null=True)),
('sitename', models.TextField(blank=True, null=True)),
('collector', models.TextField(blank=True, null=True)),
('collection_date', models.DateTimeField(blank=True, null=True)),
('longitude', models.FloatField(blank=True, db_index=True, null=True)),
('latitude', models.FloatField(blank=True, db_index=True, null=True)),
('cs_number', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('firstname', models.CharField(blank=True, max_length=100, null=True)),
('lastname', models.CharField(blank=True, db_index=True, max_length=200, null=True)),
],
),
migrations.CreateModel(
name='ObservationUnit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('accession', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Accession')),
],
),
migrations.CreateModel(
name='OntologySource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('acronym', models.CharField(max_length=50)),
('name', models.CharField(max_length=255)),
('url', models.URLField()),
],
),
migrations.CreateModel(
name='OntologyTerm',
fields=[
('id', models.CharField(max_length=50, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('definition', models.TextField(blank=True, null=True)),
('comment', models.TextField(blank=True, null=True)),
('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.OntologySource')),
],
),
migrations.CreateModel(
name='Phenotype',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('doi', models.CharField(blank=True, db_index=True, max_length=255, null=True)),
('name', models.CharField(db_index=True, max_length=255)),
('scoring', models.TextField(blank=True, null=True)),
('source', models.TextField(blank=True, null=True)),
('type', models.CharField(blank=True, max_length=255, null=True)),
('growth_conditions', models.TextField(blank=True, null=True)),
('shapiro_test_statistic', models.FloatField(blank=True, null=True)),
('shapiro_p_value', models.FloatField(blank=True, null=True)),
('number_replicates', models.IntegerField(default=0)),
('integration_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='PhenotypeMetaDynamic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phenotype_meta_field', models.CharField(db_index=True, max_length=255)),
('phenotype_meta_value', models.TextField()),
('phenotype_public', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Phenotype')),
],
),
migrations.CreateModel(
name='PhenotypeValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.FloatField()),
('obs_unit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.ObservationUnit')),
('phenotype', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Phenotype')),
],
),
migrations.CreateModel(
name='Publication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author_order', models.TextField()),
('publication_tag', models.CharField(max_length=255)),
('pub_year', models.IntegerField(blank=True, null=True)),
('title', models.CharField(db_index=True, max_length=255)),
('journal', models.CharField(max_length=255)),
('volume', models.CharField(blank=True, max_length=255, null=True)),
('pages', models.CharField(blank=True, max_length=255, null=True)),
('doi', models.CharField(blank=True, db_index=True, max_length=255, null=True)),
('pubmed_id', models.CharField(blank=True, db_index=True, max_length=255, null=True)),
('authors', models.ManyToManyField(to='phenotypedb.Author')),
],
),
migrations.CreateModel(
name='Species',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ncbi_id', models.IntegerField(blank=True, null=True)),
('genus', models.CharField(max_length=255)),
('species', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Study',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('publications', models.ManyToManyField(blank=True, to='phenotypedb.Publication')),
('species', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Species')),
],
),
migrations.AddField(
model_name='phenotype',
name='dynamic_metainformations',
field=models.ManyToManyField(to='phenotypedb.PhenotypeMetaDynamic'),
),
migrations.AddField(
model_name='phenotype',
name='eo_term',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='eo_term', to='phenotypedb.OntologyTerm'),
),
migrations.AddField(
model_name='phenotype',
name='species',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Species'),
),
migrations.AddField(
model_name='phenotype',
name='study',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Study'),
),
migrations.AddField(
model_name='phenotype',
name='to_term',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='to_term', to='phenotypedb.OntologyTerm'),
),
migrations.AddField(
model_name='phenotype',
name='uo_term',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='uo_term', to='phenotypedb.OntologyTerm'),
),
migrations.AddField(
model_name='observationunit',
name='study',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Study'),
),
migrations.AddField(
model_name='accession',
name='species',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Species'),
),
]
| [
"django.db.models.FloatField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.URLField",
"django.db.models.CharField"
] | [((7170, 7231), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""phenotypedb.PhenotypeMetaDynamic"""'}), "(to='phenotypedb.PhenotypeMetaDynamic')\n", (7192, 7231), False, 'from django.db import migrations, models\n'), ((7355, 7500), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""eo_term"""', 'to': '"""phenotypedb.OntologyTerm"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='eo_term', to='phenotypedb.OntologyTerm')\n", (7372, 7500), False, 'from django.db import migrations, models\n'), ((7619, 7712), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""phenotypedb.Species"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'phenotypedb.Species')\n", (7636, 7712), False, 'from django.db import migrations, models\n'), ((7829, 7920), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""phenotypedb.Study"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'phenotypedb.Study')\n", (7846, 7920), False, 'from django.db import migrations, models\n'), ((8039, 8184), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""to_term"""', 'to': '"""phenotypedb.OntologyTerm"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='to_term', to='phenotypedb.OntologyTerm')\n", (8056, 8184), False, 'from django.db import migrations, models\n'), ((8303, 8448), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""uo_term"""', 'to': '"""phenotypedb.OntologyTerm"""'}), "(blank=True, null=True, 
on_delete=django.db.models.\n deletion.CASCADE, related_name='uo_term', to='phenotypedb.OntologyTerm')\n", (8320, 8448), False, 'from django.db import migrations, models\n'), ((8571, 8662), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""phenotypedb.Study"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'phenotypedb.Study')\n", (8588, 8662), False, 'from django.db import migrations, models\n'), ((8781, 8874), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""phenotypedb.Species"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'phenotypedb.Species')\n", (8798, 8874), False, 'from django.db import migrations, models\n'), ((402, 495), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (418, 495), False, 'from django.db import migrations, models\n'), ((519, 589), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'db_index': '(True)', 'max_length': '(255)', 'null': '(True)'}), '(blank=True, db_index=True, max_length=255, null=True)\n', (535, 589), False, 'from django.db import migrations, models\n'), ((620, 675), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'null': '(True)'}), '(blank=True, max_length=255, null=True)\n', (636, 675), False, 'from django.db import migrations, models\n'), ((707, 746), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (723, 746), False, 'from django.db import migrations, models\n'), ((779, 818), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (795, 818), False, 'from 
django.db import migrations, models\n'), ((857, 900), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (877, 900), False, 'from django.db import migrations, models\n'), ((933, 988), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'db_index': '(True)', 'null': '(True)'}), '(blank=True, db_index=True, null=True)\n', (950, 988), False, 'from django.db import migrations, models\n'), ((1020, 1075), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'db_index': '(True)', 'null': '(True)'}), '(blank=True, db_index=True, null=True)\n', (1037, 1075), False, 'from django.db import migrations, models\n'), ((1108, 1163), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'null': '(True)'}), '(blank=True, max_length=255, null=True)\n', (1124, 1163), False, 'from django.db import migrations, models\n'), ((1295, 1388), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1311, 1388), False, 'from django.db import migrations, models\n'), ((1417, 1472), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)'}), '(blank=True, max_length=100, null=True)\n', (1433, 1472), False, 'from django.db import migrations, models\n'), ((1504, 1574), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'db_index': '(True)', 'max_length': '(200)', 'null': '(True)'}), '(blank=True, db_index=True, max_length=200, null=True)\n', (1520, 1574), False, 'from django.db import migrations, models\n'), ((1715, 1808), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 
'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1731, 1808), False, 'from django.db import migrations, models\n'), ((1837, 1932), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""phenotypedb.Accession"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'phenotypedb.Accession')\n", (1854, 1932), False, 'from django.db import migrations, models\n'), ((2067, 2160), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2083, 2160), False, 'from django.db import migrations, models\n'), ((2187, 2218), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (2203, 2218), False, 'from django.db import migrations, models\n'), ((2246, 2278), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2262, 2278), False, 'from django.db import migrations, models\n'), ((2305, 2322), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (2320, 2322), False, 'from django.db import migrations, models\n'), ((2460, 2526), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'primary_key': '(True)', 'serialize': '(False)'}), '(max_length=50, primary_key=True, serialize=False)\n', (2476, 2526), False, 'from django.db import migrations, models\n'), ((2554, 2586), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2570, 2586), False, 'from django.db import migrations, models\n'), ((2620, 2659), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2636, 2659), False, 'from django.db import migrations, models\n'), 
((2690, 2729), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2706, 2729), False, 'from django.db import migrations, models\n'), ((2759, 2859), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""phenotypedb.OntologySource"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'phenotypedb.OntologySource')\n", (2776, 2859), False, 'from django.db import migrations, models\n'), ((2989, 3082), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3005, 3082), False, 'from django.db import migrations, models\n'), ((3105, 3175), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'db_index': '(True)', 'max_length': '(255)', 'null': '(True)'}), '(blank=True, db_index=True, max_length=255, null=True)\n', (3121, 3175), False, 'from django.db import migrations, models\n'), ((3203, 3250), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'max_length': '(255)'}), '(db_index=True, max_length=255)\n', (3219, 3250), False, 'from django.db import migrations, models\n'), ((3281, 3320), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3297, 3320), False, 'from django.db import migrations, models\n'), ((3350, 3389), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3366, 3389), False, 'from django.db import migrations, models\n'), ((3417, 3472), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'null': '(True)'}), '(blank=True, max_length=255, null=True)\n', (3433, 3472), False, 'from django.db import 
migrations, models\n'), ((3513, 3552), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3529, 3552), False, 'from django.db import migrations, models\n'), ((3598, 3638), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3615, 3638), False, 'from django.db import migrations, models\n'), ((3677, 3717), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3694, 3717), False, 'from django.db import migrations, models\n'), ((3758, 3788), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (3777, 3788), False, 'from django.db import migrations, models\n'), ((3828, 3867), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (3848, 3867), False, 'from django.db import migrations, models\n'), ((4013, 4106), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (4029, 4106), False, 'from django.db import migrations, models\n'), ((4146, 4193), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'max_length': '(255)'}), '(db_index=True, max_length=255)\n', (4162, 4193), False, 'from django.db import migrations, models\n'), ((4237, 4255), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (4253, 4255), False, 'from django.db import migrations, models\n'), ((4295, 4413), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""phenotypedb.Phenotype"""'}), "(blank=True, null=True, on_delete=django.db.models.\n 
deletion.CASCADE, to='phenotypedb.Phenotype')\n", (4312, 4413), False, 'from django.db import migrations, models\n'), ((4548, 4641), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (4564, 4641), False, 'from django.db import migrations, models\n'), ((4666, 4685), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (4683, 4685), False, 'from django.db import migrations, models\n'), ((4717, 4818), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""phenotypedb.ObservationUnit"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'phenotypedb.ObservationUnit')\n", (4734, 4818), False, 'from django.db import migrations, models\n'), ((4846, 4941), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""phenotypedb.Phenotype"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'phenotypedb.Phenotype')\n", (4863, 4941), False, 'from django.db import migrations, models\n'), ((5073, 5166), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (5089, 5166), False, 'from django.db import migrations, models\n'), ((5198, 5216), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (5214, 5216), False, 'from django.db import migrations, models\n'), ((5255, 5287), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5271, 5287), False, 'from django.db import migrations, models\n'), ((5319, 5361), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': 
'(True)'}), '(blank=True, null=True)\n', (5338, 5361), False, 'from django.db import migrations, models\n'), ((5390, 5437), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'max_length': '(255)'}), '(db_index=True, max_length=255)\n', (5406, 5437), False, 'from django.db import migrations, models\n'), ((5468, 5500), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5484, 5500), False, 'from django.db import migrations, models\n'), ((5530, 5585), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'null': '(True)'}), '(blank=True, max_length=255, null=True)\n', (5546, 5585), False, 'from django.db import migrations, models\n'), ((5614, 5669), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'null': '(True)'}), '(blank=True, max_length=255, null=True)\n', (5630, 5669), False, 'from django.db import migrations, models\n'), ((5696, 5766), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'db_index': '(True)', 'max_length': '(255)', 'null': '(True)'}), '(blank=True, db_index=True, max_length=255, null=True)\n', (5712, 5766), False, 'from django.db import migrations, models\n'), ((5799, 5869), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'db_index': '(True)', 'max_length': '(255)', 'null': '(True)'}), '(blank=True, db_index=True, max_length=255, null=True)\n', (5815, 5869), False, 'from django.db import migrations, models\n'), ((5900, 5947), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""phenotypedb.Author"""'}), "(to='phenotypedb.Author')\n", (5922, 5947), False, 'from django.db import migrations, models\n'), ((6080, 6173), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, 
primary_key=True, serialize=False,\n verbose_name='ID')\n", (6096, 6173), False, 'from django.db import migrations, models\n'), ((6200, 6242), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (6219, 6242), False, 'from django.db import migrations, models\n'), ((6271, 6303), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (6287, 6303), False, 'from django.db import migrations, models\n'), ((6334, 6366), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (6350, 6366), False, 'from django.db import migrations, models\n'), ((6401, 6440), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (6417, 6440), False, 'from django.db import migrations, models\n'), ((6571, 6664), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (6587, 6664), False, 'from django.db import migrations, models\n'), ((6688, 6720), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (6704, 6720), False, 'from django.db import migrations, models\n'), ((6755, 6794), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (6771, 6794), False, 'from django.db import migrations, models\n'), ((6830, 6894), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'to': '"""phenotypedb.Publication"""'}), "(blank=True, to='phenotypedb.Publication')\n", (6852, 6894), False, 'from django.db import migrations, models\n'), ((6925, 7018), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 
'django.db.models.deletion.CASCADE', 'to': '"""phenotypedb.Species"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'phenotypedb.Species')\n", (6942, 7018), False, 'from django.db import migrations, models\n')] |
from release_libraries import LibraryParameters
from bam_finder import getBamPath, library_default_dir, MT_default_dir, ShopVersion
import argparse
import re
from has_read_groups import read_group_checks
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Augment the bam list for a release with a prior existing version of the library")
    parser.add_argument("bam_list", help="Each line contains the parameters to build a library bam for release. This includes the library ID, the individual ID, experiment, read group description (sequencing run name with experiment type and udg treatment), experiment, and (bam, sequencing run date) pairs ")
    args = parser.parse_args()

    # One LibraryParameters entry per line of the input list.
    with open(args.bam_list) as f:
        library_parameters = [LibraryParameters(line) for line in f]

    for x in library_parameters:
        # All 1240k experiment variants are filed under the plain '1240k' name.
        experiment = x.experiment
        if '1240k' in experiment:
            experiment = '1240k'
        # Mitochondrial (rsrs) libraries live in a separate directory tree.
        search_directory = MT_default_dir if x.reference == 'rsrs' else library_default_dir
        existingBAM = getBamPath(x.library_id, experiment=experiment, reference=x.reference, version_policy='latest', shop_parent_directory=search_directory)
        bam = str(existingBAM)
        if bam:  # an existing version of this library was found
            try:
                # New-pipeline bams encode their version in the filename,
                # e.g. '...v3.bam'.  The dot is escaped so it matches only a
                # literal '.', not any character.
                match = re.search(r'v([0-9]+)\.bam', bam)
                new_version = int(match.group(1)) + 1
                has_read_groups, has_real_library_name, date_string = read_group_checks(bam)
            except Exception:
                # Filename did not match the new-pipeline scheme (match is
                # None), so the existing bam is one of Shop's versions.
                # 'except Exception' (not bare 'except') so Ctrl-C and
                # SystemExit still propagate.
                new_version = 1
                shop = ShopVersion(bam)
                date_string = shop.date_string
            x.version = new_version
            x.bam_filenames.append(str(existingBAM))
            # The bam date string is used for generating read groups, which
            # the existing bam does not need; carried along for consistency.
            x.bam_date_strings.append(date_string)
        print(x)
| [
"has_read_groups.read_group_checks",
"argparse.ArgumentParser",
"bam_finder.ShopVersion",
"bam_finder.getBamPath",
"release_libraries.LibraryParameters",
"re.search"
] | [((242, 370), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Augment the bam list for a release with a prior existing version of the library"""'}), "(description=\n 'Augment the bam list for a release with a prior existing version of the library'\n )\n", (265, 370), False, 'import argparse\n'), ((1005, 1144), 'bam_finder.getBamPath', 'getBamPath', (['x.library_id'], {'experiment': 'experiment', 'reference': 'x.reference', 'version_policy': '"""latest"""', 'shop_parent_directory': 'search_directory'}), "(x.library_id, experiment=experiment, reference=x.reference,\n version_policy='latest', shop_parent_directory=search_directory)\n", (1015, 1144), False, 'from bam_finder import getBamPath, library_default_dir, MT_default_dir, ShopVersion\n'), ((753, 776), 'release_libraries.LibraryParameters', 'LibraryParameters', (['line'], {}), '(line)\n', (770, 776), False, 'from release_libraries import LibraryParameters\n'), ((1256, 1287), 're.search', 're.search', (['"""v([0-9]+).bam"""', 'bam'], {}), "('v([0-9]+).bam', bam)\n", (1265, 1287), False, 'import re\n'), ((1388, 1410), 'has_read_groups.read_group_checks', 'read_group_checks', (['bam'], {}), '(bam)\n', (1405, 1410), False, 'from has_read_groups import read_group_checks\n'), ((1489, 1505), 'bam_finder.ShopVersion', 'ShopVersion', (['bam'], {}), '(bam)\n', (1500, 1505), False, 'from bam_finder import getBamPath, library_default_dir, MT_default_dir, ShopVersion\n')] |
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
# Interactive plotting so figures appear as the script runs.
plt.ion()
bands = [2,3]  # bands whose channels will be measured
single_channel_readout = 2  # 2 -> 2.4 MHz single-channel data mode (see long comment below)
nsamp = 2**25  # samples per take_debug_data acquisition
new_chans = False  # True: rebuild the channel/eta dictionaries from the currently-on channels
def etaPhaseModDegree(etaPhase):
    """Wrap an angle in degrees into the half-open interval [-180, 180)."""
    shifted = etaPhase + 180
    wrapped = shifted % 360
    return wrapped - 180
# For resonator I/Q high-sampled data use the eta_mag + eta_phase found in eta
# scans for Q and +/- 90 deg for I.  For off-resonance data (HEMT, etc.) set
# eta_mag = 1 and eta_phase = 0 & 90, or use the eta_phase of the closest
# resonator for "Q" and that +/- 90 for "I".
# In single_channel_readout mode 2 data is taken at 2.4 MHz and decimation /
# filter_alpha do not matter; for single_channel_readout = 1 (600 kHz) they do.
# See https://confluence.slac.stanford.edu/display/SMuRF/SMuRF+firmware#SMuRFfirmware-Datamodes

if new_chans:
    # Record the tuning (frequency, subband, eta) of every currently-on
    # channel, then turn the channels off so each one can be re-driven as a
    # fixed tone below.
    chans = {}
    freqs = {}
    sbs = {}
    eta_mags_scaled = {}
    eta_phases = {}
    for band in bands:
        chans[band] = S.which_on(band)
        freqs[band] = []
        sbs[band] = []
        eta_mags_scaled[band] = []
        eta_phases[band] = []
        for chan in chans[band]:
            freqs[band].append(S.channel_to_freq(band,chan))
            sbs[band].append(S.freq_to_subband(band,S.channel_to_freq(band,chan))[0])
            eta_mags_scaled[band].append(S.get_eta_mag_scaled_channel(band,chan))
            eta_phases[band].append(S.get_eta_phase_degree_channel(band,chan))
            S.channel_off(band,chan)
        freqs[band] = np.asarray(freqs[band])
        sbs[band] = np.asarray(sbs[band])
        eta_mags_scaled[band] = np.asarray(eta_mags_scaled[band])
        eta_phases[band] = np.asarray(eta_phases[band])

# NOTE(review): when new_chans is False, `chans` and `freqs` must already
# exist from a previous run in the same interactive session -- confirm
# before running this script standalone.
for band in bands:
    for i,chan in enumerate(chans[band]):
        plt.figure()
        # Drive a single fixed tone at this resonator with feedback and the
        # flux ramp off, so df measures raw I/Q noise.
        S.set_fixed_tone(freqs[band][i],12)
        S.set_feedback_enable(band,0)
        S.flux_ramp_off()
        # eta_phase = 0 / eta_mag = 1 reads the raw quadratures; to
        # demodulate on resonance use eta_phases[band][i] and
        # eta_mags_scaled[band][i] instead.
        qEtaPhaseDegree = 0
        EtaMag = 1
        channel = S.which_on(band)[0]
        S.set_eta_mag_scaled_channel(band,channel,EtaMag)
        alpha = 1.0
        for IorQ in ['Q0','Q+','I+','I-']:
            # String comparison must use ==, not 'is' (identity), which only
            # happened to work through CPython string interning.
            if IorQ == 'Q0':
                S.set_eta_phase_degree_channel(band,channel,qEtaPhaseDegree)
            if IorQ == 'Q+':
                S.set_eta_phase_degree_channel(band,channel,etaPhaseModDegree(qEtaPhaseDegree+180))
            if IorQ == 'I+':
                S.set_eta_phase_degree_channel(band,channel,etaPhaseModDegree(qEtaPhaseDegree+90))
            if IorQ == 'I-':
                S.set_eta_phase_degree_channel(band,channel,etaPhaseModDegree(qEtaPhaseDegree-90))
            ctime1=int(S.get_timestamp())
            # Take one debug-data acquisition; at 2.4 MHz, 2^25 samples is
            # roughly 14 s of data (presumably -- confirm against the
            # configured data rate).
            f, df, sync = S.take_debug_data(band, channel=channel, IQstream=False, single_channel_readout=single_channel_readout, nsamp=nsamp,filename=str(ctime1));
            f,Pxx = signal.welch(df,nperseg = 2**16,fs=2.4e6)
            Pxx = np.sqrt(Pxx)
            plt.loglog(f,Pxx,alpha=alpha,label = IorQ+': '+str(ctime1))
            alpha = alpha*0.8
        plt.xlabel('Frequency [Hz]',fontsize = 16)
        plt.ylabel('I/Q Noise',fontsize = 16)
        plt.title('Resonator at '+str(np.round(freqs[band][i],1))+ 'MHz')
        plt.legend()
        plt.show()
        plt.savefig(S.plot_dir+'/'+str(ctime1)+'_band_'+str(band)+'_chan_'+str(chan)+'.png')
        plt.close()
        S.channel_off(band,channel)
S.flux_ramp_on()
| [
"scipy.signal.welch",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.round",
"matplotlib.pyplot.xlabel",
"numpy.asarray",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((81, 90), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (88, 90), True, 'import matplotlib.pyplot as plt\n'), ((1465, 1488), 'numpy.asarray', 'np.asarray', (['freqs[band]'], {}), '(freqs[band])\n', (1475, 1488), True, 'import numpy as np\n'), ((1509, 1530), 'numpy.asarray', 'np.asarray', (['sbs[band]'], {}), '(sbs[band])\n', (1519, 1530), True, 'import numpy as np\n'), ((1563, 1596), 'numpy.asarray', 'np.asarray', (['eta_mags_scaled[band]'], {}), '(eta_mags_scaled[band])\n', (1573, 1596), True, 'import numpy as np\n'), ((1624, 1652), 'numpy.asarray', 'np.asarray', (['eta_phases[band]'], {}), '(eta_phases[band])\n', (1634, 1652), True, 'import numpy as np\n'), ((1723, 1735), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1733, 1735), True, 'import matplotlib.pyplot as plt\n'), ((3437, 3478), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency [Hz]"""'], {'fontsize': '(16)'}), "('Frequency [Hz]', fontsize=16)\n", (3447, 3478), True, 'import matplotlib.pyplot as plt\n'), ((3488, 3524), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""I/Q Noise"""'], {'fontsize': '(16)'}), "('I/Q Noise', fontsize=16)\n", (3498, 3524), True, 'import matplotlib.pyplot as plt\n'), ((3608, 3620), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3618, 3620), True, 'import matplotlib.pyplot as plt\n'), ((3629, 3639), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3637, 3639), True, 'import matplotlib.pyplot as plt\n'), ((3741, 3752), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3750, 3752), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3121), 'scipy.signal.welch', 'signal.welch', (['df'], {'nperseg': '(2 ** 16)', 'fs': '(2400000.0)'}), '(df, nperseg=2 ** 16, fs=2400000.0)\n', (3086, 3121), True, 'import scipy.signal as signal\n'), ((3134, 3146), 'numpy.sqrt', 'np.sqrt', (['Pxx'], {}), '(Pxx)\n', (3141, 3146), True, 'import numpy as np\n'), ((3564, 3591), 'numpy.round', 'np.round', (['freqs[band][i]', '(1)'], {}), 
'(freqs[band][i], 1)\n', (3572, 3591), True, 'import numpy as np\n')] |
from typing import List, Tuple
import numpy as np
import pymeshfix
import trimesh.voxel.creation
from skimage.measure import marching_cubes
from trimesh import Trimesh
from trimesh.smoothing import filter_taubin
from ..types import BinaryImage, LabelImage
def _round_to_pitch(coordinate: np.ndarray, pitch: float) -> np.ndarray:
"""Round a point to the nearest point on a grid that starts at the origin
with a specified pitch.
Parameters
----------
coordinate : np.ndarray
The coordinate to round
pitch : float
The pitch of the grid. Assumed to the be same in all directions.
Returns
-------
rounded_point : np.ndarray
The point after rounding to the nearest grid point.
"""
return pitch * np.round(coordinate / pitch, decimals=0)
def repair_mesh(mesh: Trimesh) -> Trimesh:
    """Repair a mesh using pymeshfix.

    Parameters
    ----------
    mesh : Trimesh
        The mesh to be repaired.

    Returns
    -------
    repaired_mesh : Trimesh
        A watertight version of the input mesh.

    Raises
    ------
    ValueError
        If the mesh is still not watertight after repair.
    """
    vertices = np.asarray(mesh.vertices)
    faces = np.asarray(mesh.faces)
    vertices_clean, faces_clean = pymeshfix.clean_from_arrays(vertices, faces)

    # create the mesh object
    repaired_mesh = Trimesh(vertices=vertices_clean, faces=faces_clean)

    # Raise explicitly rather than assert: asserts are stripped under
    # ``python -O``, which would silently skip this validation.
    if not repaired_mesh.is_watertight:
        raise ValueError("Mesh was unable to be repaired")
    return repaired_mesh
def binary_mask_to_surface(
    object_mask: BinaryImage, n_mesh_smoothing_interations: int = 50
) -> Trimesh:
    """Convert the surface of a 3D binary mask into a watertight mesh.

    Parameters
    ----------
    object_mask : BinaryImage
        A 3D binary image corresponding to the object you want to mesh.
    n_mesh_smoothing_interations : int
        Number of iterations of Taubin smoothing applied to the mesh
        (trimesh.smoothing.filter_taubin). Default is 50; 0 disables
        smoothing.

    Returns
    -------
    mesh : trimesh.Trimesh
        The resulting mesh as a trimesh.Trimesh object.
        https://trimsh.org/trimesh.base.html#github-com-mikedh-trimesh
    """
    # Extract the isosurface, then let pymeshfix close any holes so the
    # result is watertight.
    raw_vertices, raw_faces, _, _ = marching_cubes(object_mask, 0)
    clean_vertices, clean_faces = pymeshfix.clean_from_arrays(raw_vertices, raw_faces)
    surface = Trimesh(vertices=clean_vertices, faces=clean_faces)

    if n_mesh_smoothing_interations > 0:
        # filter_taubin smooths the mesh in place.
        filter_taubin(surface, iterations=n_mesh_smoothing_interations)
    return surface
def voxelize_closed_surface(
    mesh: Trimesh, pitch: float, repair_mesh: bool = True
) -> Tuple[BinaryImage, np.ndarray]:
    """Voxelize a closed surface mesh.

    Parameters
    ----------
    mesh : Trimesh
        The surface to voxelize.
    pitch : float
        The voxel width in mesh units. Voxels have the
        same width in each dimension (i.e., are cubes).
    repair_mesh : bool
        Flag to attempt to repair the mesh (with pymeshfix) when it is not
        watertight. Default value is True.

    Returns
    -------
    image : BinaryImage
        The binary mask created from the voxelized mesh.
    image_origin : np.ndarray
        The upper left hand corner of the voxelized image in mesh units
        (i.e., minimum of the axis aligned bounding box).
    """
    bounding_box = mesh.bounds
    centroid = np.mean(bounding_box, axis=0)

    # Snap the centroid to the nearest integer multiple of the pitch so the
    # voxel grid aligns with a global grid anchored at the origin.
    rounded_centroid = _round_to_pitch(coordinate=centroid, pitch=pitch)

    # Minimum cube half-width that encompasses the full mesh, padded by one
    # voxel on each side to make sure the whole mesh is in range.
    cube_half_width = np.max(bounding_box - rounded_centroid)
    n_voxels_cube_half_width = int(np.ceil(cube_half_width / pitch)) + 1

    # Upper left hand (i.e., minimum) corner of the voxelized image in
    # mesh coordinates.
    image_origin = rounded_centroid - (n_voxels_cube_half_width * pitch)

    # Repair non-watertight meshes before voxelization.  The ``repair_mesh``
    # parameter shadows the module-level repair_mesh() helper, so the repair
    # is performed inline here rather than by calling that function.
    if repair_mesh and not mesh.is_watertight:
        vertices_clean, faces_clean = pymeshfix.clean_from_arrays(
            np.asarray(mesh.vertices), np.asarray(mesh.faces)
        )
        mesh = Trimesh(vertices=vertices_clean, faces=faces_clean)

    voxel_grid = trimesh.voxel.creation.local_voxelize(
        mesh=mesh,
        point=rounded_centroid,
        pitch=pitch,
        radius=n_voxels_cube_half_width,
        fill=True,
    )
    return voxel_grid.matrix.astype(bool), image_origin
def closed_surfaces_to_label_image(
    meshes: List[Trimesh],
    pitch: float,
    crop_around_mesh: bool = False,
    repair_mesh: bool = False,
) -> Tuple[LabelImage, np.ndarray]:
    """Create a label image from a set of meshes with closed surfaces.

    Notes:
        - meshes must be water tight for accurate voxelization.
        - Labels are assigned in the order the meshes appear in the list
          (first mesh -> label 1, second -> label 2, ...).
        - all meshes must be in the same coordinate system and scale.

    Parameters
    ----------
    meshes : List[Trimesh]
        The meshes to convert to a label image.
    pitch : float
        The width of a voxel in mesh units. Voxels are assumed to be cubes.
    crop_around_mesh : bool
        When set to True, the image is cropped around the axis aligned
        bounding box of the set of meshes with a one voxel pad in each
        direction. The default value is False.
    repair_mesh : bool
        When set to True, will attempt to repair meshes with PyMeshFix.
        Default value is False.

    Returns
    -------
    label_image : LabelImage
        The label image generated from the meshes.
    image_origin : np.ndarray
        The coordinate of the upper left hand corner (i.e., minimum) of the
        label_image in mesh coordinates.
    """
    # Axis-aligned bounding box enclosing all of the meshes.
    bounding_boxes = [mesh.bounds for mesh in meshes]
    all_corners = np.concatenate(bounding_boxes, axis=0)
    min_corner = np.min(all_corners, axis=0)
    max_corner = np.max(all_corners, axis=0)

    # Snap the corners to the voxel grid and pad by one voxel so boundary
    # voxels are not clipped.
    min_corner_rounded = _round_to_pitch(coordinate=min_corner, pitch=pitch) - pitch
    max_corner_rounded = _round_to_pitch(coordinate=max_corner, pitch=pitch) + pitch

    # Plain truthiness instead of ``is True`` (PEP 8).
    if crop_around_mesh:
        image_origin = min_corner_rounded
    else:
        image_origin = np.array([0, 0, 0])

    # Determine the size of the image in voxels.
    image_shape_mesh_units = max_corner_rounded - image_origin
    image_shape_voxels = np.round(image_shape_mesh_units / pitch, decimals=0).astype(
        int
    )

    label_image = np.zeros(image_shape_voxels, dtype=np.uint16)
    for label_value, mesh in enumerate(meshes, start=1):
        voxelized, origin = voxelize_closed_surface(
            mesh, pitch=pitch, repair_mesh=repair_mesh
        )
        # Indices of the voxels inside this mesh, in the voxelized mesh's
        # own frame.
        filled_voxel_coordinates = np.argwhere(voxelized)
        # Offset between the label-image frame and the voxelized-mesh frame.
        mesh_offset = np.round((origin - image_origin) / pitch, decimals=0)
        filled_voxel_indices = np.round(
            filled_voxel_coordinates + mesh_offset, decimals=0
        ).astype(int)
        label_image[
            filled_voxel_indices[:, 0],
            filled_voxel_indices[:, 1],
            filled_voxel_indices[:, 2],
        ] = label_value

    return label_image, image_origin
| [
"numpy.mean",
"numpy.ceil",
"numpy.asarray",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.zeros",
"trimesh.smoothing.filter_taubin",
"numpy.argwhere",
"trimesh.Trimesh",
"skimage.measure.marching_cubes",
"numpy.concatenate",
"pymeshfix.clean_from_arrays",
"numpy.round"
] | [((994, 1019), 'numpy.asarray', 'np.asarray', (['mesh.vertices'], {}), '(mesh.vertices)\n', (1004, 1019), True, 'import numpy as np\n'), ((1032, 1054), 'numpy.asarray', 'np.asarray', (['mesh.faces'], {}), '(mesh.faces)\n', (1042, 1054), True, 'import numpy as np\n'), ((1090, 1134), 'pymeshfix.clean_from_arrays', 'pymeshfix.clean_from_arrays', (['vertices', 'faces'], {}), '(vertices, faces)\n', (1117, 1134), False, 'import pymeshfix\n'), ((1185, 1236), 'trimesh.Trimesh', 'Trimesh', ([], {'vertices': 'vertices_clean', 'faces': 'faces_clean'}), '(vertices=vertices_clean, faces=faces_clean)\n', (1192, 1236), False, 'from trimesh import Trimesh\n'), ((2148, 2178), 'skimage.measure.marching_cubes', 'marching_cubes', (['object_mask', '(0)'], {}), '(object_mask, 0)\n', (2162, 2178), False, 'from skimage.measure import marching_cubes\n'), ((2214, 2258), 'pymeshfix.clean_from_arrays', 'pymeshfix.clean_from_arrays', (['vertices', 'faces'], {}), '(vertices, faces)\n', (2241, 2258), False, 'import pymeshfix\n'), ((2300, 2351), 'trimesh.Trimesh', 'Trimesh', ([], {'vertices': 'vertices_clean', 'faces': 'faces_clean'}), '(vertices=vertices_clean, faces=faces_clean)\n', (2307, 2351), False, 'from trimesh import Trimesh\n'), ((3306, 3335), 'numpy.mean', 'np.mean', (['bounding_box'], {'axis': '(0)'}), '(bounding_box, axis=0)\n', (3313, 3335), True, 'import numpy as np\n'), ((3574, 3613), 'numpy.max', 'np.max', (['(bounding_box - rounded_centroid)'], {}), '(bounding_box - rounded_centroid)\n', (3580, 3613), True, 'import numpy as np\n'), ((5793, 5831), 'numpy.concatenate', 'np.concatenate', (['bounding_boxes'], {'axis': '(0)'}), '(bounding_boxes, axis=0)\n', (5807, 5831), True, 'import numpy as np\n'), ((5849, 5876), 'numpy.min', 'np.min', (['all_corners'], {'axis': '(0)'}), '(all_corners, axis=0)\n', (5855, 5876), True, 'import numpy as np\n'), ((5894, 5921), 'numpy.max', 'np.max', (['all_corners'], {'axis': '(0)'}), '(all_corners, axis=0)\n', (5900, 5921), True, 'import numpy as 
np\n'), ((6676, 6721), 'numpy.zeros', 'np.zeros', (['image_shape_voxels'], {'dtype': 'np.uint16'}), '(image_shape_voxels, dtype=np.uint16)\n', (6684, 6721), True, 'import numpy as np\n'), ((765, 805), 'numpy.round', 'np.round', (['(coordinate / pitch)'], {'decimals': '(0)'}), '(coordinate / pitch, decimals=0)\n', (773, 805), True, 'import numpy as np\n'), ((2437, 2497), 'trimesh.smoothing.filter_taubin', 'filter_taubin', (['mesh'], {'iterations': 'n_mesh_smoothing_interations'}), '(mesh, iterations=n_mesh_smoothing_interations)\n', (2450, 2497), False, 'from trimesh.smoothing import filter_taubin\n'), ((3705, 3737), 'numpy.ceil', 'np.ceil', (['(cube_half_width / pitch)'], {}), '(cube_half_width / pitch)\n', (3712, 3737), True, 'import numpy as np\n'), ((6386, 6405), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (6394, 6405), True, 'import numpy as np\n'), ((6978, 7000), 'numpy.argwhere', 'np.argwhere', (['voxelized'], {}), '(voxelized)\n', (6989, 7000), True, 'import numpy as np\n'), ((7112, 7165), 'numpy.round', 'np.round', (['((origin - image_origin) / pitch)'], {'decimals': '(0)'}), '((origin - image_origin) / pitch, decimals=0)\n', (7120, 7165), True, 'import numpy as np\n'), ((6543, 6595), 'numpy.round', 'np.round', (['(image_shape_mesh_units / pitch)'], {'decimals': '(0)'}), '(image_shape_mesh_units / pitch, decimals=0)\n', (6551, 6595), True, 'import numpy as np\n'), ((7237, 7297), 'numpy.round', 'np.round', (['(filled_voxel_coordinates + mesh_offset)'], {'decimals': '(0)'}), '(filled_voxel_coordinates + mesh_offset, decimals=0)\n', (7245, 7297), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file : sst_dataset.py
@author: zijun
@contact : <EMAIL>
@date : 2020/11/17 11:45
@version: 1.0
@desc : sst5 and imdb task use the same dataset
"""
import os
from functools import partial
import torch
from transformers import RobertaTokenizer
from torch.utils.data import Dataset, DataLoader
from roberta_ses.datasets.collate_functions import collate_to_max_length
class SSTDataset(Dataset):
    """Line-per-example classification dataset (shared by SST-5 and IMDB).

    Each line of ``<directory>/<prefix>.txt`` holds a label and a sentence
    separated by a single tab character.
    """

    def __init__(self, directory, prefix, bert_path, max_length: int = 512):
        super().__init__()
        self.max_length = max_length
        data_file = os.path.join(directory, prefix + '.txt')
        with open(data_file, 'r', encoding='utf8') as handle:
            self.lines = handle.readlines()
        self.tokenizer = RobertaTokenizer.from_pretrained(bert_path)

    def __len__(self):
        return len(self.lines)

    def __getitem__(self, idx):
        label, sentence = self.lines[idx].split('\t', 1)
        # Drop surrounding whitespace and a single trailing period.
        sentence = sentence.strip()
        if sentence.endswith("."):
            sentence = sentence[:-1]
        token_ids = self.tokenizer.encode(sentence, add_special_tokens=False)
        # Reserve two slots for the start/end special tokens (ids 0 and 2).
        limit = self.max_length - 2
        if len(token_ids) > limit:
            token_ids = token_ids[:limit]
        length = torch.LongTensor([len(token_ids) + 2])
        input_ids = torch.LongTensor([0] + token_ids + [2])
        label = torch.LongTensor([int(label)])
        return input_ids, label, length
def unit_test():
    """Smoke test: build an SSTDataset over a local IMDB copy and iterate it.

    NOTE(review): paths are hard-coded to an internal NFS mount, so this
    only runs in that environment.
    """
    root_path = "/data/nfsdata2/sunzijun/sstc/imdb_data"
    bert_path = "/data/nfsdata2/sunzijun/loop/roberta-base"
    prefix = "train"
    dataset = SSTDataset(directory=root_path, prefix=prefix, bert_path=bert_path)
    # Pad each field of a batch to the batch max length; fill values are
    # 1 for input_ids (presumably the pad token id -- confirm) and 0 for
    # label/length.
    dataloader = DataLoader(
        dataset=dataset,
        batch_size=10,
        num_workers=0,
        shuffle=False,
        collate_fn=partial(collate_to_max_length, fill_values=[1, 0, 0])
    )
    # NOTE(review): SSTDataset.__getitem__ returns 3 fields (input_ids,
    # label, length) and fill_values has 3 entries, yet this loop unpacks
    # 6 values (start_index/end_index/span_mask) -- looks copied from a
    # span-style task; verify against collate_to_max_length before relying
    # on this test.
    for input_ids, label, length, start_index, end_index, span_mask in dataloader:
        print(input_ids.shape)
        print(start_index.shape)
        print(end_index.shape)
        print(span_mask.shape)
        print(label.view(-1).shape)
        print()


if __name__ == '__main__':
    unit_test()
| [
"transformers.RobertaTokenizer.from_pretrained",
"torch.LongTensor",
"functools.partial",
"os.path.join"
] | [((765, 808), 'transformers.RobertaTokenizer.from_pretrained', 'RobertaTokenizer.from_pretrained', (['bert_path'], {}), '(bert_path)\n', (797, 808), False, 'from transformers import RobertaTokenizer\n'), ((1393, 1432), 'torch.LongTensor', 'torch.LongTensor', (['([0] + input_ids + [2])'], {}), '([0] + input_ids + [2])\n', (1409, 1432), False, 'import torch\n'), ((1902, 1955), 'functools.partial', 'partial', (['collate_to_max_length'], {'fill_values': '[1, 0, 0]'}), '(collate_to_max_length, fill_values=[1, 0, 0])\n', (1909, 1955), False, 'from functools import partial\n'), ((609, 649), 'os.path.join', 'os.path.join', (['directory', "(prefix + '.txt')"], {}), "(directory, prefix + '.txt')\n", (621, 649), False, 'import os\n')] |
# Copyright (c) 2021 The University of Texas at Austin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: <NAME>
#
"""
This script tests forking gem5 with the KVM cores and switching cores in the
child process. First, the test boots linux with KVM and tests fast-forwarding
with instruction exit events. Then the test forks the simulation, waits for the
child to simulate until completion, and then simulates to completion in the
parent process.
"""
import argparse
import os
import sys
from textwrap import dedent
import m5
from m5.objects import Root
from gem5.components.boards.x86_board import X86Board
from gem5.coherence_protocol import CoherenceProtocol
from gem5.isas import ISA
from gem5.components.memory.single_channel import SingleChannelDDR3_1600
from gem5.components.processors.cpu_types import CPUTypes
from gem5.components.processors.simple_switchable_processor import (
SimpleSwitchableProcessor,
)
from gem5.resources.resource import Resource
from gem5.runtime import (
get_runtime_coherence_protocol, get_runtime_isa
)
from gem5.utils.requires import requires
# Command-line interface for the fork-and-switch regression test.
parser = argparse.ArgumentParser(
    description="A script to test forking gem5 and switching cpus."
)
parser.add_argument(
    "-m",
    "--mem-system",
    type=str,
    choices=("classic", "mi_example", "mesi_two_level"),
    required=True,
    help="The memory system.",
)
parser.add_argument(
    "-n",
    "--num-cpus",
    type=int,
    choices=(1, 2, 4, 8),
    default=4,
    help="The number of CPUs.",
)
parser.add_argument(
    "-c",
    "--cpu",
    type=str,
    choices=("kvm", "atomic", "timing", "o3"),
    required=True,
    help="The CPU type.",
)
parser.add_argument(
    "-r",
    "--resource-directory",
    type=str,
    required=False,
    help="The directory in which resources will be downloaded or exist.",
)
parser.add_argument(
    "-o",
    "--override-download",
    action="store_true",
    help="Override a local resource if the hashes do not match.",
)
parser.add_argument(
    "-k",
    "--kernel-args",
    type=str,
    default="init=/root/gem5_init.sh",
    help="Additional kernel boot arguments.",
)
parser.add_argument(
    "-f",
    "--num-forks",
    type=int,
    default=4,
    help="The number of times to fork gem5.",
)

args = parser.parse_args()

# Map the requested Ruby memory system to the coherence protocol that must be
# compiled into this gem5 binary; the classic memory system needs none.
coherence_protocol_required = None
if args.mem_system == "mi_example":
    coherence_protocol_required = CoherenceProtocol.MI_EXAMPLE
elif args.mem_system == "mesi_two_level":
    coherence_protocol_required = CoherenceProtocol.MESI_TWO_LEVEL

# Fail fast if the running gem5 binary lacks the required ISA, protocol, or
# (when the KVM CPU is requested) KVM support.
requires(
    isa_required=ISA.X86,
    coherence_protocol_required=coherence_protocol_required,
    kvm_required=(args.cpu == "kvm"),
)
# Select the cache hierarchy matching the requested memory system.  Imports
# are performed inside each branch so only the selected hierarchy module is
# loaded.
cache_hierarchy = None
if args.mem_system == "mi_example":
    from gem5.components.cachehierarchies.ruby.\
        mi_example_cache_hierarchy import (
        MIExampleCacheHierarchy,
    )

    cache_hierarchy = MIExampleCacheHierarchy(size="32kB", assoc=8)
elif args.mem_system == "mesi_two_level":
    from gem5.components.cachehierarchies.ruby.\
        mesi_two_level_cache_hierarchy import (
        MESITwoLevelCacheHierarchy,
    )

    cache_hierarchy = MESITwoLevelCacheHierarchy(
        l1d_size="16kB",
        l1d_assoc=8,
        l1i_size="16kB",
        l1i_assoc=8,
        l2_size="256kB",
        l2_assoc=16,
        num_l2_banks=1,
    )
elif args.mem_system == "classic":
    from gem5.components.cachehierarchies.classic.\
        private_l1_cache_hierarchy import (
        PrivateL1CacheHierarchy,
    )

    cache_hierarchy = PrivateL1CacheHierarchy(l1d_size="16kB", l1i_size="16kB")
else:
    raise NotImplementedError(
        "Memory system '{}' is not supported in the boot tests.".format(
            args.mem_system
        )
    )
# Sanity check; PEP 8: compare against None with 'is not', never '!='.
assert cache_hierarchy is not None

# Setup the system memory.
memory = SingleChannelDDR3_1600(size="3GB")

# Setup a Processor: map the CLI choice onto the CPUTypes enum.
cpu_type = None
if args.cpu == "kvm":
    cpu_type = CPUTypes.KVM
elif args.cpu == "atomic":
    cpu_type = CPUTypes.ATOMIC
elif args.cpu == "timing":
    cpu_type = CPUTypes.TIMING
elif args.cpu == "o3":
    cpu_type = CPUTypes.O3
else:
    raise NotImplementedError(
        "CPU type '{}' is not supported in the boot tests.".format(args.cpu)
    )
assert cpu_type is not None
processor = SimpleSwitchableProcessor(
starting_core_type=CPUTypes.KVM,
switch_core_type=cpu_type,
num_cores=args.num_cpus,
)
# Setup the motherboard.
motherboard = X86Board(
clk_freq="3GHz",
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
exit_on_work_items=True,
)
motherboard.connect_things()
# Set the Full System workload.
motherboard.set_workload(
kernel=Resource(
"x86-linux-kernel-5.4.49",
override=args.override_download,
resource_directory=args.resource_directory,
),
disk_image=Resource(
"x86-ubuntu-img",
override=args.override_download,
resource_directory=args.resource_directory,
),
command=dedent(
"""
m5 exit # signal end of boot
m5 exit # exit in children and parent
"""
),
kernel_args=[args.kernel_args]
)
# Begin running of the simulation. This will exit once the Linux system boot
# is complete.
print("Running with ISA: " + get_runtime_isa().name)
print("Running with protocol: " + get_runtime_coherence_protocol().name)
print()
root = Root(full_system=True, system=motherboard)

# TODO: This is annoying. Is there a way to fix this to happen
# automatically when running KVM?
root.sim_quantum = int(1e9)

# Disable the gdb ports. Required for forking.
m5.disableAllListeners()
m5.instantiate()

# Simulate the initial boot with the starting KVM cpu
exit_event = m5.simulate()
print("Boot finished", exit_event.getCause())

print("Starting fork and switch processors test")
pids = []
for i in range(args.num_forks):
    # Fork the simulator; each child gets its own output directory named
    # after the fork-time tick.  m5.fork returns 0 in the child and the
    # child's pid in the parent (like os.fork).
    pid = m5.fork("%(parent)s/" + str(m5.curTick()))
    if pid == 0: # in child
        # Switch from the KVM boot cores to the requested CPU type and run
        # the child to completion; it must end on an `m5 exit`.
        print(f"Switching processors in child {i}.")
        processor.switch()
        exit_event = m5.simulate()
        if exit_event.getCause() != "m5_exit instruction encountered":
            raise Exception(f"Expected m5 exit, got {exit_event.getCause()}")
        print("Child finished, exiting: ", exit_event.getCause())
        sys.exit(0)
    else:
        pids.append(pid)

# Parent: reap every child before continuing its own simulation.
print("Waiting for children...")
for pid in pids:
    print (os.waitpid(pid, 0))

print("Children finished! Running to completion in parent.")
exit_event = m5.simulate()
if exit_event.getCause() != "m5_exit instruction encountered":
    raise Exception(f"Expected m5 exit, got {exit_event.getCause()}")
| [
"gem5.utils.requires.requires",
"gem5.components.cachehierarchies.ruby.mi_example_cache_hierarchy.MIExampleCacheHierarchy",
"m5.disableAllListeners",
"sys.exit",
"textwrap.dedent",
"argparse.ArgumentParser",
"m5.curTick",
"gem5.components.boards.x86_board.X86Board",
"gem5.runtime.get_runtime_coheren... | [((2546, 2639), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A script to test forking gem5 and switching cpus."""'}), "(description=\n 'A script to test forking gem5 and switching cpus.')\n", (2569, 2639), False, 'import argparse\n'), ((3978, 4102), 'gem5.utils.requires.requires', 'requires', ([], {'isa_required': 'ISA.X86', 'coherence_protocol_required': 'coherence_protocol_required', 'kvm_required': "(args.cpu == 'kvm')"}), "(isa_required=ISA.X86, coherence_protocol_required=\n coherence_protocol_required, kvm_required=args.cpu == 'kvm')\n", (3986, 4102), False, 'from gem5.utils.requires import requires\n'), ((5251, 5285), 'gem5.components.memory.single_channel.SingleChannelDDR3_1600', 'SingleChannelDDR3_1600', ([], {'size': '"""3GB"""'}), "(size='3GB')\n", (5273, 5285), False, 'from gem5.components.memory.single_channel import SingleChannelDDR3_1600\n'), ((5698, 5813), 'gem5.components.processors.simple_switchable_processor.SimpleSwitchableProcessor', 'SimpleSwitchableProcessor', ([], {'starting_core_type': 'CPUTypes.KVM', 'switch_core_type': 'cpu_type', 'num_cores': 'args.num_cpus'}), '(starting_core_type=CPUTypes.KVM, switch_core_type\n =cpu_type, num_cores=args.num_cpus)\n', (5723, 5813), False, 'from gem5.components.processors.simple_switchable_processor import SimpleSwitchableProcessor\n'), ((5864, 5987), 'gem5.components.boards.x86_board.X86Board', 'X86Board', ([], {'clk_freq': '"""3GHz"""', 'processor': 'processor', 'memory': 'memory', 'cache_hierarchy': 'cache_hierarchy', 'exit_on_work_items': '(True)'}), "(clk_freq='3GHz', processor=processor, memory=memory,\n cache_hierarchy=cache_hierarchy, exit_on_work_items=True)\n", (5872, 5987), False, 'from gem5.components.boards.x86_board import X86Board\n'), ((6810, 6852), 'm5.objects.Root', 'Root', ([], {'full_system': '(True)', 'system': 'motherboard'}), '(full_system=True, system=motherboard)\n', (6814, 6852), False, 'from m5.objects 
import Root\n'), ((7027, 7051), 'm5.disableAllListeners', 'm5.disableAllListeners', ([], {}), '()\n', (7049, 7051), False, 'import m5\n'), ((7053, 7069), 'm5.instantiate', 'm5.instantiate', ([], {}), '()\n', (7067, 7069), False, 'import m5\n'), ((7137, 7150), 'm5.simulate', 'm5.simulate', ([], {}), '()\n', (7148, 7150), False, 'import m5\n'), ((7914, 7927), 'm5.simulate', 'm5.simulate', ([], {}), '()\n', (7925, 7927), False, 'import m5\n'), ((4330, 4375), 'gem5.components.cachehierarchies.ruby.mi_example_cache_hierarchy.MIExampleCacheHierarchy', 'MIExampleCacheHierarchy', ([], {'size': '"""32kB"""', 'assoc': '(8)'}), "(size='32kB', assoc=8)\n", (4353, 4375), False, 'from gem5.components.cachehierarchies.ruby.mi_example_cache_hierarchy import MIExampleCacheHierarchy\n'), ((4580, 4716), 'gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy.MESITwoLevelCacheHierarchy', 'MESITwoLevelCacheHierarchy', ([], {'l1d_size': '"""16kB"""', 'l1d_assoc': '(8)', 'l1i_size': '"""16kB"""', 'l1i_assoc': '(8)', 'l2_size': '"""256kB"""', 'l2_assoc': '(16)', 'num_l2_banks': '(1)'}), "(l1d_size='16kB', l1d_assoc=8, l1i_size='16kB',\n l1i_assoc=8, l2_size='256kB', l2_assoc=16, num_l2_banks=1)\n", (4606, 4716), False, 'from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import MESITwoLevelCacheHierarchy\n'), ((6107, 6223), 'gem5.resources.resource.Resource', 'Resource', (['"""x86-linux-kernel-5.4.49"""'], {'override': 'args.override_download', 'resource_directory': 'args.resource_directory'}), "('x86-linux-kernel-5.4.49', override=args.override_download,\n resource_directory=args.resource_directory)\n", (6115, 6223), False, 'from gem5.resources.resource import Resource\n'), ((6267, 6374), 'gem5.resources.resource.Resource', 'Resource', (['"""x86-ubuntu-img"""'], {'override': 'args.override_download', 'resource_directory': 'args.resource_directory'}), "('x86-ubuntu-img', override=args.override_download,\n resource_directory=args.resource_directory)\n", 
(6275, 6374), False, 'from gem5.resources.resource import Resource\n'), ((6415, 6531), 'textwrap.dedent', 'dedent', (['"""\n m5 exit # signal end of boot\n m5 exit # exit in children and parent\n """'], {}), '(\n """\n m5 exit # signal end of boot\n m5 exit # exit in children and parent\n """\n )\n', (6421, 6531), False, 'from textwrap import dedent\n'), ((7473, 7486), 'm5.simulate', 'm5.simulate', ([], {}), '()\n', (7484, 7486), False, 'import m5\n'), ((7710, 7721), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7718, 7721), False, 'import sys\n'), ((7819, 7837), 'os.waitpid', 'os.waitpid', (['pid', '(0)'], {}), '(pid, 0)\n', (7829, 7837), False, 'import os\n'), ((4969, 5026), 'gem5.components.cachehierarchies.classic.private_l1_cache_hierarchy.PrivateL1CacheHierarchy', 'PrivateL1CacheHierarchy', ([], {'l1d_size': '"""16kB"""', 'l1i_size': '"""16kB"""'}), "(l1d_size='16kB', l1i_size='16kB')\n", (4992, 5026), False, 'from gem5.components.cachehierarchies.classic.private_l1_cache_hierarchy import PrivateL1CacheHierarchy\n'), ((6697, 6714), 'gem5.runtime.get_runtime_isa', 'get_runtime_isa', ([], {}), '()\n', (6712, 6714), False, 'from gem5.runtime import get_runtime_coherence_protocol, get_runtime_isa\n'), ((6755, 6787), 'gem5.runtime.get_runtime_coherence_protocol', 'get_runtime_coherence_protocol', ([], {}), '()\n', (6785, 6787), False, 'from gem5.runtime import get_runtime_coherence_protocol, get_runtime_isa\n'), ((7328, 7340), 'm5.curTick', 'm5.curTick', ([], {}), '()\n', (7338, 7340), False, 'import m5\n')] |
# -*- coding: utf-8 -*-
"""Time how long rioxarray takes to write a 10-band Landsat stack to GeoTIFF."""
import os
import timeit
import xarray
import rioxarray
import pandas as pd

wd = os.getcwd()
catalog = os.path.join('data', 'LC08_L1TP_190024_20200418_20200822_02_T1')

# Absolute paths of every GeoTIFF band in the scene directory
raster_paths = [
    os.path.join(wd, catalog, name)
    for name in os.listdir(catalog)
    if name.endswith('.TIF')
]

### raster stack
band_names = ["B1", "B10", "B11", "B2", "B3", "B4", "B5", "B6", "B7", "B9"]
layers = [
    rioxarray.open_rasterio(path, masked=True).squeeze()
    for path in raster_paths
]
ras = xarray.concat(layers, "band")
ras.coords["band"] = band_names

# Write the stack ten times, timing each write and removing the file after
stack_file = 'stack.TIF'
t_list = []
for _ in range(10):
    tic = timeit.default_timer()
    ras.rio.to_raster(stack_file, dtype="uint16", compress="LZW")
    toc = timeit.default_timer()
    t_list.append(round(toc - tic, 2))
    os.remove(stack_file)

# Collect the timings into a tidy results table
df = pd.DataFrame.from_dict(
    {'task': ['write'] * 10, 'package': ['rioxarray'] * 10, 'time': t_list})
if not os.path.isdir('results'):
    os.mkdir('results')
savepath = os.path.join('results', 'write-rioxarray.csv')
df.to_csv(savepath, index = False, decimal = ',', sep = ';') | [
"rioxarray.open_rasterio",
"os.listdir",
"timeit.default_timer",
"os.path.join",
"pandas.DataFrame.from_dict",
"os.getcwd",
"xarray.concat",
"os.path.isdir",
"os.mkdir",
"os.remove"
] | [((114, 125), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (123, 125), False, 'import os\n'), ((137, 201), 'os.path.join', 'os.path.join', (['"""data"""', '"""LC08_L1TP_190024_20200418_20200822_02_T1"""'], {}), "('data', 'LC08_L1TP_190024_20200418_20200822_02_T1')\n", (149, 201), False, 'import os\n'), ((213, 232), 'os.listdir', 'os.listdir', (['catalog'], {}), '(catalog)\n', (223, 232), False, 'import os\n'), ((576, 602), 'xarray.concat', 'xarray.concat', (['ras', '"""band"""'], {}), "(ras, 'band')\n", (589, 602), False, 'import xarray\n'), ((892, 913), 'os.remove', 'os.remove', (['stack_file'], {}), '(stack_file)\n', (901, 913), False, 'import os\n'), ((1002, 1028), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['df'], {}), '(df)\n', (1024, 1028), True, 'import pandas as pd\n'), ((1095, 1141), 'os.path.join', 'os.path.join', (['"""results"""', '"""write-rioxarray.csv"""'], {}), "('results', 'write-rioxarray.csv')\n", (1107, 1141), False, 'import os\n'), ((301, 329), 'os.path.join', 'os.path.join', (['wd', 'catalog', 'r'], {}), '(wd, catalog, r)\n', (313, 329), False, 'import os\n'), ((720, 742), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (740, 742), False, 'import timeit\n'), ((829, 851), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (849, 851), False, 'import timeit\n'), ((1037, 1061), 'os.path.isdir', 'os.path.isdir', (['"""results"""'], {}), "('results')\n", (1050, 1061), False, 'import os\n'), ((1063, 1082), 'os.mkdir', 'os.mkdir', (['"""results"""'], {}), "('results')\n", (1071, 1082), False, 'import os\n'), ((511, 553), 'rioxarray.open_rasterio', 'rioxarray.open_rasterio', (['path'], {'masked': '(True)'}), '(path, masked=True)\n', (534, 553), False, 'import rioxarray\n')] |
''' Miscellaneous internal utilities. '''
import collections
import collections.abc
import os
from abc import ABCMeta, abstractmethod, abstractproperty
from itertools import islice
from types import GeneratorType

import pandas as pd
from tqdm import tqdm

from pliers import config
from pliers.support.exceptions import MissingDependencyError
def listify(obj):
    ''' Wraps all non-list or tuple objects in a list; provides a simple way
    to accept flexible arguments. '''
    if isinstance(obj, (list, tuple, type(None))):
        return obj
    return [obj]
def flatten(l):
    ''' Flatten an arbitrarily nested iterable, yielding leaf elements in
    depth-first order. Strings are treated as atomic values rather than as
    iterables of characters. '''
    for el in l:
        # collections.Iterable was removed in Python 3.10; the ABC lives in
        # collections.abc.
        if isinstance(el, collections.abc.Iterable) and not isinstance(el, str):
            yield from flatten(el)
        else:
            yield el
def flatten_dict(d, parent_key='', sep='_'):
    ''' Flattens a multi-level dictionary into a single level by concatenating
    nested keys with the char provided in the sep argument.
    Solution from https://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys'''
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        # collections.MutableMapping was removed in Python 3.10; use the ABC
        # from collections.abc instead.
        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten_dict(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
def batch_iterable(l, n):
    ''' Yield successive n-sized chunks from an iterable; the final chunk may
    be shorter.
    Solution from: http://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery'''
    it = iter(l)
    while True:
        chunk = list(islice(it, n))
        if not chunk:
            return
        yield chunk
def set_iterable_type(obj):
    ''' Return either a generator or a list depending on config-level
    settings. Should be used to wrap almost every internal iterable return.
    Also inspects elements recursively in the case of list returns, to
    ensure that there are no nested generators. '''
    if not isiterable(obj):
        return obj
    if config.get_option('use_generators'):
        if isgenerator(obj):
            return obj
        return (elem for elem in obj)
    return [set_iterable_type(elem) for elem in obj]
class classproperty:
    ''' Descriptor implementing a read-only property evaluated against the
    class rather than the instance, analogous to @classmethod.
    Solution from: http://stackoverflow.com/questions/128573/using-property-on-classmethodss
    '''

    def __init__(self, fget):
        self.fget = fget

    def __get__(self, owner_self, owner_cls):
        # Always delegate to the owning class, never the instance
        return self.fget(owner_cls)
def isiterable(obj):
    ''' Returns True if the object is one of allowable iterable types. '''
    iterable_types = (list, tuple, pd.Series, GeneratorType, tqdm)
    return isinstance(obj, iterable_types)
def isgenerator(obj):
    ''' Returns True if object is a generator, or a generator wrapped by a
    tqdm object. '''
    if isinstance(obj, GeneratorType):
        return True
    # tqdm-style wrappers expose the underlying iterable as .iterable
    wrapped = getattr(obj, 'iterable', None)
    return isinstance(wrapped, GeneratorType)
def progress_bar_wrapper(iterable, **kwargs):
    ''' Wrapper that applies tqdm progress bar conditional on config settings.
    '''
    already_wrapped = isinstance(iterable, tqdm)
    if config.get_option('progress_bar') and not already_wrapped:
        return tqdm(iterable, **kwargs)
    return iterable
# Registry mapping a friendly name to its (package, module-or-None) record
module_names = {}
Dependency = collections.namedtuple('Dependency', 'package value')


def attempt_to_import(dependency, name=None, fromlist=None):
    ''' Try to import dependency, recording the outcome in module_names.
    Returns the imported module on success, or None if the import failed. '''
    key = dependency if name is None else name
    try:
        module = __import__(dependency, fromlist=fromlist)
    except ImportError:
        module = None
    module_names[key] = Dependency(dependency, module)
    return module
def verify_dependencies(dependencies):
    ''' Raise MissingDependencyError listing every named dependency whose
    earlier import attempt failed. '''
    missing = [module_names[dep].package
               for dep in listify(dependencies)
               if module_names[dep].value is None]
    if missing:
        raise MissingDependencyError(missing)
class EnvironmentKeyMixin:
    ''' Mixin exposing the environment variable keys a class depends on. '''

    @classproperty
    def _env_keys(cls):
        pass

    @classproperty
    def env_keys(cls):
        return listify(cls._env_keys)

    @classproperty
    def available(cls):
        # Available only when every required key is set in the environment
        return all(k in os.environ for k in cls.env_keys)
class APIDependent(EnvironmentKeyMixin, metaclass=ABCMeta):
    ''' Abstract base for objects that depend on an external API, with
    rate-limit bookkeeping and API-key validation caching. '''

    _rate_limit = 0

    def __init__(self, rate_limit=None, **kwargs):
        self.transformed_stim_count = 0
        self.validated_keys = set()
        self.rate_limit = rate_limit if rate_limit else self._rate_limit
        self._last_request_time = 0
        super().__init__(**kwargs)

    @abstractproperty
    def api_keys(self):
        pass

    def validate_keys(self):
        ''' Check the API keys, caching those already known to be valid. '''
        unchecked = [k for k in self.api_keys if k not in self.validated_keys]
        if not unchecked:
            return True
        valid = self.check_valid_keys()
        if valid:
            self.validated_keys.update(self.api_keys)
        return valid

    @abstractmethod
    def check_valid_keys(self):
        pass
| [
"itertools.islice",
"collections.namedtuple",
"tqdm.tqdm",
"pliers.support.exceptions.MissingDependencyError",
"pliers.config.get_option"
] | [((3248, 3301), 'collections.namedtuple', 'collections.namedtuple', (['"""Dependency"""', '"""package value"""'], {}), "('Dependency', 'package value')\n", (3270, 3301), False, 'import collections\n'), ((2023, 2058), 'pliers.config.get_option', 'config.get_option', (['"""use_generators"""'], {}), "('use_generators')\n", (2040, 2058), False, 'from pliers import config\n'), ((1582, 1594), 'itertools.islice', 'islice', (['i', 'n'], {}), '(i, n)\n', (1588, 1594), False, 'from itertools import islice\n'), ((3094, 3118), 'tqdm.tqdm', 'tqdm', (['iterable'], {}), '(iterable, **kwargs)\n', (3098, 3118), False, 'from tqdm import tqdm\n'), ((3812, 3843), 'pliers.support.exceptions.MissingDependencyError', 'MissingDependencyError', (['missing'], {}), '(missing)\n', (3834, 3843), False, 'from pliers.support.exceptions import MissingDependencyError\n'), ((1654, 1666), 'itertools.islice', 'islice', (['i', 'n'], {}), '(i, n)\n', (1660, 1666), False, 'from itertools import islice\n'), ((3123, 3156), 'pliers.config.get_option', 'config.get_option', (['"""progress_bar"""'], {}), "('progress_bar')\n", (3140, 3156), False, 'from pliers import config\n')] |
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import functools
import numpy as np
from lmnet.datasets.cifar100 import Cifar100
from lmnet.datasets.base import DistributionInterface
from lmnet.utils.random import shuffle
class Cifar100Distribution(Cifar100, DistributionInterface):
    """Cifar100 subclass implementing DistributionInterface: caches the raw
    (images, labels) arrays per instance and supports re-indexing/shuffling
    of its dataset."""

    # Per-instance cache for the concatenated (images, labels) arrays.
    # Class-level default so reads work regardless of when the parent
    # classes call back into _images_and_labels().
    _images_labels_cache = None

    def __init__(
            self,
            subset="train",
            batch_size=100,
            *args,
            **kwargs
    ):
        super().__init__(
            subset=subset,
            batch_size=batch_size,
            *args,
            **kwargs,
        )
        self._init_images_and_labels()

    def _images_and_labels(self):
        """Load and concatenate the raw arrays for the current subset.

        The result is memoized on the instance. The previous implementation
        used @functools.lru_cache on this method, which keys the cache on
        `self` and therefore keeps every instance alive for the lifetime of
        the cache (flake8-bugbear B019).

        Returns:
            tuple: (images, labels) numpy arrays.
        """
        if self._images_labels_cache is None:
            files = ["train"] if self.subset == "train" else ["test"]
            data = [self._load_data(filename) for filename in files]
            images = np.concatenate(
                [images for images, labels in data], axis=0)
            labels = np.concatenate(
                [labels for images, labels in data], axis=0)
            self._images_labels_cache = (images, labels)
        return self._images_labels_cache

    def update_dataset(self, indices):
        """Update own dataset by indices."""
        # Re-initialize dataset from the cached full arrays
        self._init_images_and_labels()
        # Keep only the rows selected by the caller
        self.images = self.images[indices, :]
        self.labels = self.labels[indices]
        self.current_element_index = 0

    def get_shuffle_index(self):
        """Return list of shuffled index."""
        images, _ = self._images_and_labels()
        random_indices = shuffle(range(len(images)), seed=self.seed)
        print("Shuffle {} train dataset with random state {}.".format(
            self.__class__.__name__, self.seed))
        # Advance the seed so successive epochs get different orders
        self.seed += 1
        return random_indices
| [
"functools.lru_cache",
"numpy.concatenate"
] | [((1271, 1304), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (1290, 1304), False, 'import functools\n'), ((1585, 1615), 'numpy.concatenate', 'np.concatenate', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (1599, 1615), True, 'import numpy as np\n'), ((1687, 1717), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (1701, 1717), True, 'import numpy as np\n')] |
import inspect
import functools
from gridengine import job, dispatch, schedulers
# ----------------------------------------------------------------------------
# Partial
# ----------------------------------------------------------------------------
def isexception(x):
  """Return True when the value is an Exception instance"""
  return isinstance(x, Exception)
def isnumeric(x):
  """Test whether the value can be represented as a number"""
  try:
    float(x)
    return True
  except (TypeError, ValueError):
    # float() raises only these two for unconvertible values; a bare
    # except would also swallow KeyboardInterrupt/SystemExit.
    return False
def partial(f, *args, **kwargs):
  """Return a callable partially closed over the input function and arguments

  Behaves like functools.partial, but additionally copies metadata in the
  spirit of functools.update_wrapper:
    __doc__    = f.__doc__
    __module__ = f.__module__
    __name__   = f.__name__ + string representation of the closed arguments

  This is useful for running functions with different parameter sets, whilst
  being able to identify the variants by name
  """
  def _label(var):
    try:
      return var.__name__
    except AttributeError:
      return str(var)[0:5] if isnumeric(var) else var.__class__.__name__

  closed = functools.partial(f, *args, **kwargs)
  suffix = [_label(a) for a in list(args) + list(kwargs.values())]
  closed.__doc__ = f.__doc__
  closed.__module__ = f.__module__
  closed.__name__ = '_'.join([f.__name__] + suffix)
  return closed
# ----------------------------------------------------------------------------
# Map
# ----------------------------------------------------------------------------
def map(f, args, scheduler=schedulers.best_available, reraise=True):
  """Perform a functional-style map operation

  Apply a function f to each argument in the iterable args, equivalent to
    y = [f(x) for x in args]
  except that every argument is wrapped in its own Job and run through the
  scheduler.

  The default scheduler is whatever schedulers.best_available() returns. To
  run map on a grid engine, pass a schedulers.GridEngineScheduler instance.

  Args:
    f (func): A picklable function
    args (iterable): An iterable (list) of arguments to f

  Keyword Args:
    scheduler: A schedulers.Scheduler instance or class used to run the jobs.
    reraise (bool): Reraise exceptions that occur in any of the jobs. Set this
      to False if you want to salvage any good results.

  Returns:
    List of return values equivalent to the builtin map function

  Raises:
    Any exception that would occur when applying [f(x) for x in args]
  """
  dispatcher = dispatch.JobDispatcher(scheduler)

  # One job per argument; dispatch/join preserve the input ordering
  jobs = [job.Job(target=f, args=(x,)) for x in args]
  dispatcher.dispatch(jobs)
  results = dispatcher.join()

  if reraise:
    # Surface the first failure, if any job raised
    for outcome in results:
      if isexception(outcome):
        raise outcome

  return results
| [
"gridengine.job.Job",
"gridengine.dispatch.JobDispatcher",
"functools.partial"
] | [((1166, 1203), 'functools.partial', 'functools.partial', (['f', '*args'], {}), '(f, *args, **kwargs)\n', (1183, 1203), False, 'import functools\n'), ((2712, 2745), 'gridengine.dispatch.JobDispatcher', 'dispatch.JobDispatcher', (['scheduler'], {}), '(scheduler)\n', (2734, 2745), False, 'from gridengine import job, dispatch, schedulers\n'), ((2779, 2809), 'gridengine.job.Job', 'job.Job', ([], {'target': 'f', 'args': '(arg,)'}), '(target=f, args=(arg,))\n', (2786, 2809), False, 'from gridengine import job, dispatch, schedulers\n')] |
#!/usr/bin/env python
#
# Video pupilometry functions
# - takes calibration and gaze video filenames as input
# - controls calibration and gaze estimation workflow
#
# USAGE : mrgaze.py <Calibration Video> <Gaze Video>
#
# AUTHOR : <NAME>
# PLACE : Caltech
# DATES : 2014-05-07 JMT From scratch
# 2016-02-22 JMT Update print for python3. Remove unused vars, imports
#
# This file is part of mrgaze.
#
# mrgaze is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mrgaze is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mrgaze. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014 California Institute of Technology.
import os
import time
import getpass
import cv2
from mrgaze import media, utils, config, calibrate, report, engine
def _maybe_reload_cfg(data_dir, cfg, cfg_ts):
    """Reload the analysis configuration if mrgaze.cfg changed on disk.

    Returns the (possibly refreshed) cfg object and its load timestamp.
    """
    cfg_file = os.path.join(data_dir, 'mrgaze.cfg')
    if os.path.getmtime(cfg_file) > cfg_ts:
        print("Updating Configuration")
        cfg = config.LoadConfig(data_dir)
        cfg_ts = time.time()
    return cfg, cfg_ts


def _open_live_writer(vout_path, nx, ny):
    """Open an MP4V video writer for raw eye video at 30 fps.

    Returns the opened cv2.VideoWriter, or None on failure.
    """
    print('  Opening output video stream')

    # Output video codec (MP4V - poor quality compression)
    # TODO : Find a better multiplatform codec
    fourcc = cv2.cv.CV_FOURCC('m', 'p', '4', 'v')

    try:
        vout_stream = cv2.VideoWriter(vout_path, fourcc, 30, (nx, ny), True)
    except Exception:
        print('* Problem creating output video stream - skipping pupilometry')
        return None

    if not vout_stream.isOpened():
        print('* Output video not opened - skipping pupilometry')
        return None

    return vout_stream


def _measure_frame(frame, cascade, cfg):
    """Run the pupilometry engine on one preprocessed frame.

    Returns (px, py, area, blink) for the detected pupil.
    """
    pupil_ellipse, _roi_rect, blink, glint, _frame_rgb = \
        engine.PupilometryEngine(frame, cascade, cfg)
    px, py, area = engine.PupilometryPars(pupil_ellipse, glint, cfg)
    return px, py, area, blink


def LivePupilometry(data_dir, live_eyetracking=False):
    """
    Perform pupil boundary ellipse fitting on a camera feed or recorded video.

    Arguments
    ----
    data_dir : string
        Root data directory path. If empty, a per-session directory is
        created under $HOME/mrgaze.
    live_eyetracking : bool
        If True, capture from camera 0 and record raw gaze/cal videos;
        otherwise replay the previously recorded videos under data_dir.

    Returns
    ----
    Tuple (t, px, py, area, blink, art_power) from the last processed frame;
    False on setup failure; True if output exists and overwrite is disabled.
    """
    # If user did not provide a root data directory, we use HOME/mrgaze
    # plus a unique hostname/username/timestamp session subdirectory
    if data_dir == '':
        data_dir = os.path.join(os.getenv("HOME"), 'mrgaze')
        hostname = os.uname()[1]
        username = getpass.getuser()
        ss_dir = os.path.join(
            data_dir, "%s_%s_%s" % (hostname, username, int(time.time())))
    else:
        ss_dir = data_dir

    # Load configuration; remember when we loaded it for hot-reloading
    cfg = config.LoadConfig(data_dir)
    cfg_ts = time.time()

    # Output flags
    verbose = cfg.getboolean('OUTPUT', 'verbose')
    overwrite = cfg.getboolean('OUTPUT', 'overwrite')

    # Output video extension
    vout_ext = cfg.get('VIDEO', 'outputextension')

    # Freeze-frame flag, toggled with the 'f' key during gaze tracking
    freeze_frame = False

    vid_dir = os.path.join(ss_dir, 'videos')
    res_dir = os.path.join(ss_dir, 'results')

    vout_path = os.path.join(vid_dir, 'gaze' + vout_ext)
    cal_vout_path = os.path.join(vid_dir, 'cal' + vout_ext)

    # Offline mode replays what live mode would have recorded
    if not live_eyetracking:
        vin_path = vout_path
        cal_vin_path = cal_vout_path
    else:
        vin_path = 0

    # Raw pupilometry CSV file paths
    cal_pupils_csv = os.path.join(res_dir, 'cal_pupils.csv')
    pupils_csv = os.path.join(res_dir, 'gaze_pupils.csv')

    # Make sure the output directories exist
    for out_dir in (res_dir, vid_dir):
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)
            print('* %s does not exist - creating' % out_dir)

    # Set up the LBP cascade classifier
    LBP_path = os.path.join(utils._package_root(), 'Cascade/cascade.xml')
    print('  Loading LBP cascade')
    cascade = cv2.CascadeClassifier(LBP_path)

    if cascade.empty():
        print('* LBP cascade is empty - mrgaze installation problem')
        return False

    # Check for output CSV existance and overwrite flag
    if os.path.isfile(pupils_csv):
        print('+ Pupilometry output already exists - checking overwrite flag')
        if overwrite:
            print('+ Overwrite allowed - continuing')
        else:
            print('+ Overwrite forbidden - skipping pupilometry')
            return True

    #
    # Camera Input
    #
    print('  Opening camera stream')
    try:
        vin_stream = cv2.VideoCapture(vin_path)
        if live_eyetracking:
            # A single camera provides both gaze and calibration frames
            cal_vin_stream = vin_stream
        else:
            cal_vin_stream = cv2.VideoCapture(cal_vin_path)
    except Exception:
        print('* Problem opening input video stream - skipping pupilometry')
        return False

    # Give the user a chance to abort while waiting for the camera
    while not vin_stream.isOpened():
        print("Waiting for Camera.")
        key = utils._waitKey(500)
        if key == 'ESC':
            print("User Abort.")
            break

    if not vin_stream.isOpened():
        print('* Video input stream not opened - skipping pupilometry')
        return False

    if not cal_vin_stream.isOpened():
        print('* Calibration video input stream not opened - skipping pupilometry')
        return False

    # Request a 320x240 @ 30 fps capture format
    vin_stream.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
    vin_stream.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
    vin_stream.set(cv2.cv.CV_CAP_PROP_FPS, 30)

    # Read first preprocessed video frame from stream.
    # BUGFIX: the original fell through to frame.shape with 'frame'
    # undefined when the very first read failed (NameError).
    keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
    if not keep_going:
        print('* Could not read first video frame - skipping pupilometry')
        vin_stream.release()
        return False
    frame, art_power = media.Preproc(frame_orig, cfg)

    # Get size of preprocessed frame for output video setup
    nx, ny = frame.shape[1], frame.shape[0]

    # Last measured pupilometry values; also the eventual return payload
    # (the original returned undefined names if no frame was processed)
    t, px, py, area, blink = time.time(), 0.0, 0.0, 0.0, False

    # Start in gaze (non-calibration) mode; 'c'/'v' keys switch modes
    do_cal = False

    while keep_going:

        if not do_cal:

            # ---------------------------------------------------------
            # Gaze tracking phase
            # ---------------------------------------------------------

            if live_eyetracking:
                vout_stream = _open_live_writer(vout_path, nx, ny)
                if vout_stream is None:
                    return False

            # Open pupilometry CSV file to write
            try:
                pupils_stream = open(pupils_csv, 'w')
            except OSError:
                print('* Problem opening pupilometry CSV file - skipping pupilometry')
                return False

            # Print verbose column headers
            if verbose:
                print('')
                print('  %10s %10s %10s %10s %10s' % (
                    'Time (s)', 'Area', 'Blink', 'Artifact', 'FPS'))

            # Frame counter and processing timer
            fc = 0
            t0 = time.time()

            while keep_going:

                # Hot-reload the configuration roughly once per second
                if fc % 30 == 0:
                    cfg, cfg_ts = _maybe_reload_cfg(data_dir, cfg, cfg_ts)

                # Current video time in seconds
                t = time.time()

                px, py, area, blink = _measure_frame(frame, cascade, cfg)

                # Write data line to pupilometry CSV file
                pupils_stream.write(
                    '%0.4f,%0.3f,%0.3f,%0.3f,%d,%0.3f,\n' %
                    (t, area, px, py, blink, art_power)
                )

                if live_eyetracking:
                    # Record the raw (unprocessed) frame
                    vout_stream.write(frame_orig)

                # Read next frame, unless frozen for parameter tuning
                if not freeze_frame:
                    keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
                    if keep_going:
                        frame, art_power = media.Preproc(frame_orig, cfg)
                    else:
                        art_power = 0.0

                # Increment frame counter
                fc = fc + 1

                # Report processing FPS every 100 frames
                if verbose and fc % 100 == 0:
                    pfps = fc / (time.time() - t0)
                    print('  %10.1f %10.1f %10d %10.3f %10.1f' % (
                        t, area, blink, art_power, pfps))
                    t0 = time.time()
                    fc = 0

                # Keyboard handling: ESC quits, 'c' starts calibration,
                # 'f' toggles freeze-frame
                key = utils._waitKey(1)
                if key == 'ESC':
                    if live_eyetracking:
                        vout_stream.release()
                    pupils_stream.close()
                    keep_going = False
                elif key == 'c':
                    if live_eyetracking:
                        vout_stream.release()
                    pupils_stream.close()
                    do_cal = True
                    print("Starting calibration.")
                    break
                elif key == 'f':
                    freeze_frame = not freeze_frame

            if not keep_going:
                # Stream ended or user quit: ensure outputs are closed
                # (release/close are safe to call twice)
                if live_eyetracking:
                    vout_stream.release()
                pupils_stream.close()

        else:

            # ---------------------------------------------------------
            # Calibration phase
            # ---------------------------------------------------------

            if live_eyetracking:
                cal_vout_stream = _open_live_writer(cal_vout_path, nx, ny)
                if cal_vout_stream is None:
                    return False

            # Open pupilometry CSV file to write
            try:
                cal_pupils_stream = open(cal_pupils_csv, 'w')
            except OSError:
                print('* Problem opening pupilometry CSV file - skipping pupilometry')
                return False

            # Print verbose column headers
            if verbose:
                print('')
                print('  %10s %10s %10s %10s %10s' % (
                    'Time (s)', 'Area', 'Blink', 'Artifact', 'FPS'))

            # Frame counter and processing timer
            fc = 0
            t0 = time.time()

            while keep_going:

                # Hot-reload the configuration roughly once per second
                if fc % 30 == 0:
                    cfg, cfg_ts = _maybe_reload_cfg(data_dir, cfg, cfg_ts)

                # Current video time in seconds
                t = time.time()

                px, py, area, blink = _measure_frame(frame, cascade, cfg)

                # Write data line to pupilometry CSV file
                cal_pupils_stream.write(
                    '%0.4f,%0.3f,%0.3f,%0.3f,%d,%0.3f,\n' %
                    (t, area, px, py, blink, art_power)
                )

                if live_eyetracking:
                    cal_vout_stream.write(frame_orig)

                # Calibration always advances to the next frame
                keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
                if keep_going:
                    frame, art_power = media.Preproc(frame_orig, cfg)
                else:
                    art_power = 0.0

                # Increment frame counter
                fc = fc + 1

                # Report processing FPS every 100 frames
                if verbose and fc % 100 == 0:
                    pfps = fc / (time.time() - t0)
                    print('  %10.1f %10.1f %10d %10.3f %10.1f' % (
                        t, area, blink, art_power, pfps))
                    t0 = time.time()
                    fc = 0

                # Keyboard handling: ESC quits, 'v' returns to gaze mode
                key = utils._waitKey(1)
                if key == 'ESC':
                    keep_going = False
                    if live_eyetracking:
                        cal_vout_stream.release()
                    cal_pupils_stream.close()
                elif key == 'v' or not keep_going:
                    do_cal = False
                    print("Stopping calibration.")
                    if live_eyetracking:
                        cal_vout_stream.release()
                    cal_pupils_stream.close()
                    break

    # Build and apply the gaze calibration model
    print('  Create calibration model')
    C, central_fix = calibrate.AutoCalibrate(res_dir, cfg)

    if not C.any():
        print('* Empty calibration matrix detected - skipping')

    try:
        print('  Calibrate pupilometry')
        calibrate.ApplyCalibration(ss_dir, C, central_fix, cfg)
    except UnboundLocalError:
        print('  No calibration data found')

    cv2.destroyAllWindows()
    vin_stream.release()

    print('')
    print('  Generate Report')
    print('  ---------------')
    report.WriteReport(ss_dir, cfg)

    # Return pupilometry timeseries from the last processed frame
    return t, px, py, area, blink, art_power
def VideoPupilometry(data_dir, subj_sess, v_stub, cfg):
    """
    Perform pupil boundary ellipse fitting on an entire video.

    Arguments
    ----
    data_dir : string
        Root data directory path.
    subj_sess : string
        Subject/Session name used for subdirectory within data_dir
    v_stub : string
        Video filename stub, eg 'cal' or 'gaze'
    cfg :
        Analysis configuration parameters

    Returns
    ----
    result : boolean or tuple
        False if any setup step fails, True if output already exists and
        overwrite is forbidden; otherwise the pupilometry values of the
        final processed frame: (t, px, py, area, blink, art_power).
    """

    # Output flags
    verbose = cfg.getboolean('OUTPUT', 'verbose')
    overwrite = cfg.getboolean('OUTPUT', 'overwrite')

    # Video information
    vin_ext = cfg.get('VIDEO', 'inputextension')
    vout_ext = cfg.get('VIDEO', 'outputextension')
    vin_fps = cfg.getfloat('VIDEO', 'inputfps')

    # Full video file paths
    ss_dir = os.path.join(data_dir, subj_sess)
    vid_dir = os.path.join(ss_dir, 'videos')
    res_dir = os.path.join(ss_dir, 'results')
    vin_path = os.path.join(vid_dir, v_stub + vin_ext)
    vout_path = os.path.join(res_dir, v_stub + '_pupils' + vout_ext)

    # Raw pupilometry CSV file path
    pupils_csv = os.path.join(res_dir, v_stub + '_pupils.csv')

    # Check that input video file exists
    if not os.path.isfile(vin_path):
        print('* %s does not exist - returning' % vin_path)
        return False

    # Set up the LBP cascade classifier used for pupil detection
    LBP_path = os.path.join(utils._package_root(), 'Cascade/cascade.xml')
    print(' Loading LBP cascade')
    cascade = cv2.CascadeClassifier(LBP_path)
    if cascade.empty():
        print('* LBP cascade is empty - mrgaze installation problem')
        return False

    # Check for output CSV existence and overwrite flag
    if os.path.isfile(pupils_csv):
        print('+ Pupilometry output already exists - checking overwrite flag')
        if overwrite:
            print('+ Overwrite allowed - continuing')
        else:
            print('+ Overwrite forbidden - skipping pupilometry')
            return True

    #
    # Input video
    #
    print(' Opening input video stream')
    try:
        vin_stream = cv2.VideoCapture(vin_path)
    except:
        print('* Problem opening input video stream - skipping pupilometry')
        return False

    if not vin_stream.isOpened():
        print('* Video input stream not opened - skipping pupilometry')
        return False

    # Video FPS from metadata
    # TODO: may not work with Quicktime videos
    # fps = vin_stream.get(cv2.cv.CV_CAP_PROP_FPS)

    # Total number of frames in video file (used only for % done reporting)
    nf = vin_stream.get(cv2.CAP_PROP_FRAME_COUNT)
    print(' Video has %d frames at %0.3f fps' % (nf, vin_fps))

    # Read first preprocessed video frame from stream
    keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
    if keep_going:
        frame, art_power = media.Preproc(frame_orig, cfg)
    else:
        # BUGFIX: the original fell through with `frame` undefined and
        # crashed with a NameError at frame.shape below. Bail out cleanly
        # if the very first frame cannot be read.
        print('* Could not read first video frame - skipping pupilometry')
        vin_stream.release()
        return False

    # Get size of preprocessed frame for output video setup
    nx, ny = frame.shape[1], frame.shape[0]

    #
    # Output video
    #
    print(' Opening output video stream')

    # Output video codec (MP4V - poor quality compression)
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    try:
        vout_stream = cv2.VideoWriter(vout_path, fourcc, 30, (nx, ny), True)
    except:
        print('* Problem creating output video stream - skipping pupilometry')
        return False

    if not vout_stream.isOpened():
        print('* Output video not opened - skipping pupilometry')
        return False

    # Open pupilometry CSV file to write
    try:
        pupils_stream = open(pupils_csv, 'w')
    except:
        print('* Problem opening pupilometry CSV file - skipping pupilometry')
        return False

    #
    # Main Video Frame Loop
    #

    # Print verbose column headers
    if verbose:
        print('')
        print(' %10s %10s %10s %10s %10s %10s' % (
            'Time (s)', '% Done', 'Area', 'Blink', 'Artifact', 'FPS'))

    # Init frame counter
    fc = 0

    # Init processing timer
    t0 = time.time()

    while keep_going:

        # Current video time in seconds
        t = fc / vin_fps

        # -------------------------------------
        # Pass this frame to pupilometry engine
        # -------------------------------------
        pupil_ellipse, roi_rect, blink, glint, frame_rgb = engine.PupilometryEngine(frame, cascade, cfg)

        # Derive pupilometry parameters
        px, py, area = engine.PupilometryPars(pupil_ellipse, glint, cfg)

        # Write data line to pupilometry CSV file
        pupils_stream.write(
            '%0.3f,%0.3f,%0.3f,%0.3f,%d,%0.3f,\n' %
            (t, area, px, py, blink, art_power)
        )

        # Write output video frame
        vout_stream.write(frame_rgb)

        # Read next frame (if available)
        keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
        if keep_going:
            frame, art_power = media.Preproc(frame_orig, cfg)
        else:
            art_power = 0.0

        # Increment frame counter
        fc = fc + 1

        # Report processing FPS every 100 frames
        if verbose:
            if fc % 100 == 0:
                perc_done = fc / float(nf) * 100.0
                pfps = fc / (time.time() - t0)
                print(' %10.1f %10.1f %10.1f %10d %10.3f %10.1f' % (
                    t, perc_done, area, blink, art_power, pfps))

    # Clean up
    cv2.destroyAllWindows()
    vin_stream.release()
    vout_stream.release()
    pupils_stream.close()

    # Return pupilometry values of the final processed frame
    return t, px, py, area, blink, art_power
| [
"mrgaze.media.Preproc",
"mrgaze.calibrate.AutoCalibrate",
"mrgaze.engine.PupilometryPars",
"cv2.destroyAllWindows",
"getpass.getuser",
"cv2.CascadeClassifier",
"mrgaze.media.LoadVideoFrame",
"mrgaze.config.LoadConfig",
"cv2.VideoWriter",
"os.path.isdir",
"cv2.VideoWriter_fourcc",
"mrgaze.utils... | [((2006, 2033), 'mrgaze.config.LoadConfig', 'config.LoadConfig', (['data_dir'], {}), '(data_dir)\n', (2023, 2033), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((2047, 2058), 'time.time', 'time.time', ([], {}), '()\n', (2056, 2058), False, 'import time\n'), ((2431, 2461), 'os.path.join', 'os.path.join', (['ss_dir', '"""videos"""'], {}), "(ss_dir, 'videos')\n", (2443, 2461), False, 'import os\n'), ((2476, 2507), 'os.path.join', 'os.path.join', (['ss_dir', '"""results"""'], {}), "(ss_dir, 'results')\n", (2488, 2507), False, 'import os\n'), ((2525, 2565), 'os.path.join', 'os.path.join', (['vid_dir', "('gaze' + vout_ext)"], {}), "(vid_dir, 'gaze' + vout_ext)\n", (2537, 2565), False, 'import os\n'), ((2586, 2625), 'os.path.join', 'os.path.join', (['vid_dir', "('cal' + vout_ext)"], {}), "(vid_dir, 'cal' + vout_ext)\n", (2598, 2625), False, 'import os\n'), ((2920, 2959), 'os.path.join', 'os.path.join', (['res_dir', '"""cal_pupils.csv"""'], {}), "(res_dir, 'cal_pupils.csv')\n", (2932, 2959), False, 'import os\n'), ((2977, 3017), 'os.path.join', 'os.path.join', (['res_dir', '"""gaze_pupils.csv"""'], {}), "(res_dir, 'gaze_pupils.csv')\n", (2989, 3017), False, 'import os\n'), ((3471, 3502), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['LBP_path'], {}), '(LBP_path)\n', (3492, 3502), False, 'import cv2\n'), ((3683, 3709), 'os.path.isfile', 'os.path.isfile', (['pupils_csv'], {}), '(pupils_csv)\n', (3697, 3709), False, 'import os\n'), ((5541, 5578), 'mrgaze.media.LoadVideoFrame', 'media.LoadVideoFrame', (['vin_stream', 'cfg'], {}), '(vin_stream, cfg)\n', (5561, 5578), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((15266, 15289), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (15287, 15289), False, 'import cv2\n'), ((15396, 15427), 'mrgaze.report.WriteReport', 'report.WriteReport', (['ss_dir', 'cfg'], {}), '(ss_dir, cfg)\n', (15414, 15427), False, 'from mrgaze 
import media, utils, config, calibrate, report, engine\n'), ((16362, 16395), 'os.path.join', 'os.path.join', (['data_dir', 'subj_sess'], {}), '(data_dir, subj_sess)\n', (16374, 16395), False, 'import os\n'), ((16410, 16440), 'os.path.join', 'os.path.join', (['ss_dir', '"""videos"""'], {}), "(ss_dir, 'videos')\n", (16422, 16440), False, 'import os\n'), ((16455, 16486), 'os.path.join', 'os.path.join', (['ss_dir', '"""results"""'], {}), "(ss_dir, 'results')\n", (16467, 16486), False, 'import os\n'), ((16502, 16541), 'os.path.join', 'os.path.join', (['vid_dir', '(v_stub + vin_ext)'], {}), '(vid_dir, v_stub + vin_ext)\n', (16514, 16541), False, 'import os\n'), ((16558, 16610), 'os.path.join', 'os.path.join', (['res_dir', "(v_stub + '_pupils' + vout_ext)"], {}), "(res_dir, v_stub + '_pupils' + vout_ext)\n", (16570, 16610), False, 'import os\n'), ((16679, 16724), 'os.path.join', 'os.path.join', (['res_dir', "(v_stub + '_pupils.csv')"], {}), "(res_dir, v_stub + '_pupils.csv')\n", (16691, 16724), False, 'import os\n'), ((17050, 17081), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['LBP_path'], {}), '(LBP_path)\n', (17071, 17081), False, 'import cv2\n'), ((17262, 17288), 'os.path.isfile', 'os.path.isfile', (['pupils_csv'], {}), '(pupils_csv)\n', (17276, 17288), False, 'import os\n'), ((18290, 18327), 'mrgaze.media.LoadVideoFrame', 'media.LoadVideoFrame', (['vin_stream', 'cfg'], {}), '(vin_stream, cfg)\n', (18310, 18327), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((18692, 18734), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""m"""', '"""p"""', '"""4"""', '"""v"""'], {}), "('m', 'p', '4', 'v')\n", (18714, 18734), False, 'import cv2\n'), ((19572, 19583), 'time.time', 'time.time', ([], {}), '()\n', (19581, 19583), False, 'import time\n'), ((20932, 20955), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (20953, 20955), False, 'import cv2\n'), ((1822, 1839), 'getpass.getuser', 'getpass.getuser', ([], {}), 
'()\n', (1837, 1839), False, 'import getpass\n'), ((3072, 3094), 'os.path.isdir', 'os.path.isdir', (['res_dir'], {}), '(res_dir)\n', (3085, 3094), False, 'import os\n'), ((3104, 3124), 'os.makedirs', 'os.makedirs', (['res_dir'], {}), '(res_dir)\n', (3115, 3124), False, 'import os\n'), ((3194, 3216), 'os.path.isdir', 'os.path.isdir', (['vid_dir'], {}), '(vid_dir)\n', (3207, 3216), False, 'import os\n'), ((3226, 3246), 'os.makedirs', 'os.makedirs', (['vid_dir'], {}), '(vid_dir)\n', (3237, 3246), False, 'import os\n'), ((3375, 3396), 'mrgaze.utils._package_root', 'utils._package_root', ([], {}), '()\n', (3394, 3396), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((4501, 4520), 'mrgaze.utils._waitKey', 'utils._waitKey', (['(500)'], {}), '(500)\n', (4515, 4520), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((5625, 5655), 'mrgaze.media.Preproc', 'media.Preproc', (['frame_orig', 'cfg'], {}), '(frame_orig, cfg)\n', (5638, 5655), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((15130, 15185), 'mrgaze.calibrate.ApplyCalibration', 'calibrate.ApplyCalibration', (['ss_dir', 'C', 'central_fix', 'cfg'], {}), '(ss_dir, C, central_fix, cfg)\n', (15156, 15185), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((16778, 16802), 'os.path.isfile', 'os.path.isfile', (['vin_path'], {}), '(vin_path)\n', (16792, 16802), False, 'import os\n'), ((16954, 16975), 'mrgaze.utils._package_root', 'utils._package_root', ([], {}), '()\n', (16973, 16975), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((17653, 17679), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vin_path'], {}), '(vin_path)\n', (17669, 17679), False, 'import cv2\n'), ((18374, 18404), 'mrgaze.media.Preproc', 'media.Preproc', (['frame_orig', 'cfg'], {}), '(frame_orig, cfg)\n', (18387, 18404), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), 
((18764, 18818), 'cv2.VideoWriter', 'cv2.VideoWriter', (['vout_path', 'fourcc', '(30)', '(nx, ny)', '(True)'], {}), '(vout_path, fourcc, 30, (nx, ny), True)\n', (18779, 18818), False, 'import cv2\n'), ((19877, 19922), 'mrgaze.engine.PupilometryEngine', 'engine.PupilometryEngine', (['frame', 'cascade', 'cfg'], {}), '(frame, cascade, cfg)\n', (19901, 19922), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((19987, 20036), 'mrgaze.engine.PupilometryPars', 'engine.PupilometryPars', (['pupil_ellipse', 'glint', 'cfg'], {}), '(pupil_ellipse, glint, cfg)\n', (20009, 20036), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((20375, 20412), 'mrgaze.media.LoadVideoFrame', 'media.LoadVideoFrame', (['vin_stream', 'cfg'], {}), '(vin_stream, cfg)\n', (20395, 20412), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((1708, 1725), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (1717, 1725), False, 'import os\n'), ((1789, 1799), 'os.uname', 'os.uname', ([], {}), '()\n', (1797, 1799), False, 'import os\n'), ((4107, 4133), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vin_path'], {}), '(vin_path)\n', (4123, 4133), False, 'import cv2\n'), ((4163, 4193), 'cv2.VideoCapture', 'cv2.VideoCapture', (['cal_vin_path'], {}), '(cal_vin_path)\n', (4179, 4193), False, 'import cv2\n'), ((4233, 4259), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vin_path'], {}), '(vin_path)\n', (4249, 4259), False, 'import cv2\n'), ((7371, 7382), 'time.time', 'time.time', ([], {}), '()\n', (7380, 7382), False, 'import time\n'), ((11817, 11828), 'time.time', 'time.time', ([], {}), '()\n', (11826, 11828), False, 'import time\n'), ((14933, 14970), 'mrgaze.calibrate.AutoCalibrate', 'calibrate.AutoCalibrate', (['res_dir', 'cfg'], {}), '(res_dir, cfg)\n', (14956, 14970), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((20467, 20497), 'mrgaze.media.Preproc', 'media.Preproc', 
(['frame_orig', 'cfg'], {}), '(frame_orig, cfg)\n', (20480, 20497), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((6271, 6307), 'cv2.cv.CV_FOURCC', 'cv2.cv.CV_FOURCC', (['"""m"""', '"""p"""', '"""4"""', '"""v"""'], {}), "('m', 'p', '4', 'v')\n", (6287, 6307), False, 'import cv2\n'), ((7914, 7925), 'time.time', 'time.time', ([], {}), '()\n', (7923, 7925), False, 'import time\n'), ((8204, 8249), 'mrgaze.engine.PupilometryEngine', 'engine.PupilometryEngine', (['frame', 'cascade', 'cfg'], {}), '(frame, cascade, cfg)\n', (8228, 8249), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((8402, 8451), 'mrgaze.engine.PupilometryPars', 'engine.PupilometryPars', (['pupil_ellipse', 'glint', 'cfg'], {}), '(pupil_ellipse, glint, cfg)\n', (8424, 8451), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((9729, 9746), 'mrgaze.utils._waitKey', 'utils._waitKey', (['(1)'], {}), '(1)\n', (9743, 9746), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((10698, 10734), 'cv2.cv.CV_FOURCC', 'cv2.cv.CV_FOURCC', (['"""m"""', '"""p"""', '"""4"""', '"""v"""'], {}), "('m', 'p', '4', 'v')\n", (10714, 10734), False, 'import cv2\n'), ((12358, 12369), 'time.time', 'time.time', ([], {}), '()\n', (12367, 12369), False, 'import time\n'), ((12648, 12693), 'mrgaze.engine.PupilometryEngine', 'engine.PupilometryEngine', (['frame', 'cascade', 'cfg'], {}), '(frame, cascade, cfg)\n', (12672, 12693), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((12846, 12895), 'mrgaze.engine.PupilometryPars', 'engine.PupilometryPars', (['pupil_ellipse', 'glint', 'cfg'], {}), '(pupil_ellipse, glint, cfg)\n', (12868, 12895), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((13431, 13468), 'mrgaze.media.LoadVideoFrame', 'media.LoadVideoFrame', (['vin_stream', 'cfg'], {}), '(vin_stream, cfg)\n', (13451, 13468), False, 'from mrgaze import 
media, utils, config, calibrate, report, engine\n'), ((14266, 14283), 'mrgaze.utils._waitKey', 'utils._waitKey', (['(1)'], {}), '(1)\n', (14280, 14283), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((6362, 6416), 'cv2.VideoWriter', 'cv2.VideoWriter', (['vout_path', 'fourcc', '(30)', '(nx, ny)', '(True)'], {}), '(vout_path, fourcc, 30, (nx, ny), True)\n', (6377, 6416), False, 'import cv2\n'), ((9000, 9037), 'mrgaze.media.LoadVideoFrame', 'media.LoadVideoFrame', (['vin_stream', 'cfg'], {}), '(vin_stream, cfg)\n', (9020, 9037), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((9108, 9138), 'mrgaze.media.Preproc', 'media.Preproc', (['frame_orig', 'cfg'], {}), '(frame_orig, cfg)\n', (9121, 9138), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((10792, 10850), 'cv2.VideoWriter', 'cv2.VideoWriter', (['cal_vout_path', 'fourcc', '(30)', '(nx, ny)', '(True)'], {}), '(cal_vout_path, fourcc, 30, (nx, ny), True)\n', (10807, 10850), False, 'import cv2\n'), ((13539, 13569), 'mrgaze.media.Preproc', 'media.Preproc', (['frame_orig', 'cfg'], {}), '(frame_orig, cfg)\n', (13552, 13569), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((1919, 1930), 'time.time', 'time.time', ([], {}), '()\n', (1928, 1930), False, 'import time\n'), ((7605, 7641), 'os.path.join', 'os.path.join', (['data_dir', '"""mrgaze.cfg"""'], {}), "(data_dir, 'mrgaze.cfg')\n", (7617, 7641), False, 'import os\n'), ((7772, 7799), 'mrgaze.config.LoadConfig', 'config.LoadConfig', (['data_dir'], {}), '(data_dir)\n', (7789, 7799), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((7833, 7844), 'time.time', 'time.time', ([], {}), '()\n', (7842, 7844), False, 'import time\n'), ((9592, 9603), 'time.time', 'time.time', ([], {}), '()\n', (9601, 9603), False, 'import time\n'), ((12049, 12085), 'os.path.join', 'os.path.join', (['data_dir', '"""mrgaze.cfg"""'], {}), 
"(data_dir, 'mrgaze.cfg')\n", (12061, 12085), False, 'import os\n'), ((12216, 12243), 'mrgaze.config.LoadConfig', 'config.LoadConfig', (['data_dir'], {}), '(data_dir)\n', (12233, 12243), False, 'from mrgaze import media, utils, config, calibrate, report, engine\n'), ((12277, 12288), 'time.time', 'time.time', ([], {}), '()\n', (12286, 12288), False, 'import time\n'), ((14129, 14140), 'time.time', 'time.time', ([], {}), '()\n', (14138, 14140), False, 'import time\n'), ((20758, 20769), 'time.time', 'time.time', ([], {}), '()\n', (20767, 20769), False, 'import time\n'), ((9412, 9423), 'time.time', 'time.time', ([], {}), '()\n', (9421, 9423), False, 'import time\n'), ((13949, 13960), 'time.time', 'time.time', ([], {}), '()\n', (13958, 13960), False, 'import time\n')] |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author : Alenx.Hai <<EMAIL>>
# created time: 2020/12/21-10:49 上午
import asyncio
from src.mysql_elastic import MySQLElasticSearch
@asyncio.coroutine
def main():
    """Push MySQL rows into ElasticSearch via the sync helper coroutine."""
    elastic = MySQLElasticSearch()
    # BUGFIX: a bare ``yield`` hands the coroutine object to the event loop
    # unawaited (asyncio raises "Task got bad yield"); delegate with
    # ``yield from`` so put_data() actually runs to completion.
    yield from elastic.put_data()
if __name__ == '__main__':
    # Run the coroutine to completion. Wrapping a single coroutine in
    # asyncio.wait([...]) is deprecated (and pointless here);
    # run_until_complete() awaits it directly.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"asyncio.get_event_loop",
"src.mysql_elastic.MySQLElasticSearch"
] | [((224, 244), 'src.mysql_elastic.MySQLElasticSearch', 'MySQLElasticSearch', ([], {}), '()\n', (242, 244), False, 'from src.mysql_elastic import MySQLElasticSearch\n'), ((314, 338), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (336, 338), False, 'import asyncio\n')] |
# Crie um módulo chamado moeda.py que tenha as funções incorporadas aumentar(), diminuir(), dobro() e metade(). Faça
# também um programa que importe esse módulo e use algumas dessas funções.
import moeda
# Read a price from the user and report half, double, and a 10% increase
# using the helper functions from the moeda module.
preco = float(input('Digite o preço: '))
metade = moeda.metade(preco)
print(f'A metade do {preco} é R${metade}')
dobro = moeda.dobro(preco)
print(f'O dobro de {preco} é R${dobro}')
aumento = moeda.aumentar(preco, 10)
print(f'Aumentando 10%, temos R${aumento}')
| [
"moeda.aumentar",
"moeda.metade",
"moeda.dobro"
] | [((274, 289), 'moeda.metade', 'moeda.metade', (['p'], {}), '(p)\n', (286, 289), False, 'import moeda\n'), ((321, 335), 'moeda.dobro', 'moeda.dobro', (['p'], {}), '(p)\n', (332, 335), False, 'import moeda\n'), ((371, 392), 'moeda.aumentar', 'moeda.aumentar', (['p', '(10)'], {}), '(p, 10)\n', (385, 392), False, 'import moeda\n')] |
"""Class to hold all cover accessories."""
import logging
from homeassistant.components.cover import ATTR_CURRENT_POSITION
from homeassistant.helpers.event import async_track_state_change
from . import TYPES
from .accessories import HomeAccessory, add_preload_service
from .const import (
SERV_WINDOW_COVERING, CHAR_CURRENT_POSITION,
CHAR_TARGET_POSITION, CHAR_POSITION_STATE)
_LOGGER = logging.getLogger(__name__)
@TYPES.register('Window')
class Window(HomeAccessory):
    """Generate a Window accessory for a cover entity.

    The cover entity must support: set_cover_position.
    """

    def __init__(self, hass, entity_id, display_name):
        """Initialize a Window accessory object."""
        super().__init__(display_name, entity_id, 'WINDOW')

        self._hass = hass
        self._entity_id = entity_id
        self.current_position = None
        # Position last requested through HomeKit, or None when the most
        # recent movement originated in Home Assistant.
        self.homekit_target = None

        self.serv_cover = add_preload_service(self, SERV_WINDOW_COVERING)
        self.char_current_position = self.serv_cover. \
            get_characteristic(CHAR_CURRENT_POSITION)
        self.char_target_position = self.serv_cover. \
            get_characteristic(CHAR_TARGET_POSITION)
        self.char_position_state = self.serv_cover. \
            get_characteristic(CHAR_POSITION_STATE)
        # HomeKit writes to the target position trigger move_cover().
        self.char_target_position.setter_callback = self.move_cover

    def run(self):
        """Method called by object after driver is started."""
        # Seed characteristics from the current HA state, then subscribe
        # to future state changes.
        state = self._hass.states.get(self._entity_id)
        self.update_cover_position(new_state=state)
        async_track_state_change(
            self._hass, self._entity_id, self.update_cover_position)

    def move_cover(self, value):
        """Move cover to value if call came from HomeKit.

        value : int
            Target position (0-100) requested by HomeKit.
        """
        if value != self.current_position:
            _LOGGER.debug("%s: Set position to %d", self._entity_id, value)
            self.homekit_target = value
            # HomeKit PositionState: 1 = opening, 0 = closing.
            if value > self.current_position:
                self.char_position_state.set_value(1)
            elif value < self.current_position:
                self.char_position_state.set_value(0)
            self._hass.services.call(
                'cover', 'set_cover_position',
                {'entity_id': self._entity_id, 'position': value})

    def update_cover_position(self, entity_id=None, old_state=None,
                              new_state=None):
        """Update cover position after state changed."""
        if new_state is None:
            return

        # BUGFIX: use .get() - with bracket access a state lacking the
        # attribute raised KeyError before the None check below could run.
        current_position = new_state.attributes.get(ATTR_CURRENT_POSITION)
        if current_position is None:
            return

        self.current_position = int(current_position)
        self.char_current_position.set_value(self.current_position)

        # Consider the HomeKit-requested move finished once the cover is
        # within 5 units of the target; PositionState 2 = stopped.
        if self.homekit_target is None or \
                abs(self.current_position - self.homekit_target) < 6:
            self.char_target_position.set_value(self.current_position)
            self.char_position_state.set_value(2)
            self.homekit_target = None
| [
"logging.getLogger",
"homeassistant.helpers.event.async_track_state_change"
] | [((399, 426), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (416, 426), False, 'import logging\n'), ((1574, 1660), 'homeassistant.helpers.event.async_track_state_change', 'async_track_state_change', (['self._hass', 'self._entity_id', 'self.update_cover_position'], {}), '(self._hass, self._entity_id, self.\n update_cover_position)\n', (1598, 1660), False, 'from homeassistant.helpers.event import async_track_state_change\n')] |
import unittest
from yapper import create_app, db
from yapper.blueprints.user.models import User, Role
class TestUserAddToDb(unittest.TestCase):
    """Verify that Role and User models receive database ids on commit."""

    def setUp(self):
        """Create an app context and a fresh schema for each test."""
        self.app = create_app('test')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()

    def tearDown(self):
        """Drop the schema and pop the app context."""
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_role_gets_id(self):
        """A Role has no id before commit and a real id after."""
        role = Role(name='admin')
        # assertIsNone/assertIsNotNone report the offending value on
        # failure, unlike assertTrue(x is None).
        self.assertIsNone(role.id)
        db.session.add(role)
        db.session.commit()
        self.assertIsNotNone(role.id)

    def test_user_gets_role_and_id(self):
        """Committing a User cascades the Role insert and links role_id."""
        role = Role(name='administrator')
        self.assertIsNone(role.id)
        user = User(email='<EMAIL>', password='<PASSWORD>', role=role)
        self.assertIsNone(user.id)
        db.session.add(user)
        db.session.commit()
        self.assertIsNotNone(role.id)
        self.assertIsNotNone(user.id)
        self.assertEqual(user.role_id, role.id)
        self.assertTrue(user.is_admin())
| [
"yapper.db.drop_all",
"yapper.create_app",
"yapper.db.session.add",
"yapper.db.session.remove",
"yapper.blueprints.user.models.Role",
"yapper.db.create_all",
"yapper.db.session.commit",
"yapper.blueprints.user.models.User"
] | [((187, 205), 'yapper.create_app', 'create_app', (['"""test"""'], {}), "('test')\n", (197, 205), False, 'from yapper import create_app, db\n'), ((296, 311), 'yapper.db.create_all', 'db.create_all', ([], {}), '()\n', (309, 311), False, 'from yapper import create_app, db\n'), ((345, 364), 'yapper.db.session.remove', 'db.session.remove', ([], {}), '()\n', (362, 364), False, 'from yapper import create_app, db\n'), ((373, 386), 'yapper.db.drop_all', 'db.drop_all', ([], {}), '()\n', (384, 386), False, 'from yapper import create_app, db\n'), ((467, 485), 'yapper.blueprints.user.models.Role', 'Role', ([], {'name': '"""admin"""'}), "(name='admin')\n", (471, 485), False, 'from yapper.blueprints.user.models import User, Role\n'), ((535, 555), 'yapper.db.session.add', 'db.session.add', (['role'], {}), '(role)\n', (549, 555), False, 'from yapper import create_app, db\n'), ((564, 583), 'yapper.db.session.commit', 'db.session.commit', ([], {}), '()\n', (581, 583), False, 'from yapper import create_app, db\n'), ((684, 710), 'yapper.blueprints.user.models.Role', 'Role', ([], {'name': '"""administrator"""'}), "(name='administrator')\n", (688, 710), False, 'from yapper.blueprints.user.models import User, Role\n'), ((767, 822), 'yapper.blueprints.user.models.User', 'User', ([], {'email': '"""<EMAIL>"""', 'password': '"""<PASSWORD>"""', 'role': 'role'}), "(email='<EMAIL>', password='<PASSWORD>', role=role)\n", (771, 822), False, 'from yapper.blueprints.user.models import User, Role\n'), ((872, 892), 'yapper.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (886, 892), False, 'from yapper import create_app, db\n'), ((901, 920), 'yapper.db.session.commit', 'db.session.commit', ([], {}), '()\n', (918, 920), False, 'from yapper import create_app, db\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from bibdeskparser.customization import InvalidName, splitname
class TestSplitnameMethod(unittest.TestCase):
    """Unit tests for customization.splitname().

    Covers degenerate inputs, strict-mode error handling, and the
    lenient (strict_mode=False) recovery behaviour for malformed names.
    """

    def test_splitname_basic(self):
        """Basic tests of customization.splitname() """
        # Empty input.
        result = splitname("")
        expected = {}
        self.assertEqual(result, expected, msg="Invalid output for empty name")

        # Non-whitespace names.
        result = splitname(" ")
        expected = {}
        self.assertEqual(
            result, expected, msg="Invalid output for space-only name"
        )
        result = splitname(" \t~~")
        expected = {}
        self.assertEqual(
            result, expected, msg="Invalid output for whitespace name"
        )

        # Test strict mode: malformed names must raise InvalidName.
        with self.assertRaises(InvalidName):  # Trailing comma (4 cases).
            splitname("BB,", strict_mode=True)
        with self.assertRaises(InvalidName):
            splitname("BB, ", strict_mode=True)
        with self.assertRaises(InvalidName):
            splitname("BB, ~\t", strict_mode=True)
        with self.assertRaises(InvalidName):
            splitname(", ~\t", strict_mode=True)
        with self.assertRaises(InvalidName):  # Too many sections.
            splitname("AA, BB, CC, DD", strict_mode=True)
        with self.assertRaises(
            InvalidName
        ):  # Unterminated opening brace (x3).
            splitname("AA {BB CC", strict_mode=True)
        with self.assertRaises(InvalidName):
            splitname("AA {{{BB CC", strict_mode=True)
        with self.assertRaises(InvalidName):
            splitname("AA {{{BB} CC}", strict_mode=True)
        with self.assertRaises(InvalidName):  # Unmatched closing brace (x3).
            splitname("AA BB CC}", strict_mode=True)
        with self.assertRaises(InvalidName):
            splitname("AA BB CC}}}", strict_mode=True)
        with self.assertRaises(InvalidName):
            splitname("{AA {BB CC}}}", strict_mode=True)

        # Test strict mode off for trailing comma: the comma is dropped
        # and the remainder parsed as the last name.
        expected = {'first': [], 'von': [], 'last': ["BB"], 'jr': []}
        result = splitname("BB,", strict_mode=False)
        self.assertEqual(
            result,
            expected,
            msg="Invalid output for trailing comma with strict mode off",
        )
        result = splitname("BB, ", strict_mode=False)
        self.assertEqual(
            result,
            expected,
            msg="Invalid output for trailing comma with strict mode off",
        )
        result = splitname("BB, ~\t ", strict_mode=False)
        self.assertEqual(
            result,
            expected,
            msg="Invalid output for trailing comma with strict mode off",
        )
        expected = {}
        result = splitname(", ~\t", strict_mode=False)
        self.assertEqual(
            result,
            expected,
            msg="Invalid output for trailing comma with strict mode off",
        )

        # Test strict mode off for too many sections: extras beyond
        # "last, jr, first" are folded into the first-name section.
        expected = {
            'first': ["CC", "DD"],
            'von': [],
            'last': ["AA"],
            'jr': ["BB"],
        }
        result = splitname("AA, BB, CC, DD", strict_mode=False)
        self.assertEqual(
            result,
            expected,
            msg="Invalid output for too many sections with strict mode off",
        )

        # Test strict mode off for an unterminated opening brace:
        # missing closing braces are appended.
        result = splitname("AA {BB CC", strict_mode=False)
        expected = {'first': ["AA"], 'von': [], 'last': ["{BB CC}"], 'jr': []}
        self.assertEqual(
            result,
            expected,
            msg="Invalid output for unterminated opening brace with strict mode off",
        )
        result = splitname("AA {{{BB CC", strict_mode=False)
        expected = {
            'first': ["AA"],
            'von': [],
            'last': ["{{{BB CC}}}"],
            'jr': [],
        }
        self.assertEqual(
            result,
            expected,
            msg="Invalid output for unterminated opening brace with strict mode off",
        )
        result = splitname("AA {{{BB} CC}", strict_mode=False)
        expected = {
            'first': ["AA"],
            'von': [],
            'last': ["{{{BB} CC}}"],
            'jr': [],
        }
        self.assertEqual(
            result,
            expected,
            msg="Invalid output for unterminated opening brace with strict mode off",
        )

        # Test strict mode off for an unmatched closing brace:
        # missing opening braces are prepended.
        result = splitname("AA BB CC}", strict_mode=False)
        expected = {
            'first': ["AA", "BB"],
            'von': [],
            'last': ["{CC}"],
            'jr': [],
        }
        self.assertEqual(
            result,
            expected,
            msg="Invalid output for unmatched closing brace with strict mode off",
        )
        result = splitname("AA BB CC}}}", strict_mode=False)
        expected = {
            'first': ["AA", "BB"],
            'von': [],
            'last': ["{{{CC}}}"],
            'jr': [],
        }
        self.assertEqual(
            result,
            expected,
            msg="Invalid output for unmatched closing brace with strict mode off",
        )
        result = splitname("{AA {BB CC}}}", strict_mode=False)
        expected = {
            'first': [],
            'von': [],
            'last': ["{{AA {BB CC}}}"],
            'jr': [],
        }
        self.assertEqual(
            result,
            expected,
            msg="Invalid output for unmatched closing brace with strict mode off",
        )

        # Test it handles commas at higher brace levels: a braced comma
        # does not split sections.
        result = splitname("CC, dd, {AA, BB}")
        expected = {
            'first': ["{AA, BB}"],
            'von': [],
            'last': ["CC"],
            'jr': ["dd"],
        }
        self.assertEqual(
            result, expected, msg="Invalid output for braced commas"
        )

    def test_splitname_cases(self):
        """Test customization.splitname() vs output from BibTeX """
        # splitname_test_cases is a module-level tuple of (name, expected)
        # pairs generated against reference BibTeX behaviour.
        for name, expected in splitname_test_cases:
            result = splitname(name)
            self.assertEqual(
                result, expected, msg="Input name: {0}".format(name)
            )
splitname_test_cases = (
(
r'<NAME>',
{'first': ['Per', 'Brinch'], 'von': [], 'last': ['Hansen'], 'jr': []},
),
(
r'<NAME>, Per',
{'first': ['Per'], 'von': [], 'last': ['Brinch', 'Hansen'], 'jr': []},
),
(
r'<NAME>,, Per',
{'first': ['Per'], 'von': [], 'last': ['Brinch', 'Hansen'], 'jr': []},
),
(
r"<NAME> <NAME>{\'e}<NAME>",
{
'first': ['Charles', 'Louis', 'Xavier', 'Joseph'],
'von': ['de', 'la'],
'last': [r'Vall{\'e}e', 'Poussin'],
'jr': [],
},
),
(
r'D[<NAME>',
{'first': ['D[onald]', 'E.'], 'von': [], 'last': ['Knuth'], 'jr': []},
),
(
r'A. {Delgado de Molina}',
{
'first': ['A.'],
'von': [],
'last': ['{Delgado de Molina}'],
'jr': [],
},
),
(
r"<NAME>{\'e}",
{'first': ['M.'], 'von': [], 'last': [r"Vign{\'e}"], 'jr': []},
),
###############################################################################
#
# Test cases from
# http://maverick.inria.fr/~Xavier.Decoret/resources/xdkbibtex/bibtex_summary.html
#
###############################################################################
(r'AA BB', {'first': ['AA'], 'von': [], 'last': ['BB'], 'jr': []}),
(r'AA', {'first': [], 'von': [], 'last': ['AA'], 'jr': []}),
(r'AA bb', {'first': ['AA'], 'von': [], 'last': ['bb'], 'jr': []}),
(r'aa', {'first': [], 'von': [], 'last': ['aa'], 'jr': []}),
(r'AA bb CC', {'first': ['AA'], 'von': ['bb'], 'last': ['CC'], 'jr': []}),
(
r'AA bb CC dd EE',
{'first': ['AA'], 'von': ['bb', 'CC', 'dd'], 'last': ['EE'], 'jr': []},
),
(
r'AA 1B cc dd',
{'first': ['AA', '1B'], 'von': ['cc'], 'last': ['dd'], 'jr': []},
),
(
r'AA 1b cc dd',
{'first': ['AA'], 'von': ['1b', 'cc'], 'last': ['dd'], 'jr': []},
),
(
r'AA {b}B cc dd',
{'first': ['AA', '{b}B'], 'von': ['cc'], 'last': ['dd'], 'jr': []},
),
(
r'AA {b}b cc dd',
{'first': ['AA'], 'von': ['{b}b', 'cc'], 'last': ['dd'], 'jr': []},
),
(
r'AA {B}b cc dd',
{'first': ['AA'], 'von': ['{B}b', 'cc'], 'last': ['dd'], 'jr': []},
),
(
r'AA {B}B cc dd',
{'first': ['AA', '{B}B'], 'von': ['cc'], 'last': ['dd'], 'jr': []},
),
(
r'AA \BB{b} cc dd',
{'first': ['AA', r'\BB{b}'], 'von': ['cc'], 'last': ['dd'], 'jr': []},
),
(
r'AA \bb{b} cc dd',
{'first': ['AA'], 'von': [r'\bb{b}', 'cc'], 'last': ['dd'], 'jr': []},
),
(
r'AA {bb} cc DD',
{'first': ['AA', '{bb}'], 'von': ['cc'], 'last': ['DD'], 'jr': []},
),
(
r'AA bb {cc} DD',
{'first': ['AA'], 'von': ['bb'], 'last': ['{cc}', 'DD'], 'jr': []},
),
(
r'AA {bb} CC',
{'first': ['AA', '{bb}'], 'von': [], 'last': ['CC'], 'jr': []},
),
(r'bb CC, AA', {'first': ['AA'], 'von': ['bb'], 'last': ['CC'], 'jr': []}),
(r'bb CC, aa', {'first': ['aa'], 'von': ['bb'], 'last': ['CC'], 'jr': []}),
(
r'bb CC dd EE, AA',
{'first': ['AA'], 'von': ['bb', 'CC', 'dd'], 'last': ['EE'], 'jr': []},
),
(r'bb, AA', {'first': ['AA'], 'von': [], 'last': ['bb'], 'jr': []}),
(
r'bb CC,XX, AA',
{'first': ['AA'], 'von': ['bb'], 'last': ['CC'], 'jr': ['XX']},
),
(
r'bb CC,xx, AA',
{'first': ['AA'], 'von': ['bb'], 'last': ['CC'], 'jr': ['xx']},
),
(r'BB,, AA', {'first': ['AA'], 'von': [], 'last': ['BB'], 'jr': []}),
(
r"Paul \'<NAME>",
{
'first': ['Paul', r"\'Emile"],
'von': [],
'last': ['Victor'],
'jr': [],
},
),
(
r"Paul {\'E}<NAME>",
{
'first': ['Paul', r"{\'E}mile"],
'von': [],
'last': ['Victor'],
'jr': [],
},
),
(
r"Paul \'emile Victor",
{'first': ['Paul'], 'von': [r"\'emile"], 'last': ['Victor'], 'jr': []},
),
(
r"Paul {\'e}<NAME>",
{
'first': ['Paul'],
'von': [r"{\'e}mile"],
'last': ['Victor'],
'jr': [],
},
),
(
r"Victor, Paul \'Emile",
{
'first': ['Paul', r"\'Emile"],
'von': [],
'last': ['Victor'],
'jr': [],
},
),
(
r"Victor, Paul {\'E}mile",
{
'first': ['Paul', r"{\'E}mile"],
'von': [],
'last': ['Victor'],
'jr': [],
},
),
(
r"Victor, Paul \'emile",
{
'first': ['Paul', r"\'emile"],
'von': [],
'last': ['Victor'],
'jr': [],
},
),
(
r"Victor, Paul {\'e}mile",
{
'first': ['Paul', r"{\'e}mile"],
'von': [],
'last': ['Victor'],
'jr': [],
},
),
(
r'<NAME>',
{
'first': ['Dominique', 'Galouzeau'],
'von': ['de'],
'last': ['Villepin'],
'jr': [],
},
),
(
r'Dominique {G}<NAME>',
{
'first': ['Dominique'],
'von': ['{G}alouzeau', 'de'],
'last': ['Villepin'],
'jr': [],
},
),
(
r'Galouzeau de Villepin, Dominique',
{
'first': ['Dominique'],
'von': ['Galouzeau', 'de'],
'last': ['Villepin'],
'jr': [],
},
),
###############################################################################
#
# Test cases from pybtex
# See file /pybtex/tests/parse_name_test.py in the pybtex source.
#
###############################################################################
(
r'<NAME>. Siegman',
{'first': ['A.', 'E.'], 'von': [], 'last': ['Siegman'], 'jr': []},
),
(
r'<NAME>',
{
'first': ['A.', 'G.', 'W.'],
'von': [],
'last': ['Cameron'],
'jr': [],
},
),
(r'<NAME>', {'first': ['A.'], 'von': [], 'last': ['Hoenig'], 'jr': []}),
(
r'<NAME>',
{
'first': ['A.', 'J.', 'Van'],
'von': [],
'last': ['Haagen'],
'jr': [],
},
),
(
r'<NAME>',
{'first': ['A.', 'S.'], 'von': [], 'last': ['Berdnikov'], 'jr': []},
),
(
r'<NAME>',
{'first': ['A.'], 'von': [], 'last': ['Trevorrow'], 'jr': []},
),
(
r'<NAME>',
{'first': ['Adam', 'H.'], 'von': [], 'last': ['Lewenberg'], 'jr': []},
),
(
r'Addison-Wesley Publishing Company',
{
'first': ['Addison-Wesley', 'Publishing'],
'von': [],
'last': ['Company'],
'jr': [],
},
),
(
r'Advogato (<NAME>)',
{
'first': ['Advogato', '(Raph'],
'von': [],
'last': ['Levien)'],
'jr': [],
},
),
(
r'<NAME>',
{
'first': ['Andrea'],
'von': ['de', 'Leeuw', 'van'],
'last': ['Weenen'],
'jr': [],
},
),
(
r'<NAME>',
{'first': ['Andreas'], 'von': [], 'last': ['Geyer-Schulz'], 'jr': []},
),
(
r'Andr{\'<NAME>',
{'first': [r'Andr{\'e}'], 'von': [], 'last': ['Heck'], 'jr': []},
),
(
r'<NAME>{\"u}ggemann-Klein',
{
'first': ['Anne'],
'von': [],
'last': [r'Br{\"u}ggemann-Klein'],
'jr': [],
},
),
(r'Anonymous', {'first': [], 'von': [], 'last': ['Anonymous'], 'jr': []}),
(r'<NAME>', {'first': ['B.'], 'von': [], 'last': ['Beeton'], 'jr': []}),
(
r'<NAME>',
{'first': ['B.', 'Hamilton'], 'von': [], 'last': ['Kelly'], 'jr': []},
),
(
r'<NAME>',
{
'first': ['B.', 'V.', 'Venkata', 'Krishna'],
'von': [],
'last': ['Sastry'],
'jr': [],
},
),
(
r'<NAME>',
{'first': ['Benedict'], 'von': [], 'last': [r'L{\o}fstedt'], 'jr': []},
),
(
r'<NAME>',
{'first': ['Bogus{\l}aw'], 'von': [], 'last': ['Jackowski'], 'jr': []},
),
(
r'<NAME>.\ Thiele',
{
'first': ['Christina', 'A.', 'L.\\'],
'von': [],
'last': ['Thiele'],
'jr': [],
},
),
(
r"<NAME>",
{'first': ['D.'], 'von': [], 'last': ["Men'shikov"], 'jr': []},
),
(
r'Darko \v{Z}ubrini{\'c}',
{
'first': ['Darko'],
'von': [],
'last': [r'\v{Z}ubrini{\'c}'],
'jr': [],
},
),
(
r'<NAME>{\'c}',
{'first': ['Dunja'], 'von': [], 'last': [r'Mladeni{\'c}'], 'jr': []},
),
(
r'<NAME>. {Bell, II}',
{
'first': ['Edwin', 'V.'],
'von': [],
'last': ['{Bell, II}'],
'jr': [],
},
),
(
r'<NAME>. {Bennett, Jr.}',
{
'first': ['Frank', 'G.'],
'von': [],
'last': ['{Bennett, Jr.}'],
'jr': [],
},
),
(
r'Fr{\'e}d{\'e}<NAME>',
{
'first': [r'Fr{\'e}d{\'e}ric'],
'von': [],
'last': ['Boulanger'],
'jr': [],
},
),
(
r'Ford, Jr., Henry',
{'first': ['Henry'], 'von': [], 'last': ['Ford'], 'jr': ['Jr.']},
),
(
r'mr Ford, Jr., Henry',
{'first': ['Henry'], 'von': ['mr'], 'last': ['Ford'], 'jr': ['Jr.']},
),
(r'<NAME>', {'first': ['Fukui'], 'von': [], 'last': ['Rei'], 'jr': []}),
(
r'<NAME>',
{'first': ['G.'], 'von': [], 'last': [r'Gr{\"a}tzer'], 'jr': []},
),
(
r'<NAME>',
{'first': ['George'], 'von': [], 'last': [r'Gr{\"a}tzer'], 'jr': []},
),
(
r'<NAME>',
{
'first': ['Georgia', 'K.', 'M.'],
'von': [],
'last': ['Tobin'],
'jr': [],
},
),
(
r'<NAME>',
{
'first': ['Gilbert'],
'von': ['van', 'den'],
'last': ['Dobbelsteen'],
'jr': [],
},
),
(
r'Gy{\"o}ngyi Bujdos{\'o}',
{
'first': [r'Gy{\"o}ngyi'],
'von': [],
'last': [r'Bujdos{\'o}'],
'jr': [],
},
),
(
r'<NAME>}rgensen',
{'first': ['Helmut'], 'von': [], 'last': [r'J{\"u}rgensen'], 'jr': []},
),
(
r'<NAME>{\ss}',
{'first': ['Herbert'], 'von': [], 'last': ['Vo{\ss}'], 'jr': []},
),
(
r"H{\'a}n Th{\^e}\llap{\raise 0.5ex\hbox{\'{\relax}}} Th{\'a}nh",
{
'first': [
r'H{\'a}n',
r"Th{\^e}\llap{\raise 0.5ex\hbox{\'{\relax}}}",
],
'von': [],
'last': [r"Th{\'a}nh"],
'jr': [],
},
),
(
r"H{\`a}n Th\^e\llap{\raise0.5ex\hbox{\'{\relax}}} Th{\`a}nh",
{
'first': [r'H{\`a}n', r"Th\^e\llap{\raise0.5ex\hbox{\'{\relax}}}"],
'von': [],
'last': [r"Th{\`a}nh"],
'jr': [],
},
),
(
r'<NAME>{\'y}',
{'first': ['J.'], 'von': [], 'last': [r'Vesel{\'y}'], 'jr': []},
),
(
r'<NAME>\'{\i}<NAME>',
{
'first': ['Javier', r'Rodr\'{\i}guez'],
'von': [],
'last': ['Laguna'],
'jr': [],
},
),
(
r'Ji\v{r}\'{\i} Vesel{\'y}',
{
'first': [r'Ji\v{r}\'{\i}'],
'von': [],
'last': [r'Vesel{\'y}'],
'jr': [],
},
),
(
r'Ji\v{r}\'{\i} Zlatu{\v{s}}ka',
{
'first': [r'Ji\v{r}\'{\i}'],
'von': [],
'last': [r'Zlatu{\v{s}}ka'],
'jr': [],
},
),
(
r'Ji\v{r}{\'\i} Vesel{\'y}',
{
'first': [r'Ji\v{r}{\'\i}'],
'von': [],
'last': [r'Vesel{\'y}'],
'jr': [],
},
),
(
r'Ji\v{r}{\'{\i}}Zlatu{\v{s}}ka',
{
'first': [],
'von': [],
'last': [r'Ji\v{r}{\'{\i}}Zlatu{\v{s}}ka'],
'jr': [],
},
),
(
r'<NAME>',
{'first': ['Jim'], 'von': [], 'last': ['Hef{}feron'], 'jr': []},
),
(
r'<NAME>',
{'first': [r'J{\"o}rg'], 'von': [], 'last': ['Knappen'], 'jr': []},
),
(
r'<NAME>',
{
'first': [r'J{\"o}rgen', 'L.'],
'von': [],
'last': ['Pind'],
'jr': [],
},
),
(
r'J{\'e}r\^ome Laurens',
{'first': [r'J{\'e}r\^ome'], 'von': [], 'last': ['Laurens'], 'jr': []},
),
(
r'J{{\"o}}<NAME>',
{'first': [r'J{{\"o}}rg'], 'von': [], 'last': ['Knappen'], 'jr': []},
),
(
r'<NAME>',
{'first': ['K.', 'Anil'], 'von': [], 'last': ['Kumar'], 'jr': []},
),
(
r'<NAME>{\'a}k',
{'first': ['Karel'], 'von': [], 'last': [r'Hor{\'a}k'], 'jr': []},
),
(
r'<NAME>\'{\i}{\v{s}}ka',
{
'first': ['Karel'],
'von': [],
'last': [r'P\'{\i}{\v{s}}ka'],
'jr': [],
},
),
(
r'<NAME>{\'\i}{\v{s}}ka',
{
'first': ['Karel'],
'von': [],
'last': [r'P{\'\i}{\v{s}}ka'],
'jr': [],
},
),
(
r'<NAME>\'{y}',
{'first': ['Karel'], 'von': [], 'last': [r'Skoup\'{y}'], 'jr': []},
),
(
r'<NAME>{\'y}',
{'first': ['Karel'], 'von': [], 'last': [r'Skoup{\'y}'], 'jr': []},
),
(
r'<NAME>',
{'first': ['Kent'], 'von': [], 'last': ['McPherson'], 'jr': []},
),
(
r'<NAME>}ppner',
{'first': ['Klaus'], 'von': [], 'last': [r'H{\"o}ppner'], 'jr': []},
),
(
r'<NAME>{\"o}m',
{'first': ['Lars'], 'von': [], 'last': [r'Hellstr{\"o}m'], 'jr': []},
),
(
r'<NAME>',
{
'first': ['Laura', 'Elizabeth'],
'von': [],
'last': ['Jackson'],
'jr': [],
},
),
(
r'<NAME>{\'{\i}}az',
{'first': ['M.'], 'von': [], 'last': [r'D{\'{\i}}az'], 'jr': []},
),
(
r'M/iche/al /O Searc/oid',
{
'first': [r'M/iche/al', r'/O'],
'von': [],
'last': [r'Searc/oid'],
'jr': [],
},
),
(
r'<NAME>{\'c}ko',
{'first': ['Marek'], 'von': [], 'last': [r'Ry{\'c}ko'], 'jr': []},
),
(
r'<NAME>',
{
'first': ['Marina', 'Yu.'],
'von': [],
'last': ['Nikulina'],
'jr': [],
},
),
(
r'<NAME>{\'{\i}}az',
{'first': ['Max'], 'von': [], 'last': [r'D{\'{\i}}az'], 'jr': []},
),
(
r'<NAME>',
{
'first': ['Merry', 'Obrecht'],
'von': [],
'last': ['Sawdey'],
'jr': [],
},
),
(
r'<NAME>{\'a}kov{\'a}',
{
'first': ['Miroslava'],
'von': [],
'last': [r'Mis{\'a}kov{\'a}'],
'jr': [],
},
),
(
r'<NAME>',
{
'first': ['N.', 'A.', 'F.', 'M.'],
'von': [],
'last': ['Poppelier'],
'jr': [],
},
),
(
r'<NAME>',
{
'first': ['Nico', 'A.', 'F.', 'M.'],
'von': [],
'last': ['Poppelier'],
'jr': [],
},
),
(
r'<NAME>',
{'first': ['Onofrio'], 'von': ['de'], 'last': ['Bari'], 'jr': []},
),
(
r'<NAME>{\'a}lez',
{
'first': ['Pablo'],
'von': [],
'last': [r'Rosell-Gonz{\'a}lez'],
'jr': [],
},
),
(
r'<NAME> Bruna',
{'first': ['Paco', 'La'], 'von': [], 'last': ['Bruna'], 'jr': []},
),
(
r'Paul Franchi-Zannettacci',
{
'first': ['Paul'],
'von': [],
'last': ['Franchi-Zannettacci'],
'jr': [],
},
),
(
r'Pavel \v{S}eve\v{c}ek',
{
'first': ['Pavel'],
'von': [],
'last': [r'\v{S}eve\v{c}ek'],
'jr': [],
},
),
(
r'<NAME>{\v{s}}ak',
{'first': ['Petr'], 'von': [], 'last': [r'Ol{\v{s}}ak'], 'jr': []},
),
(
r'<NAME>{\v{s}}{\'a}k',
{'first': ['Petr'], 'von': [], 'last': [r'Ol{\v{s}}{\'a}k'], 'jr': []},
),
(
r'Primo\v{z} Peterlin',
{'first': [r'Primo\v{z}'], 'von': [], 'last': ['Peterlin'], 'jr': []},
),
(
r'Prof. <NAME>',
{'first': ['Prof.', 'Alban'], 'von': [], 'last': ['Grimm'], 'jr': []},
),
(
r'P{\'<NAME>{\'a}r',
{
'first': [r'P{\'e}ter'],
'von': [],
'last': [r'Husz{\'a}r'],
'jr': [],
},
),
(
r'P{\'<NAME>{\'o}',
{'first': [r'P{\'e}ter'], 'von': [], 'last': [r'Szab{\'o}'], 'jr': []},
),
(
r'Rafa{\l}\.Zbikowski',
{'first': [], 'von': [], 'last': [r'Rafa{\l}\.Zbikowski'], 'jr': []},
),
(
r'<NAME>}pf',
{'first': ['Rainer'], 'von': [], 'last': [r'Sch{\"o}pf'], 'jr': []},
),
(
r'<NAME>. (Frank) Pappas',
{
'first': ['T.', 'L.', '(Frank)'],
'von': [],
'last': ['Pappas'],
'jr': [],
},
),
(
r'TUG 2004 conference',
{
'first': ['TUG', '2004'],
'von': [],
'last': ['conference'],
'jr': [],
},
),
(
r'TUG {\sltt DVI} Driver Standards Committee',
{
'first': ['TUG', '{\sltt DVI}', 'Driver', 'Standards'],
'von': [],
'last': ['Committee'],
'jr': [],
},
),
(
r'TUG {\sltt xDVIx} Driver Standards Committee',
{
'first': ['TUG'],
'von': ['{\sltt xDVIx}'],
'last': ['Driver', 'Standards', 'Committee'],
'jr': [],
},
),
(
r'University of M{\"u}nster',
{
'first': ['University'],
'von': ['of'],
'last': [r'M{\"u}nster'],
'jr': [],
},
),
(
r'<NAME>',
{
'first': ['Walter'],
'von': ['van', 'der'],
'last': ['Laan'],
'jr': [],
},
),
(
r'<NAME>',
{'first': ['Wendy', 'G.'], 'von': [], 'last': ['McKay'], 'jr': []},
),
(
r'<NAME>',
{'first': ['Wendy'], 'von': [], 'last': ['McKay'], 'jr': []},
),
(
r'W{\l}<NAME>',
{'first': [r'W{\l}odek'], 'von': [], 'last': ['Bzyl'], 'jr': []},
),
(
r'\LaTeX Project Team',
{
'first': [r'\LaTeX', 'Project'],
'von': [],
'last': ['Team'],
'jr': [],
},
),
(
r'\rlap{<NAME>ahn}',
{'first': [], 'von': [], 'last': [r'\rlap{Lutz Birkhahn}'], 'jr': []},
),
(
r'{<NAME>{}feron}',
{'first': [], 'von': [], 'last': ['{<NAME>{}feron}'], 'jr': []},
),
(
r'{<NAME>\o{}<NAME>}',
{
'first': [],
'von': [],
'last': ['{<NAME>\o{}<NAME>}'],
'jr': [],
},
),
(
r'{TUG} {Working} {Group} on a {\TeX} {Directory} {Structure}',
{
'first': ['{TUG}', '{Working}', '{Group}'],
'von': ['on', 'a'],
'last': [r'{\TeX}', '{Directory}', '{Structure}'],
'jr': [],
},
),
(
r'{The \TUB{} Team}',
{'first': [], 'von': [], 'last': [r'{The \TUB{} Team}'], 'jr': []},
),
(
r'{\LaTeX} project team',
{
'first': [r'{\LaTeX}'],
'von': ['project'],
'last': ['team'],
'jr': [],
},
),
(
r'{\NTG{} \TeX{} future working group}',
{
'first': [],
'von': [],
'last': [r'{\NTG{} \TeX{} future working group}'],
'jr': [],
},
),
(
r'{{\LaTeX\,3} Project Team}',
{
'first': [],
'von': [],
'last': [r'{{\LaTeX\,3} Project Team}'],
'jr': [],
},
),
(
r'<NAME>, <NAME>.',
{
'first': ['Derik', 'Mamania', 'M.'],
'von': [],
'last': ['Johansen', 'Kyle'],
'jr': [],
},
),
(
r"<NAME> <NAME> d'<NAME> von und zu Liechtenstein",
{
'first': [
'Johannes',
'Adam',
'Ferdinand',
'Alois',
'Josef',
'Maria',
'Marko',
],
'von': ["d'Aviano", 'Pius', 'von', 'und', 'zu'],
'last': ['Liechtenstein'],
'jr': [],
},
),
(
r"Brand\~{a}o, F",
{'first': ['F'], 'von': [], 'last': ['Brand\\', '{a}o'], 'jr': []},
),
)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"bibdeskparser.customization.splitname"
] | [((28218, 28233), 'unittest.main', 'unittest.main', ([], {}), '()\n', (28231, 28233), False, 'import unittest\n'), ((307, 320), 'bibdeskparser.customization.splitname', 'splitname', (['""""""'], {}), "('')\n", (316, 320), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((473, 490), 'bibdeskparser.customization.splitname', 'splitname', (['""" """'], {}), "(' ')\n", (482, 490), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((637, 656), 'bibdeskparser.customization.splitname', 'splitname', (['""" \t~~"""'], {}), "(' \\t~~')\n", (646, 656), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((2175, 2210), 'bibdeskparser.customization.splitname', 'splitname', (['"""BB,"""'], {'strict_mode': '(False)'}), "('BB,', strict_mode=False)\n", (2184, 2210), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((2380, 2418), 'bibdeskparser.customization.splitname', 'splitname', (['"""BB, """'], {'strict_mode': '(False)'}), "('BB, ', strict_mode=False)\n", (2389, 2418), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((2588, 2629), 'bibdeskparser.customization.splitname', 'splitname', (['"""BB, ~\t """'], {'strict_mode': '(False)'}), "('BB, ~\\t ', strict_mode=False)\n", (2597, 2629), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((2821, 2859), 'bibdeskparser.customization.splitname', 'splitname', (['""", ~\t"""'], {'strict_mode': '(False)'}), "(', ~\\t', strict_mode=False)\n", (2830, 2859), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((3227, 3273), 'bibdeskparser.customization.splitname', 'splitname', (['"""AA, BB, CC, DD"""'], {'strict_mode': '(False)'}), "('AA, BB, CC, DD', strict_mode=False)\n", (3236, 3273), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((3513, 3554), 'bibdeskparser.customization.splitname', 'splitname', (['"""AA {BB CC"""'], 
{'strict_mode': '(False)'}), "('AA {BB CC', strict_mode=False)\n", (3522, 3554), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((3815, 3858), 'bibdeskparser.customization.splitname', 'splitname', (['"""AA {{{BB CC"""'], {'strict_mode': '(False)'}), "('AA {{{BB CC', strict_mode=False)\n", (3824, 3858), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((4182, 4227), 'bibdeskparser.customization.splitname', 'splitname', (['"""AA {{{BB} CC}"""'], {'strict_mode': '(False)'}), "('AA {{{BB} CC}', strict_mode=False)\n", (4191, 4227), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((4615, 4656), 'bibdeskparser.customization.splitname', 'splitname', (['"""AA BB CC}"""'], {'strict_mode': '(False)'}), "('AA BB CC}', strict_mode=False)\n", (4624, 4656), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((4976, 5019), 'bibdeskparser.customization.splitname', 'splitname', (['"""AA BB CC}}}"""'], {'strict_mode': '(False)'}), "('AA BB CC}}}', strict_mode=False)\n", (4985, 5019), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((5343, 5388), 'bibdeskparser.customization.splitname', 'splitname', (['"""{AA {BB CC}}}"""'], {'strict_mode': '(False)'}), "('{AA {BB CC}}}', strict_mode=False)\n", (5352, 5388), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((5766, 5795), 'bibdeskparser.customization.splitname', 'splitname', (['"""CC, dd, {AA, BB}"""'], {}), "('CC, dd, {AA, BB}')\n", (5775, 5795), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((901, 935), 'bibdeskparser.customization.splitname', 'splitname', (['"""BB,"""'], {'strict_mode': '(True)'}), "('BB,', strict_mode=True)\n", (910, 935), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((993, 1029), 'bibdeskparser.customization.splitname', 'splitname', (['"""BB, """'], {'strict_mode': '(True)'}), "('BB, ', 
strict_mode=True)\n", (1002, 1029), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((1087, 1125), 'bibdeskparser.customization.splitname', 'splitname', (['"""BB, ~\t"""'], {'strict_mode': '(True)'}), "('BB, ~\\t', strict_mode=True)\n", (1096, 1125), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((1183, 1219), 'bibdeskparser.customization.splitname', 'splitname', (['""", ~\t"""'], {'strict_mode': '(True)'}), "(', ~\\t', strict_mode=True)\n", (1192, 1219), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((1299, 1344), 'bibdeskparser.customization.splitname', 'splitname', (['"""AA, BB, CC, DD"""'], {'strict_mode': '(True)'}), "('AA, BB, CC, DD', strict_mode=True)\n", (1308, 1344), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((1460, 1500), 'bibdeskparser.customization.splitname', 'splitname', (['"""AA {BB CC"""'], {'strict_mode': '(True)'}), "('AA {BB CC', strict_mode=True)\n", (1469, 1500), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((1558, 1600), 'bibdeskparser.customization.splitname', 'splitname', (['"""AA {{{BB CC"""'], {'strict_mode': '(True)'}), "('AA {{{BB CC', strict_mode=True)\n", (1567, 1600), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((1658, 1702), 'bibdeskparser.customization.splitname', 'splitname', (['"""AA {{{BB} CC}"""'], {'strict_mode': '(True)'}), "('AA {{{BB} CC}', strict_mode=True)\n", (1667, 1702), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((1793, 1833), 'bibdeskparser.customization.splitname', 'splitname', (['"""AA BB CC}"""'], {'strict_mode': '(True)'}), "('AA BB CC}', strict_mode=True)\n", (1802, 1833), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((1891, 1933), 'bibdeskparser.customization.splitname', 'splitname', (['"""AA BB CC}}}"""'], {'strict_mode': '(True)'}), "('AA BB CC}}}', 
strict_mode=True)\n", (1900, 1933), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((1991, 2035), 'bibdeskparser.customization.splitname', 'splitname', (['"""{AA {BB CC}}}"""'], {'strict_mode': '(True)'}), "('{AA {BB CC}}}', strict_mode=True)\n", (2000, 2035), False, 'from bibdeskparser.customization import InvalidName, splitname\n'), ((6222, 6237), 'bibdeskparser.customization.splitname', 'splitname', (['name'], {}), '(name)\n', (6231, 6237), False, 'from bibdeskparser.customization import InvalidName, splitname\n')] |
import re
import logging
logger = logging.getLogger(__name__)
def get_sorted_pair(a, b):
# ensure citation pair is always in same order
if a > b:
return (a, b)
else:
return (b, a)
def to_label(t, labels):
if t in labels:
return t
else:
return 'other'
def normalize_title(t):
if t:
t = t.replace('.', ' ').replace('-', ' ').strip().lower()
#t = re.sub(r'\W+', '', t)
return t
def normalize_section(title):
if title:
return re.sub(r'[\.0-9]', '',
title.
strip() \
.lower() \
.replace('conclusions', 'conclusion') \
.replace('methodology', 'method') \
.replace('methods', 'method') \
.replace('related works', 'related work') \
.replace('models', 'model') \
.replace('datasets', 'dataset') \
.replace('our ', '') \
.replace('evaluations', 'evaluation') \
.replace('experiments', 'experiment')
).strip()
# .replace('conclusion and future perspectives', 'conclusion')\
# .replace('materials and methods', 'methods')
def get_text_from_doc(doc) -> str:
"""
Build document text from title + abstract
:param doc: S2 paper
:return: Document text
"""
text = ''
if 'title' in doc:
text += doc['title']
if doc['abstract']:
text += '\n' + doc['abstract']
return text
def get_text_from_doc_id(doc_id: str, doc_index) -> str:
"""
Build document text from title + abstract
:param doc_id: S2-id
:param doc_index: S2-id to S2-paper data
:return: Document text
"""
if doc_id in doc_index:
return get_text_from_doc(doc_index[doc_id])
else:
raise ValueError(f'Document not found in index: {doc_id}')
# resolve 'and' titles and filter for out-of-index docs
def resolve_and_sect_titles(items, doc_index=None):
for from_s2_id, to_s2_id, sect_generic, sect_title, sect_marker in items:
if doc_index and (from_s2_id not in doc_index or to_s2_id not in doc_index):
# One of the IDs does not exist in document index
continue
sect_title = normalize_section(sect_title)
if sect_title:
# Resolve combined sections
for t in sect_title.split(' and '):
if t:
yield (from_s2_id, to_s2_id, t, sect_marker)
| [
"logging.getLogger"
] | [((35, 62), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (52, 62), False, 'import logging\n')] |
bl_info = {
"name": "Import Fromsoft FLVER models",
"description":
"Import models from various Fromsoft games such as Dark Souls",
"author": "<NAME>",
"version": (0, 1, 0),
"blender": (2, 80, 0),
"category": "Import-Export",
"location": "File > Import",
"warning": "",
"support": "COMMUNITY",
"wiki_url": "", # TODO: wiki url
"tracker_url": "", # TODO: tracker url
}
_submodules = {
"importer",
"flver",
"reader",
}
# Reload submodules on addon reload
if "bpy" in locals():
import importlib
for submodule in _submodules:
if submodule in locals():
importlib.reload(locals()[submodule])
import bpy
from . import importer
from bpy_extras.io_utils import ImportHelper
from bpy.props import StringProperty, BoolProperty
class FlverImporter(bpy.types.Operator, ImportHelper):
bl_idname = "import_scene.flver"
bl_label = "Fromsoft (.flver)"
filter_glob = StringProperty(default="*.flver", options={"HIDDEN"})
transpose_y_and_z = BoolProperty(
name="Transpose Y and Z axes",
description=("This will correct the orientation of the model. " +
"Rarely necessary to disable."),
default=True)
import_skeleton = BoolProperty(
name="Import skeleton",
description=("Disable to prevent the creation of an Armature " +
"and corresponding vertex groups."),
default=True)
connect_bones = BoolProperty(
name="Connect bones",
description=(
"Disable to import disjointed bones rotated about their " +
"original Euler angles. This may be potentially desireable "
"for authoring derivative FLVER files."),
default=True)
def execute(self, context):
importer.run(context=context,
path=self.filepath,
transpose_y_and_z=self.transpose_y_and_z,
import_skeleton=self.import_skeleton,
connect_bones=self.connect_bones)
return {"FINISHED"}
def menu_import(self, context):
self.layout.operator(FlverImporter.bl_idname)
def register():
bpy.utils.register_class(FlverImporter)
bpy.types.TOPBAR_MT_file_import.append(menu_import)
def unregister():
bpy.types.TOPBAR_MT_file_import.remove(menu_import)
bpy.utils.unregister_class(FlverImporter)
| [
"bpy.utils.unregister_class",
"bpy.props.BoolProperty",
"bpy.props.StringProperty",
"bpy.types.TOPBAR_MT_file_import.remove",
"bpy.types.TOPBAR_MT_file_import.append",
"bpy.utils.register_class"
] | [((994, 1047), 'bpy.props.StringProperty', 'StringProperty', ([], {'default': '"""*.flver"""', 'options': "{'HIDDEN'}"}), "(default='*.flver', options={'HIDDEN'})\n", (1008, 1047), False, 'from bpy.props import StringProperty, BoolProperty\n'), ((1075, 1238), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Transpose Y and Z axes"""', 'description': "('This will correct the orientation of the model. ' +\n 'Rarely necessary to disable.')", 'default': '(True)'}), "(name='Transpose Y and Z axes', description=\n 'This will correct the orientation of the model. ' +\n 'Rarely necessary to disable.', default=True)\n", (1087, 1238), False, 'from bpy.props import StringProperty, BoolProperty\n'), ((1307, 1466), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Import skeleton"""', 'description': "('Disable to prevent the creation of an Armature ' +\n 'and corresponding vertex groups.')", 'default': '(True)'}), "(name='Import skeleton', description=\n 'Disable to prevent the creation of an Armature ' +\n 'and corresponding vertex groups.', default=True)\n", (1319, 1466), False, 'from bpy.props import StringProperty, BoolProperty\n'), ((1533, 1766), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Connect bones"""', 'description': "('Disable to import disjointed bones rotated about their ' +\n 'original Euler angles. This may be potentially desireable for authoring derivative FLVER files.'\n )", 'default': '(True)'}), "(name='Connect bones', description=\n 'Disable to import disjointed bones rotated about their ' +\n 'original Euler angles. 
This may be potentially desireable for authoring derivative FLVER files.'\n , default=True)\n", (1545, 1766), False, 'from bpy.props import StringProperty, BoolProperty\n'), ((2265, 2304), 'bpy.utils.register_class', 'bpy.utils.register_class', (['FlverImporter'], {}), '(FlverImporter)\n', (2289, 2304), False, 'import bpy\n'), ((2310, 2361), 'bpy.types.TOPBAR_MT_file_import.append', 'bpy.types.TOPBAR_MT_file_import.append', (['menu_import'], {}), '(menu_import)\n', (2348, 2361), False, 'import bpy\n'), ((2390, 2441), 'bpy.types.TOPBAR_MT_file_import.remove', 'bpy.types.TOPBAR_MT_file_import.remove', (['menu_import'], {}), '(menu_import)\n', (2428, 2441), False, 'import bpy\n'), ((2447, 2488), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['FlverImporter'], {}), '(FlverImporter)\n', (2473, 2488), False, 'import bpy\n')] |
# -*- coding: utf-8 -*-
# https://github.com/Kodi-vStream/venom-xbmc-addons
from resources.lib.gui.contextElement import cContextElement
from resources.lib.gui.guiElement import cGuiElement
from resources.lib.db import cDb
from resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from resources.lib.handler.inputParameterHandler import cInputParameterHandler
from resources.lib.handler.pluginHandler import cPluginHandler
from resources.lib.parser import cParser
from resources.lib.util import cUtil, QuotePlus
from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc
import re, xbmcplugin
class cGui():
SITE_NAME = 'cGui'
CONTENT = 'files'
searchResults = []
# modif 22/06
listing = []
ADDON = addon()
if isKrypton():
CONTENT = 'addons'
def addMovie(self, sId, sFunction, sLabel, sIcon, sThumbnail, sDesc, oOutputParameterHandler = ''):
cGui.CONTENT = 'movies'
oGuiElement = cGuiElement()
oGuiElement.setSiteName(sId)
oGuiElement.setFunction(sFunction)
oGuiElement.setTitle(sLabel)
oGuiElement.setIcon(sIcon)
oGuiElement.setThumbnail(sThumbnail)
oGuiElement.setPoster(sThumbnail)
oGuiElement.setMeta(1)
oGuiElement.setDescription(sDesc)
#oGuiElement.setMovieFanart()
oGuiElement.setCat(1)
if oOutputParameterHandler.getValue('sMovieTitle'):
sTitle = oOutputParameterHandler.getValue('sMovieTitle')
oGuiElement.setFileName(sTitle)
try:
self.addFolder(oGuiElement, oOutputParameterHandler)
except:
pass
# Coffret et integrale de films
def addMoviePack(self, sId, sFunction, sLabel, sIcon, sThumbnail, sDesc, oOutputParameterHandler = ''):
cGui.CONTENT = 'movies'
oGuiElement = cGuiElement()
oGuiElement.setSiteName(sId)
oGuiElement.setFunction(sFunction)
oGuiElement.setTitle(sLabel)
oGuiElement.setIcon(sIcon)
oGuiElement.setThumbnail(sThumbnail)
oGuiElement.setPoster(sThumbnail)
oGuiElement.setMeta(3)
oGuiElement.setDescription(sDesc)
#oGuiElement.setMovieFanart()
oGuiElement.setCat(1)
if oOutputParameterHandler.getValue('sMovieTitle'):
sTitle = oOutputParameterHandler.getValue('sMovieTitle')
oGuiElement.setFileName(sTitle)
try:
self.addFolder(oGuiElement, oOutputParameterHandler)
except:
pass
def addTV(self, sId, sFunction, sLabel, sIcon, sThumbnail, sDesc, oOutputParameterHandler = ''):
cGui.CONTENT = 'tvshows'
oGuiElement = cGuiElement()
oGuiElement.setSiteName(sId)
oGuiElement.setFunction(sFunction)
oGuiElement.setTitle(sLabel)
oGuiElement.setIcon(sIcon)
oGuiElement.setThumbnail(sThumbnail)
oGuiElement.setPoster(sThumbnail)
oGuiElement.setMeta(2)
oGuiElement.setDescription(sDesc)
#oGuiElement.setTvFanart()
oGuiElement.setCat(2)
if oOutputParameterHandler.getValue('sMovieTitle'):
sTitle = oOutputParameterHandler.getValue('sMovieTitle')
oGuiElement.setFileName(sTitle)
try:
self.addFolder(oGuiElement, oOutputParameterHandler)
except:
pass
def addMisc(self, sId, sFunction, sLabel, sIcon, sThumbnail, sDesc, oOutputParameterHandler = ''):
#cGui.CONTENT = 'movies'
oGuiElement = cGuiElement()
oGuiElement.setSiteName(sId)
oGuiElement.setFunction(sFunction)
oGuiElement.setTitle(sLabel)
oGuiElement.setIcon(sIcon)
oGuiElement.setThumbnail(sThumbnail)
oGuiElement.setDescription(sDesc)
#oGuiElement.setPoster(sThumbnail)
oGuiElement.setMeta(0)
#oGuiElement.setDirFanart(sIcon)
oGuiElement.setCat(5)
if oOutputParameterHandler.getValue('sMovieTitle'):
sTitle = oOutputParameterHandler.getValue('sMovieTitle')
oGuiElement.setFileName(sTitle)
self.createContexMenuWatch(oGuiElement, oOutputParameterHandler)
#self.createContexMenuinfo(oGuiElement, oOutputParameterHandler)
self.createContexMenuFav(oGuiElement, oOutputParameterHandler)
try:
self.addFolder(oGuiElement, oOutputParameterHandler)
except:
pass
#non utiliser le 18/04
#def addFav(self, sId, sFunction, sLabel, sIcon, sThumbnail, fanart, oOutputParameterHandler = ''):
#cGui.CONTENT = 'files'
#oGuiElement = cGuiElement()
#oGuiElement.setSiteName(sId)
#oGuiElement.setFunction(sFunction)
#oGuiElement.setTitle(sLabel)
#oGuiElement.setIcon(sIcon)
#oGuiElement.setMeta(0)
#oGuiElement.setThumbnail(sThumbnail)
#oGuiElement.setFanart(fanart)
#self.createContexMenuDelFav(oGuiElement, oOutputParameterHandler)
#self.addFolder(oGuiElement, oOutputParameterHandler)
    def addLink(self, sId, sFunction, sLabel, sThumbnail, sDesc, oOutputParameterHandler = ''):
        """Add a plain link entry (meta 0, content 'files').

        The category is taken from the incoming plugin parameters ('sCat')
        rather than from an argument.
        """
        cGui.CONTENT = 'files'
        oGuiElement = cGuiElement()
        oGuiElement.setSiteName(sId)
        oGuiElement.setFunction(sFunction)
        oGuiElement.setTitle(sLabel)
        #oGuiElement.setIcon(sIcon)
        oGuiElement.setThumbnail(sThumbnail)
        oGuiElement.setPoster(sThumbnail)
        oGuiElement.setDescription(sDesc)
        oGuiElement.setMeta(0)
        #oGuiElement.setDirFanart('')
        # carry the current category over from the calling URL, if any
        oInputParameterHandler = cInputParameterHandler()
        sCat = oInputParameterHandler.getValue('sCat')
        if sCat:
            oGuiElement.setCat(sCat)
        try:
            self.addFolder(oGuiElement, oOutputParameterHandler)
        except:
            pass
def addDir(self, sId, sFunction, sLabel, sIcon, oOutputParameterHandler = ''):
oGuiElement = cGuiElement()
oGuiElement.setSiteName(sId)
oGuiElement.setFunction(sFunction)
oGuiElement.setTitle(sLabel)
oGuiElement.setIcon(sIcon)
oGuiElement.setThumbnail(oGuiElement.getIcon())
oGuiElement.setMeta(0)
#oGuiElement.setDirFanart(sIcon)
oOutputParameterHandler.addParameter('sFav', sFunction)
# context parametre
if isKrypton():
self.createContexMenuSettings(oGuiElement, oOutputParameterHandler)
try:
self.addFolder(oGuiElement, oOutputParameterHandler)
except:
pass
    def addNext(self, sId, sFunction, sLabel, oOutputParameterHandler):
        """Add a 'next page' entry (category 5) with page-select and favourite menus."""
        oGuiElement = cGuiElement()
        oGuiElement.setSiteName(sId)
        oGuiElement.setFunction(sFunction)
        oGuiElement.setTitle(sLabel)
        oGuiElement.setIcon('next.png')
        oGuiElement.setThumbnail(oGuiElement.getIcon())
        oGuiElement.setMeta(0)
        #oGuiElement.setDirFanart('next.png')
        oGuiElement.setCat(5)
        self.createContexMenuPageSelect(oGuiElement, oOutputParameterHandler)
        self.createContexMenuFav(oGuiElement, oOutputParameterHandler)
        self.addFolder(oGuiElement, oOutputParameterHandler)
    # prefer oGui.addText(SITE_IDENTIFIER) over addNone
    def addNone(self, sId):
        """Thin wrapper: display the default 'no result' text row for *sId*."""
        return self.addText(sId)
    def addText(self, sId, sLabel = '', sIcon = 'none.png'):
        """Add a non-clickable informational row (function 'DoNothing').

        Falls back to the localized 'no result' string (30204) when *sLabel*
        is empty.
        """
        # No placeholder row while a global search is running
        if window(10101).getProperty('search') == 'true':
            return
        oGuiElement = cGuiElement()
        oGuiElement.setSiteName(sId)
        oGuiElement.setFunction('DoNothing')
        if not sLabel:
            sLabel = self.ADDON.VSlang(30204)
        oGuiElement.setTitle(sLabel)
        oGuiElement.setIcon(sIcon)
        oGuiElement.setThumbnail(oGuiElement.getIcon())
        oGuiElement.setMeta(0)
        oOutputParameterHandler = cOutputParameterHandler()
        self.addFolder(oGuiElement, oOutputParameterHandler)
# non utiliser depuis le 22/04
def addMovieDB(self, sId, sFunction, sLabel, sIcon, sThumbnail, sFanart, oOutputParameterHandler = ''):
cGui.CONTENT = 'movies'
oGuiElement = cGuiElement()
oGuiElement.setSiteName(sId)
oGuiElement.setFunction(sFunction)
oGuiElement.setTitle(sLabel)
oGuiElement.setIcon(sIcon)
oGuiElement.setMeta(1)
oGuiElement.setThumbnail(sThumbnail)
oGuiElement.setFanart(sFanart)
oGuiElement.setCat(7)
if oOutputParameterHandler.getValue('sMovieTitle'):
sTitle = oOutputParameterHandler.getValue('sMovieTitle')
oGuiElement.setFileName(sTitle)
self.addFolder(oGuiElement, oOutputParameterHandler)
# non utiliser 22/04
def addTVDB(self, sId, sFunction, sLabel, sIcon, sThumbnail, sFanart, oOutputParameterHandler = ''):
cGui.CONTENT = 'tvshows'
oGuiElement = cGuiElement()
oGuiElement.setSiteName(sId)
oGuiElement.setFunction(sFunction)
oGuiElement.setTitle(sLabel)
oGuiElement.setIcon(sIcon)
oGuiElement.setMeta(2)
oGuiElement.setThumbnail(sThumbnail)
oGuiElement.setFanart(sFanart)
oGuiElement.setCat(7)
if oOutputParameterHandler.getValue('sMovieTitle'):
sTitle = oOutputParameterHandler.getValue('sMovieTitle')
oGuiElement.setFileName(sTitle)
self.addFolder(oGuiElement, oOutputParameterHandler)
    # display non-playable (folder) entries
    def addFolder(self, oGuiElement, oOutputParameterHandler = '', _isFolder = True):
        """Queue a non-playable directory entry in self.listing.

        Copies selected incoming parameters onto the element, builds the
        ListItem, attaches the meta-aware context menus and appends the
        (url, item, isFolder) tuple for the final addDirectoryItems() call.
        """
        # global search mode: collect the result instead of rendering it
        if window(10101).getProperty('search') == 'true':
            import copy
            cGui.searchResults.append({'guiElement': oGuiElement, 'params': copy.deepcopy(oOutputParameterHandler)})
            return
        # forward optional metadata from the URL parameters onto the element
        params = {
            'siteUrl': oGuiElement.setSiteUrl, # indispensable
            'sTmdbId': oGuiElement.setTmdbId,
            'sImbdId': oGuiElement.setImdbId, # inutile ?
            'sYear': oGuiElement.setYear,
        }
        # NOTE: dict.iteritems() is Python 2 only
        for sParam, callback in params.iteritems():
            value = oOutputParameterHandler.getValue(sParam)
            if value:
                callback(value)
        oListItem = self.createListItem(oGuiElement)
        oListItem.setProperty('IsPlayable', 'false')
        # affiche tag HD
        # if '1080' in oGuiElement.getTitle():
        # oListItem.addStreamInfo('video', {'aspect': '1.78', 'width': 1920, 'height': 1080})
        # elif '720' in oGuiElement.getTitle():
        # oListItem.addStreamInfo('video', {'aspect': '1.50', 'width': 1280, 'height': 720})
        # elif '2160'in oGuiElement.getTitle():
        # oListItem.addStreamInfo('video', {'aspect': '1.78', 'width': 3840, 'height': 2160})
        # oListItem.addStreamInfo('audio', {'language': 'fr'})
        # if oGuiElement.getMeta():
        # oOutputParameterHandler.addParameter('sMeta', oGuiElement.getMeta())
        if oGuiElement.getCat():
            oOutputParameterHandler.addParameter('sCat', oGuiElement.getCat())
        sItemUrl = self.__createItemUrl(oGuiElement, oOutputParameterHandler)
        oOutputParameterHandler.addParameter('sTitleWatched', oGuiElement.getTitleWatched())
        # meta-aware context menus (only for movie/tvshow entries)
        if oGuiElement.getMeta() > 0:
            if cGui.CONTENT == 'movies':
                self.createContexMenuWatch(oGuiElement, oOutputParameterHandler)
                self.createContexMenuFav(oGuiElement, oOutputParameterHandler)
                self.createContexMenuinfo(oGuiElement, oOutputParameterHandler)
                self.createContexMenuba(oGuiElement, oOutputParameterHandler)
                if self.ADDON.getSetting('bstoken') != '':
                    self.createContexMenuTrakt(oGuiElement, oOutputParameterHandler)
                if self.ADDON.getSetting('tmdb_account') != '':
                    self.createContexMenuTMDB(oGuiElement, oOutputParameterHandler)
                self.createContexMenuSimil(oGuiElement, oOutputParameterHandler)
            elif cGui.CONTENT == 'tvshows':
                self.createContexMenuWatch(oGuiElement, oOutputParameterHandler)
                self.createContexMenuFav(oGuiElement, oOutputParameterHandler)
                self.createContexMenuinfo(oGuiElement, oOutputParameterHandler)
                self.createContexMenuba(oGuiElement, oOutputParameterHandler)
                if self.ADDON.getSetting('bstoken') != '':
                    self.createContexMenuTrakt(oGuiElement, oOutputParameterHandler)
                if self.ADDON.getSetting('tmdb_account') != '':
                    self.createContexMenuTMDB(oGuiElement, oOutputParameterHandler)
                self.createContexMenuSimil(oGuiElement, oOutputParameterHandler)
        oListItem = self.__createContextMenu(oGuiElement, oListItem)
        #sPluginHandle = cPluginHandler().getPluginHandle()
        # items are batched; setEndOfDirectory() flushes self.listing
        #xbmcplugin.addDirectoryItem(sPluginHandle, sItemUrl, oListItem, isFolder=_isFolder)
        self.listing.append((sItemUrl, oListItem, _isFolder))
    def createListItem(self, oGuiElement):
        """Build a Kodi ListItem from *oGuiElement*: title, info labels, art, properties."""
        oListItem = listitem(oGuiElement.getTitle())
        oListItem.setInfo(oGuiElement.getType(), oGuiElement.getItemValues())
        #oListItem.setThumbnailImage(oGuiElement.getThumbnail())
        #oListItem.setIconImage(oGuiElement.getIcon())
        # Krypton-era API: all artwork goes through a single setArt() call
        oListItem.setArt({'poster': oGuiElement.getPoster(), 'thumb': oGuiElement.getThumbnail(), 'icon': oGuiElement.getIcon(), 'fanart': oGuiElement.getFanart()})
        aProperties = oGuiElement.getItemProperties()
        for sPropertyKey in aProperties.keys():
            oListItem.setProperty(sPropertyKey, aProperties[sPropertyKey])
        return oListItem
# affiche les liens playable
    def addHost(self, oGuiElement, oOutputParameterHandler = ''):
        """Queue a playable (host/stream) entry in self.listing.

        Unlike addFolder() the item is flagged IsPlayable and appended with
        isFolder=False.
        """
        if isKrypton():
            cGui.CONTENT = 'movies'
        if oOutputParameterHandler.getValue('siteUrl'):
            sSiteUrl = oOutputParameterHandler.getValue('siteUrl')
            oGuiElement.setSiteUrl(sSiteUrl)
        oListItem = self.createListItem(oGuiElement)
        oListItem.setProperty('IsPlayable', 'true')
        oListItem.setProperty('Video', 'true')
        oListItem.addStreamInfo('video', {})
        sItemUrl = self.__createItemUrl(oGuiElement, oOutputParameterHandler)
        oOutputParameterHandler.addParameter('sTitleWatched', oGuiElement.getTitleWatched())
        self.createContexMenuWatch(oGuiElement, oOutputParameterHandler)
        oListItem = self.__createContextMenu(oGuiElement, oListItem)
        # sPluginHandle = cPluginHandler().getPluginHandle()
        # items are batched; setEndOfDirectory() flushes self.listing
        #xbmcplugin.addDirectoryItem(sPluginHandle, sItemUrl, oListItem, isFolder=False)
        self.listing.append((sItemUrl, oListItem, False))
#Marquer vu/Non vu
def createContexMenuWatch(self, oGuiElement, oOutputParameterHandler= ''):
self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cGui', oGuiElement.getSiteName(), 'setWatched', self.ADDON.VSlang(30206))
    def createContexMenuPageSelect(self, oGuiElement, oOutputParameterHandler):
        """Attach two context-menu entries to a 'next page' item:
        'go to page...' (selectpage, label 30017) and 'back' (viewback, label 30018).
        """
        #sSiteUrl = oGuiElement.getSiteName()
        oContext = cContextElement()
        oContext.setFile('cGui')
        oContext.setSiteName('cGui')
        oContext.setFunction('selectpage')
        oContext.setTitle(self.ADDON.VSlang(30017))
        # selectpage needs the original target to rebuild the URL
        oOutputParameterHandler.addParameter('OldFunction', oGuiElement.getFunction())
        oOutputParameterHandler.addParameter('sId', oGuiElement.getSiteName())
        oContext.setOutputParameterHandler(oOutputParameterHandler)
        oGuiElement.addContextItem(oContext)
        oContext = cContextElement()
        oContext.setFile('cGui')
        oContext.setSiteName('cGui')
        oContext.setFunction('viewback')
        oContext.setTitle(self.ADDON.VSlang(30018))
        oOutputParameterHandler.addParameter('sId', oGuiElement.getSiteName())
        oContext.setOutputParameterHandler(oOutputParameterHandler)
        oGuiElement.addContextItem(oContext)
#marque page
    def createContexMenuFav(self, oGuiElement, oOutputParameterHandler = ''):
        """Attach an 'add bookmark' context-menu entry (cFav.setBookmark, label 30210)."""
        oOutputParameterHandler.addParameter('sId', oGuiElement.getSiteName())
        oOutputParameterHandler.addParameter('sFav', oGuiElement.getFunction())
        oOutputParameterHandler.addParameter('sCat', oGuiElement.getCat())
        self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cFav', 'cFav', 'setBookmark', self.ADDON.VSlang(30210))
    def createContexMenuTrakt(self, oGuiElement, oOutputParameterHandler= ''):
        """Attach a Trakt action context-menu entry (cTrakt.getAction, label 30214)."""
        oOutputParameterHandler.addParameter('sImdbId', oGuiElement.getImdbId())
        oOutputParameterHandler.addParameter('sTmdbId', oGuiElement.getTmdbId())
        oOutputParameterHandler.addParameter('sFileName', oGuiElement.getFileName())
        # Trakt uses 'shows', not Kodi's 'tvshows'
        sType = cGui.CONTENT.replace('tvshows', 'shows')
        oOutputParameterHandler.addParameter('sType', sType)
        self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cTrakt', 'cTrakt', 'getAction', self.ADDON.VSlang(30214))
    def createContexMenuTMDB(self, oGuiElement, oOutputParameterHandler = ''):
        """Attach a TMDB action context-menu entry (themoviedb_org.getAction)."""
        oOutputParameterHandler.addParameter('sImdbId', oGuiElement.getImdbId())
        oOutputParameterHandler.addParameter('sTmdbId', oGuiElement.getTmdbId())
        oOutputParameterHandler.addParameter('sFileName', oGuiElement.getFileName())
        self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'themoviedb_org', 'themoviedb_org', 'getAction', 'TMDB')
def createContexMenuDownload(self, oGuiElement, oOutputParameterHandler = '', status = '0'):
if status == '0':
self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cDownload', 'cDownload', 'StartDownloadOneFile', self.ADDON.VSlang(30215))
if status == '0' or status == '2':
self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cDownload', 'cDownload', 'delDownload', self.ADDON.VSlang(30216))
self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cDownload', 'cDownload', 'DelFile', self.ADDON.VSlang(30217))
if status == '1':
self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cDownload', 'cDownload', 'StopDownloadList', self.ADDON.VSlang(30218))
if status == '2':
self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cDownload', 'cDownload', 'ReadDownload', self.ADDON.VSlang(30219))
self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cDownload', 'cDownload', 'ResetDownload', self.ADDON.VSlang(30220))
# Information
    def createContexMenuinfo(self, oGuiElement, oOutputParameterHandler = ''):
        """Attach an 'information' context-menu entry (viewinfo, label 30208).

        Note: the incoming *oOutputParameterHandler* is deliberately replaced
        by a fresh handler carrying only the info-window parameters.
        """
        oOutputParameterHandler = cOutputParameterHandler()
        oOutputParameterHandler.addParameter('sTitle', oGuiElement.getTitle())
        oOutputParameterHandler.addParameter('sFileName', oGuiElement.getFileName())
        oOutputParameterHandler.addParameter('sId', oGuiElement.getSiteName())
        oOutputParameterHandler.addParameter('sMeta', oGuiElement.getMeta())
        self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cGui', oGuiElement.getSiteName(), 'viewinfo', self.ADDON.VSlang(30208))
# Bande annonce
    def createContexMenuba(self, oGuiElement, oOutputParameterHandler = ''):
        """Attach a 'trailer' context-menu entry (viewBA, label 30212).

        The incoming handler is replaced by a fresh one with only the
        title/filename parameters.
        """
        oOutputParameterHandler = cOutputParameterHandler()
        oOutputParameterHandler.addParameter('sTitle', oGuiElement.getTitle())
        oOutputParameterHandler.addParameter('sFileName', oGuiElement.getFileName())
        self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cGui', oGuiElement.getSiteName(), 'viewBA', self.ADDON.VSlang(30212))
# Recherche similaire
    def createContexMenuSimil(self, oGuiElement, oOutputParameterHandler = ''):
        """Attach a 'similar content' context-menu entry (viewsimil, label 30213).

        The incoming handler is replaced by a fresh one with only the
        filename/title/category parameters.
        """
        oOutputParameterHandler = cOutputParameterHandler()
        oOutputParameterHandler.addParameter('sFileName', oGuiElement.getFileName())
        oOutputParameterHandler.addParameter('sTitle', oGuiElement.getTitle())
        oOutputParameterHandler.addParameter('sCat', oGuiElement.getCat())
        self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cGui', oGuiElement.getSiteName(), 'viewsimil', self.ADDON.VSlang(30213))
def CreateSimpleMenu(self, oGuiElement, oOutputParameterHandler, sFile, sName, sFunction, sTitle):
oContext = cContextElement()
oContext.setFile(sFile)
oContext.setSiteName(sName)
oContext.setFunction(sFunction)
oContext.setTitle(sTitle)
oContext.setOutputParameterHandler(oOutputParameterHandler)
oGuiElement.addContextItem(oContext)
def createContexMenuDelFav(self, oGuiElement, oOutputParameterHandler = ''):
self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'cFav', 'cFav', 'delBookmarksMenu', self.ADDON.VSlang(30209))
def createContexMenuSettings(self, oGuiElement, oOutputParameterHandler = ''):
self.CreateSimpleMenu(oGuiElement, oOutputParameterHandler, 'globalParametre', 'globalParametre', 'opensetting', self.ADDON.VSlang(30023))
    def __createContextMenu(self, oGuiElement, oListItem):
        """Convert the element's context items into (label, RunPlugin-url) pairs
        and attach them to *oListItem*, replacing Kodi's default entries
        (addContextMenuItems second argument True).
        """
        sPluginPath = cPluginHandler().getPluginPath()
        aContextMenus = []
        # classic menus already configured on the element
        if len(oGuiElement.getContextItems()) > 0:
            for oContextItem in oGuiElement.getContextItems():
                oOutputParameterHandler = oContextItem.getOutputParameterHandler()
                sParams = oOutputParameterHandler.getParameterAsUri()
                sTest = '%s?site=%s&function=%s&%s' % (sPluginPath, oContextItem.getFile(), oContextItem.getFunction(), sParams)
                aContextMenus += [(oContextItem.getTitle(), 'XBMC.RunPlugin(%s)' % (sTest, ), )]
            oListItem.addContextMenuItems(aContextMenus, True)
        return oListItem
def __ContextMenu(self, oGuiElement, oListItem):
sPluginPath = cPluginHandler().getPluginPath()
aContextMenus = []
if len(oGuiElement.getContextItems()) > 0:
for oContextItem in oGuiElement.getContextItems():
oOutputParameterHandler = oContextItem.getOutputParameterHandler()
sParams = oOutputParameterHandler.getParameterAsUri()
sTest = '%s?site=%s&function=%s&%s' % (sPluginPath, oContextItem.getFile(), oContextItem.getFunction(), sParams)
aContextMenus += [(oContextItem.getTitle(), 'XBMC.RunPlugin(%s)' % (sTest, ), )]
oListItem.addContextMenuItems(aContextMenus)
#oListItem.addContextMenuItems(aContextMenus, True)
return oListItem
    def __ContextMenuPlay(self, oGuiElement, oListItem):
        """Attach the element's context items to *oListItem* (non-replacing).

        NOTE(review): this is an exact duplicate of __ContextMenu; candidates
        for consolidation.
        """
        sPluginPath = cPluginHandler().getPluginPath()
        aContextMenus = []
        if len(oGuiElement.getContextItems()) > 0:
            for oContextItem in oGuiElement.getContextItems():
                oOutputParameterHandler = oContextItem.getOutputParameterHandler()
                sParams = oOutputParameterHandler.getParameterAsUri()
                sTest = '%s?site=%s&function=%s&%s' % (sPluginPath, oContextItem.getFile(), oContextItem.getFunction(), sParams)
                aContextMenus += [(oContextItem.getTitle(), 'XBMC.RunPlugin(%s)' % (sTest, ), )]
            oListItem.addContextMenuItems(aContextMenus)
            #oListItem.addContextMenuItems(aContextMenus, True)
        return oListItem
    def setEndOfDirectory(self, ForceViewMode = False):
        """Flush self.listing to Kodi and finalize the directory.

        :param ForceViewMode: numeric Kodi view-mode id to force; otherwise the
            per-content view from the addon settings is applied (if enabled).
        """
        iHandler = cPluginHandler().getPluginHandle()
        # never end an empty directory: show the default text row instead
        if not self.listing:
            self.addText('cGui')
        xbmcplugin.addDirectoryItems(iHandler, self.listing, len(self.listing))
        xbmcplugin.setPluginCategory(iHandler, '')
        xbmcplugin.setContent(iHandler, cGui.CONTENT)
        xbmcplugin.addSortMethod(iHandler, xbmcplugin.SORT_METHOD_NONE)
        xbmcplugin.endOfDirectory(iHandler, succeeded = True, cacheToDisc = True)
        # view-mode ids: 50 = list / 51 big list / 500 icons / 501 gallery / 508 fanart
        if ForceViewMode:
            xbmc.executebuiltin('Container.SetViewMode(' + str(ForceViewMode) + ')')
        else:
            if self.ADDON.getSetting('active-view') == 'true':
                if cGui.CONTENT == 'movies':
                    #xbmc.executebuiltin('Container.SetViewMode(507)')
                    xbmc.executebuiltin('Container.SetViewMode(%s)' % self.ADDON.getSetting('movie-view'))
                elif cGui.CONTENT == 'tvshows':
                    xbmc.executebuiltin('Container.SetViewMode(%s)' % self.ADDON.getSetting('serie-view'))
                elif cGui.CONTENT == 'files':
                    xbmc.executebuiltin('Container.SetViewMode(%s)' % self.ADDON.getSetting('default-view'))
        # workaround for a Kodi 18 display bug: clear the batched listing
        del self.listing [:]
    def updateDirectory(self):
        """Ask Kodi to refresh the current container/listing."""
        xbmc.executebuiltin('Container.Refresh')
    def viewback(self):
        """Replace the current view with the root listing of site 'sId'."""
        sPluginPath = cPluginHandler().getPluginPath()
        oInputParameterHandler = cInputParameterHandler()
        # sParams = oInputParameterHandler.getAllParameter()
        sId = oInputParameterHandler.getValue('sId')
        sTest = '%s?site=%s' % (sPluginPath, sId)
        xbmc.executebuiltin('XBMC.Container.Update(%s, replace)' % sTest)
    def viewsimil(self):
        """Launch a global search for content similar to the selected item.

        Uses the cleaned title as search text and the item's category as filter.
        Always returns False.
        """
        sPluginPath = cPluginHandler().getPluginPath()
        oInputParameterHandler = cInputParameterHandler()
        # sFileName = oInputParameterHandler.getValue('sFileName')
        sTitle = oInputParameterHandler.getValue('sTitle')
        sCat = oInputParameterHandler.getValue('sCat')
        oOutputParameterHandler = cOutputParameterHandler()
        #oOutputParameterHandler.addParameter('searchtext', sFileName)
        oOutputParameterHandler.addParameter('searchtext', cUtil().CleanName(sTitle))
        oOutputParameterHandler.addParameter('sCat', sCat)
        oOutputParameterHandler.addParameter('readdb', 'False')
        sParams = oOutputParameterHandler.getParameterAsUri()
        sTest = '%s?site=%s&function=%s&%s' % (sPluginPath, 'globalSearch', 'globalSearch', sParams)
        xbmc.executebuiltin('XBMC.Container.Update(%s)' % sTest)
        return False
    def selectpage(self):
        """Jump to an arbitrary page of a paginated listing.

        Finds the page number embedded in the current 'siteUrl', asks the user
        for a new one via the numeric keyboard, substitutes it into the URL and
        re-invokes the original listing function. Always returns False.
        """
        sPluginPath = cPluginHandler().getPluginPath()
        oInputParameterHandler = cInputParameterHandler()
        #sParams = oInputParameterHandler.getAllParameter()
        sId = oInputParameterHandler.getValue('sId')
        sFunction = oInputParameterHandler.getValue('OldFunction')
        siteUrl = oInputParameterHandler.getValue('siteUrl')
        oParser = cParser()
        # current page number as it appears in the URL (string)
        oldNum = oParser.getNumberFromString(siteUrl)
        newNum = 0
        if oldNum:
            newNum = self.showNumBoard()
            if newNum:
                try:
                    siteUrl = siteUrl.replace(oldNum, newNum)
                    oOutputParameterHandler = cOutputParameterHandler()
                    oOutputParameterHandler.addParameter('siteUrl', siteUrl)
                    sParams = oOutputParameterHandler.getParameterAsUri()
                    sTest = '%s?site=%s&function=%s&%s' % (sPluginPath, sId, sFunction, sParams)
                    xbmc.executebuiltin('XBMC.Container.Update(%s)' % sTest)
                except:
                    return False
        return False
    def selectpage2(self):
        """Variant of selectpage(): passes the chosen page as a 'Selpage'
        parameter (leaving 'siteUrl' untouched) and replaces the current view.
        """
        sPluginPath = cPluginHandler().getPluginPath()
        oInputParameterHandler = cInputParameterHandler()
        sId = oInputParameterHandler.getValue('sId')
        sFunction = oInputParameterHandler.getValue('OldFunction')
        siteUrl = oInputParameterHandler.getValue('siteUrl')
        # sParams = oInputParameterHandler.getAllParameter()
        selpage = self.showNumBoard()
        oOutputParameterHandler = cOutputParameterHandler()
        oOutputParameterHandler.addParameter('siteUrl', siteUrl)
        oOutputParameterHandler.addParameter('Selpage', selpage)
        sParams = oOutputParameterHandler.getParameterAsUri()
        sTest = '%s?site=%s&function=%s&%s' % (sPluginPath, sId, sFunction, sParams)
        xbmc.executebuiltin('XBMC.Container.Update(%s, replace)' % sTest)
def setWatched(self):
if True:
#Use VStream database
oInputParameterHandler = cInputParameterHandler()
sSite = oInputParameterHandler.getValue('siteUrl')
sTitle = oInputParameterHandler.getValue('sTitleWatched')
if not sTitle:
return
meta = {}
meta['title'] = sTitle
meta['site'] = sSite
db = cDb()
row = db.get_watched(meta)
if row:
db.del_watched(meta)
db.del_resume(meta)
else:
db.insert_watched(meta)
# To test
#xbmc.executebuiltin('Container.Refresh')
else:
# Use kodi buildin feature
xbmc.executebuiltin('Action(ToggleWatched)')
# Not usefull ?
#xbmc.executebuiltin('Container.Refresh')
    def viewBA(self):
        """Search for and show the trailer ('bande annonce') of the selected item."""
        oInputParameterHandler = cInputParameterHandler()
        sFileName = oInputParameterHandler.getValue('sFileName')
        from resources.lib.ba import cShowBA
        cBA = cShowBA()
        cBA.SetSearch(sFileName)
        cBA.SearchBA()
def viewinfo(self):
from resources.lib.config import WindowsBoxes
# oGuiElement = cGuiElement()
oInputParameterHandler = cInputParameterHandler()
sTitle = oInputParameterHandler.getValue('sTitle')
# sId = oInputParameterHandler.getValue('sId')
sFileName = oInputParameterHandler.getValue('sFileName')
sMeta = oInputParameterHandler.getValue('sMeta')
sYear = oInputParameterHandler.getValue('sYear')
# sMeta = 1 >> film sMeta = 2 >> serie
sCleanTitle = cUtil().CleanName(sFileName)
# on vire saison et episode
if True: # sMeta == 2:
sCleanTitle = re.sub('(?i).pisode [0-9]+', '', sCleanTitle)
sCleanTitle = re.sub('(?i)saison [0-9]+', '', sCleanTitle)
sCleanTitle = re.sub('(?i)S[0-9]+E[0-9]+', '', sCleanTitle)
sCleanTitle = re.sub('(?i)[S|E][0-9]+', '', sCleanTitle)
ui = WindowsBoxes(sTitle, sCleanTitle, sMeta, sYear)
    def __createItemUrl(self, oGuiElement, oOutputParameterHandler = ''):
        """Build the plugin:// URL for an entry: plugin path + site (+ function)
        + quoted clean title + serialized parameters.
        """
        if (oOutputParameterHandler == ''):
            oOutputParameterHandler = cOutputParameterHandler()
        sParams = oOutputParameterHandler.getParameterAsUri()
        # cree une id unique
        # if oGuiElement.getSiteUrl():
        # print str(hash(oGuiElement.getSiteUrl()))
        sPluginPath = cPluginHandler().getPluginPath()
        if (len(oGuiElement.getFunction()) == 0):
            sItemUrl = '%s?site=%s&title=%s&%s' % (sPluginPath, oGuiElement.getSiteName(), QuotePlus(oGuiElement.getCleanTitle()), sParams)
        else:
            sItemUrl = '%s?site=%s&function=%s&title=%s&%s' % (sPluginPath, oGuiElement.getSiteName(), oGuiElement.getFunction(), QuotePlus(oGuiElement.getCleanTitle()), sParams)
        #print sItemUrl
        return sItemUrl
def showKeyBoard(self, sDefaultText = '', heading = ''):
keyboard = xbmc.Keyboard(sDefaultText)
keyboard.setHeading(heading)
keyboard.doModal()
if (keyboard.isConfirmed()):
sSearchText = keyboard.getText()
if (len(sSearchText)) > 0:
return sSearchText
return False
def showNumBoard(self, sDefaultNum = ''):
dialogs = dialog()
numboard = dialogs.numeric(0, self.ADDON.VSlang(30019), sDefaultNum)
#numboard.doModal()
if numboard != None:
return numboard
return False
def openSettings(self):
return False
def showNofication(self, sTitle, iSeconds = 0):
return False
def showError(self, sTitle, sDescription, iSeconds = 0):
return False
def showInfo(self, sTitle, sDescription, iSeconds = 0):
return False
| [
"resources.lib.comaddon.addon",
"resources.lib.comaddon.window",
"resources.lib.comaddon.isKrypton",
"resources.lib.gui.guiElement.cGuiElement",
"resources.lib.comaddon.dialog",
"copy.deepcopy",
"resources.lib.ba.cShowBA",
"resources.lib.comaddon.xbmc.Keyboard",
"xbmcplugin.setContent",
"resources... | [((773, 780), 'resources.lib.comaddon.addon', 'addon', ([], {}), '()\n', (778, 780), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((789, 800), 'resources.lib.comaddon.isKrypton', 'isKrypton', ([], {}), '()\n', (798, 800), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((988, 1001), 'resources.lib.gui.guiElement.cGuiElement', 'cGuiElement', ([], {}), '()\n', (999, 1001), False, 'from resources.lib.gui.guiElement import cGuiElement\n'), ((1867, 1880), 'resources.lib.gui.guiElement.cGuiElement', 'cGuiElement', ([], {}), '()\n', (1878, 1880), False, 'from resources.lib.gui.guiElement import cGuiElement\n'), ((2704, 2717), 'resources.lib.gui.guiElement.cGuiElement', 'cGuiElement', ([], {}), '()\n', (2715, 2717), False, 'from resources.lib.gui.guiElement import cGuiElement\n'), ((3540, 3553), 'resources.lib.gui.guiElement.cGuiElement', 'cGuiElement', ([], {}), '()\n', (3551, 3553), False, 'from resources.lib.gui.guiElement import cGuiElement\n'), ((5205, 5218), 'resources.lib.gui.guiElement.cGuiElement', 'cGuiElement', ([], {}), '()\n', (5216, 5218), False, 'from resources.lib.gui.guiElement import cGuiElement\n'), ((5604, 5628), 'resources.lib.handler.inputParameterHandler.cInputParameterHandler', 'cInputParameterHandler', ([], {}), '()\n', (5626, 5628), False, 'from resources.lib.handler.inputParameterHandler import cInputParameterHandler\n'), ((5956, 5969), 'resources.lib.gui.guiElement.cGuiElement', 'cGuiElement', ([], {}), '()\n', (5967, 5969), False, 'from resources.lib.gui.guiElement import cGuiElement\n'), ((6355, 6366), 'resources.lib.comaddon.isKrypton', 'isKrypton', ([], {}), '()\n', (6364, 6366), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((6655, 6668), 'resources.lib.gui.guiElement.cGuiElement', 'cGuiElement', ([], {}), '()\n', (6666, 6668), False, 'from resources.lib.gui.guiElement import 
cGuiElement\n'), ((7531, 7544), 'resources.lib.gui.guiElement.cGuiElement', 'cGuiElement', ([], {}), '()\n', (7542, 7544), False, 'from resources.lib.gui.guiElement import cGuiElement\n'), ((7890, 7915), 'resources.lib.handler.outputParameterHandler.cOutputParameterHandler', 'cOutputParameterHandler', ([], {}), '()\n', (7913, 7915), False, 'from resources.lib.handler.outputParameterHandler import cOutputParameterHandler\n'), ((8176, 8189), 'resources.lib.gui.guiElement.cGuiElement', 'cGuiElement', ([], {}), '()\n', (8187, 8189), False, 'from resources.lib.gui.guiElement import cGuiElement\n'), ((8910, 8923), 'resources.lib.gui.guiElement.cGuiElement', 'cGuiElement', ([], {}), '()\n', (8921, 8923), False, 'from resources.lib.gui.guiElement import cGuiElement\n'), ((14003, 14014), 'resources.lib.comaddon.isKrypton', 'isKrypton', ([], {}), '()\n', (14012, 14014), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((15359, 15376), 'resources.lib.gui.contextElement.cContextElement', 'cContextElement', ([], {}), '()\n', (15374, 15376), False, 'from resources.lib.gui.contextElement import cContextElement\n'), ((15841, 15858), 'resources.lib.gui.contextElement.cContextElement', 'cContextElement', ([], {}), '()\n', (15856, 15858), False, 'from resources.lib.gui.contextElement import cContextElement\n'), ((18901, 18926), 'resources.lib.handler.outputParameterHandler.cOutputParameterHandler', 'cOutputParameterHandler', ([], {}), '()\n', (18924, 18926), False, 'from resources.lib.handler.outputParameterHandler import cOutputParameterHandler\n'), ((19522, 19547), 'resources.lib.handler.outputParameterHandler.cOutputParameterHandler', 'cOutputParameterHandler', ([], {}), '()\n', (19545, 19547), False, 'from resources.lib.handler.outputParameterHandler import cOutputParameterHandler\n'), ((19994, 20019), 'resources.lib.handler.outputParameterHandler.cOutputParameterHandler', 'cOutputParameterHandler', ([], {}), '()\n', (20017, 20019), 
False, 'from resources.lib.handler.outputParameterHandler import cOutputParameterHandler\n'), ((20525, 20542), 'resources.lib.gui.contextElement.cContextElement', 'cContextElement', ([], {}), '()\n', (20540, 20542), False, 'from resources.lib.gui.contextElement import cContextElement\n'), ((23858, 23900), 'xbmcplugin.setPluginCategory', 'xbmcplugin.setPluginCategory', (['iHandler', '""""""'], {}), "(iHandler, '')\n", (23886, 23900), False, 'import re, xbmcplugin\n'), ((23909, 23954), 'xbmcplugin.setContent', 'xbmcplugin.setContent', (['iHandler', 'cGui.CONTENT'], {}), '(iHandler, cGui.CONTENT)\n', (23930, 23954), False, 'import re, xbmcplugin\n'), ((23963, 24026), 'xbmcplugin.addSortMethod', 'xbmcplugin.addSortMethod', (['iHandler', 'xbmcplugin.SORT_METHOD_NONE'], {}), '(iHandler, xbmcplugin.SORT_METHOD_NONE)\n', (23987, 24026), False, 'import re, xbmcplugin\n'), ((24035, 24104), 'xbmcplugin.endOfDirectory', 'xbmcplugin.endOfDirectory', (['iHandler'], {'succeeded': '(True)', 'cacheToDisc': '(True)'}), '(iHandler, succeeded=True, cacheToDisc=True)\n', (24060, 24104), False, 'import re, xbmcplugin\n'), ((25035, 25075), 'resources.lib.comaddon.xbmc.executebuiltin', 'xbmc.executebuiltin', (['"""Container.Refresh"""'], {}), "('Container.Refresh')\n", (25054, 25075), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((25189, 25213), 'resources.lib.handler.inputParameterHandler.cInputParameterHandler', 'cInputParameterHandler', ([], {}), '()\n', (25211, 25213), False, 'from resources.lib.handler.inputParameterHandler import cInputParameterHandler\n'), ((25387, 25452), 'resources.lib.comaddon.xbmc.executebuiltin', 'xbmc.executebuiltin', (["('XBMC.Container.Update(%s, replace)' % sTest)"], {}), "('XBMC.Container.Update(%s, replace)' % sTest)\n", (25406, 25452), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((25567, 25591), 
'resources.lib.handler.inputParameterHandler.cInputParameterHandler', 'cInputParameterHandler', ([], {}), '()\n', (25589, 25591), False, 'from resources.lib.handler.inputParameterHandler import cInputParameterHandler\n'), ((25808, 25833), 'resources.lib.handler.outputParameterHandler.cOutputParameterHandler', 'cOutputParameterHandler', ([], {}), '()\n', (25831, 25833), False, 'from resources.lib.handler.outputParameterHandler import cOutputParameterHandler\n'), ((26286, 26342), 'resources.lib.comaddon.xbmc.executebuiltin', 'xbmc.executebuiltin', (["('XBMC.Container.Update(%s)' % sTest)"], {}), "('XBMC.Container.Update(%s)' % sTest)\n", (26305, 26342), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((26479, 26503), 'resources.lib.handler.inputParameterHandler.cInputParameterHandler', 'cInputParameterHandler', ([], {}), '()\n', (26501, 26503), False, 'from resources.lib.handler.inputParameterHandler import cInputParameterHandler\n'), ((26764, 26773), 'resources.lib.parser.cParser', 'cParser', ([], {}), '()\n', (26771, 26773), False, 'from resources.lib.parser import cParser\n'), ((27566, 27590), 'resources.lib.handler.inputParameterHandler.cInputParameterHandler', 'cInputParameterHandler', ([], {}), '()\n', (27588, 27590), False, 'from resources.lib.handler.inputParameterHandler import cInputParameterHandler\n'), ((27908, 27933), 'resources.lib.handler.outputParameterHandler.cOutputParameterHandler', 'cOutputParameterHandler', ([], {}), '()\n', (27931, 27933), False, 'from resources.lib.handler.outputParameterHandler import cOutputParameterHandler\n'), ((28220, 28285), 'resources.lib.comaddon.xbmc.executebuiltin', 'xbmc.executebuiltin', (["('XBMC.Container.Update(%s, replace)' % sTest)"], {}), "('XBMC.Container.Update(%s, replace)' % sTest)\n", (28239, 28285), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((29232, 29256), 
'resources.lib.handler.inputParameterHandler.cInputParameterHandler', 'cInputParameterHandler', ([], {}), '()\n', (29254, 29256), False, 'from resources.lib.handler.inputParameterHandler import cInputParameterHandler\n'), ((29382, 29391), 'resources.lib.ba.cShowBA', 'cShowBA', ([], {}), '()\n', (29389, 29391), False, 'from resources.lib.ba import cShowBA\n'), ((29599, 29623), 'resources.lib.handler.inputParameterHandler.cInputParameterHandler', 'cInputParameterHandler', ([], {}), '()\n', (29621, 29623), False, 'from resources.lib.handler.inputParameterHandler import cInputParameterHandler\n'), ((30383, 30430), 'resources.lib.config.WindowsBoxes', 'WindowsBoxes', (['sTitle', 'sCleanTitle', 'sMeta', 'sYear'], {}), '(sTitle, sCleanTitle, sMeta, sYear)\n', (30395, 30430), False, 'from resources.lib.config import WindowsBoxes\n'), ((31372, 31399), 'resources.lib.comaddon.xbmc.Keyboard', 'xbmc.Keyboard', (['sDefaultText'], {}), '(sDefaultText)\n', (31385, 31399), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((31707, 31715), 'resources.lib.comaddon.dialog', 'dialog', ([], {}), '()\n', (31713, 31715), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((28401, 28425), 'resources.lib.handler.inputParameterHandler.cInputParameterHandler', 'cInputParameterHandler', ([], {}), '()\n', (28423, 28425), False, 'from resources.lib.handler.inputParameterHandler import cInputParameterHandler\n'), ((28718, 28723), 'resources.lib.db.cDb', 'cDb', ([], {}), '()\n', (28721, 28723), False, 'from resources.lib.db import cDb\n'), ((29056, 29100), 'resources.lib.comaddon.xbmc.executebuiltin', 'xbmc.executebuiltin', (['"""Action(ToggleWatched)"""'], {}), "('Action(ToggleWatched)')\n", (29075, 29100), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((30111, 30156), 're.sub', 're.sub', (['"""(?i).pisode [0-9]+"""', '""""""', 'sCleanTitle'], {}), 
"('(?i).pisode [0-9]+', '', sCleanTitle)\n", (30117, 30156), False, 'import re, xbmcplugin\n'), ((30183, 30227), 're.sub', 're.sub', (['"""(?i)saison [0-9]+"""', '""""""', 'sCleanTitle'], {}), "('(?i)saison [0-9]+', '', sCleanTitle)\n", (30189, 30227), False, 'import re, xbmcplugin\n'), ((30254, 30299), 're.sub', 're.sub', (['"""(?i)S[0-9]+E[0-9]+"""', '""""""', 'sCleanTitle'], {}), "('(?i)S[0-9]+E[0-9]+', '', sCleanTitle)\n", (30260, 30299), False, 'import re, xbmcplugin\n'), ((30326, 30368), 're.sub', 're.sub', (['"""(?i)[S|E][0-9]+"""', '""""""', 'sCleanTitle'], {}), "('(?i)[S|E][0-9]+', '', sCleanTitle)\n", (30332, 30368), False, 'import re, xbmcplugin\n'), ((30588, 30613), 'resources.lib.handler.outputParameterHandler.cOutputParameterHandler', 'cOutputParameterHandler', ([], {}), '()\n', (30611, 30613), False, 'from resources.lib.handler.outputParameterHandler import cOutputParameterHandler\n'), ((21324, 21340), 'resources.lib.handler.pluginHandler.cPluginHandler', 'cPluginHandler', ([], {}), '()\n', (21338, 21340), False, 'from resources.lib.handler.pluginHandler import cPluginHandler\n'), ((22088, 22104), 'resources.lib.handler.pluginHandler.cPluginHandler', 'cPluginHandler', ([], {}), '()\n', (22102, 22104), False, 'from resources.lib.handler.pluginHandler import cPluginHandler\n'), ((22870, 22886), 'resources.lib.handler.pluginHandler.cPluginHandler', 'cPluginHandler', ([], {}), '()\n', (22884, 22886), False, 'from resources.lib.handler.pluginHandler import cPluginHandler\n'), ((23649, 23665), 'resources.lib.handler.pluginHandler.cPluginHandler', 'cPluginHandler', ([], {}), '()\n', (23663, 23665), False, 'from resources.lib.handler.pluginHandler import cPluginHandler\n'), ((25123, 25139), 'resources.lib.handler.pluginHandler.cPluginHandler', 'cPluginHandler', ([], {}), '()\n', (25137, 25139), False, 'from resources.lib.handler.pluginHandler import cPluginHandler\n'), ((25501, 25517), 'resources.lib.handler.pluginHandler.cPluginHandler', 'cPluginHandler', 
([], {}), '()\n', (25515, 25517), False, 'from resources.lib.handler.pluginHandler import cPluginHandler\n'), ((26413, 26429), 'resources.lib.handler.pluginHandler.cPluginHandler', 'cPluginHandler', ([], {}), '()\n', (26427, 26429), False, 'from resources.lib.handler.pluginHandler import cPluginHandler\n'), ((27044, 27069), 'resources.lib.handler.outputParameterHandler.cOutputParameterHandler', 'cOutputParameterHandler', ([], {}), '()\n', (27067, 27069), False, 'from resources.lib.handler.outputParameterHandler import cOutputParameterHandler\n'), ((27322, 27378), 'resources.lib.comaddon.xbmc.executebuiltin', 'xbmc.executebuiltin', (["('XBMC.Container.Update(%s)' % sTest)"], {}), "('XBMC.Container.Update(%s)' % sTest)\n", (27341, 27378), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((27500, 27516), 'resources.lib.handler.pluginHandler.cPluginHandler', 'cPluginHandler', ([], {}), '()\n', (27514, 27516), False, 'from resources.lib.handler.pluginHandler import cPluginHandler\n'), ((29987, 29994), 'resources.lib.util.cUtil', 'cUtil', ([], {}), '()\n', (29992, 29994), False, 'from resources.lib.util import cUtil, QuotePlus\n'), ((30825, 30841), 'resources.lib.handler.pluginHandler.cPluginHandler', 'cPluginHandler', ([], {}), '()\n', (30839, 30841), False, 'from resources.lib.handler.pluginHandler import cPluginHandler\n'), ((7442, 7455), 'resources.lib.comaddon.window', 'window', (['(10101)'], {}), '(10101)\n', (7448, 7455), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((9635, 9648), 'resources.lib.comaddon.window', 'window', (['(10101)'], {}), '(10101)\n', (9641, 9648), False, 'from resources.lib.comaddon import listitem, addon, dialog, isKrypton, window, xbmc\n'), ((9782, 9820), 'copy.deepcopy', 'copy.deepcopy', (['oOutputParameterHandler'], {}), '(oOutputParameterHandler)\n', (9795, 9820), False, 'import copy\n'), ((25964, 25971), 'resources.lib.util.cUtil', 
'cUtil', ([], {}), '()\n', (25969, 25971), False, 'from resources.lib.util import cUtil, QuotePlus\n')] |
#!/usr/bin/env python3
__copyright__ = 'Copyright 2013-2016, http://radical.rutgers.edu'
__license__ = 'MIT'
import sys
import radical.utils as ru
import radical.pilot as rp
rpu = rp.utils
# ------------------------------------------------------------------------------
#
if __name__ == '__main__':

    # A session id is required on the command line.
    if len(sys.argv) <= 1:
        # Fix: the original printed the literal "%s" because the program name
        # was never interpolated into the usage string.
        print("\n\tusage: %s <session_id>\n" % sys.argv[0])
        sys.exit(1)

    sid = sys.argv[1]

    # Fetch (or reuse previously fetched) profile files for this session.
    profiles = rpu.fetch_profiles(sid=sid, skip_existing=True)
    for p in profiles:
        print(p)

    # Parse each profile file individually, then merge them into one timeline.
    profs = ru.read_profiles(profiles)
    for p in profs:
        print(type(p))

    prof = ru.combine_profiles(profs)
    print(len(prof))
    for entry in prof:
        print(entry)

# ------------------------------------------------------------------------------
| [
"radical.utils.combine_profiles",
"radical.utils.read_profiles",
"sys.exit"
] | [((541, 567), 'radical.utils.read_profiles', 'ru.read_profiles', (['profiles'], {}), '(profiles)\n', (557, 567), True, 'import radical.utils as ru\n'), ((624, 650), 'radical.utils.combine_profiles', 'ru.combine_profiles', (['profs'], {}), '(profs)\n', (643, 650), True, 'import radical.utils as ru\n'), ((388, 399), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (396, 399), False, 'import sys\n')] |
#!/usr/bin/env python3.6
from user import User
from credential import Credential
def createUser(userName,password):
    """Build and return a fresh ``User`` for the given login details."""
    return User(userName, password)
def saveUsers(user):
    """Persist *user* by delegating to its own ``saveUser`` hook."""
    user.saveUser()
def createCredential(firstName,lastName,accountName,password):
    """Assemble and return a new ``Credential`` from the given fields."""
    return Credential(firstName, lastName, accountName, password)
def saveCredential(credential):
    """Store *credential* through the ``Credential`` class helper."""
    Credential.saveCredential(credential)
def delCredential(credential):
    """Remove *credential* by invoking its ``deleteCredential`` method."""
    credential.deleteCredential()
def findCredential(name):
    """Return the stored credential whose name matches *name*."""
    return Credential.find_by_name(name)
def check_existingCredentials(name):
    """Return a boolean: is there already a credential called *name*?"""
    return Credential.credential_exist(name)
def displayCredentials():
    """Return every credential currently saved."""
    return Credential.displayCredentials()
def main():
    """Interactive console loop for the password locker.

    Prompts for a login, then repeatedly reads a short code from stdin:
    cc = create credential, dc = display all, fc = find by name,
    dl = delete by name, ex = quit.
    """
    print("Hello Welcome to password locker.\n Login:")
    userName = input("What is your name?")
    password = input("Enter your password :")
    # NOTE(review): the entered name/password are echoed but never verified
    # against stored users — confirm whether authentication is intended here.
    print(f"Hello {userName}. what would you like to do??\n Create an acount First!!" )
    print("-"* 15)
    while True:
        print("Us this short codes : cc - Create a new credential, dc -display credentials, fc -to search a credential, dl -to delete credential, ex -exit the credential list ")
        short_code = input()
        if short_code == 'cc':
            # Gather the four fields and persist a new credential.
            print("New Credential")
            print("-"*10)
            print ("firstName ....")
            firstName = input()
            print("lastName ...")
            lastName = input()
            print("accountName ...")
            accountName = input()
            print("password ...")
            password = input()
            saveCredential(createCredential(firstName,lastName,accountName,password)) # create and save new credential.
            print('\n')
            # print (f'New Credential {firstName} {lastName} {accountName} created')
            print('\n')
        elif short_code == 'dc':
            # List everything, or tell the user nothing is saved yet.
            if displayCredentials():
                print("Here is a list of all your credentials")
                print('\n')
                for credential in displayCredentials():
                    print(f"{credential.firstName} {credential.lastName} ....{credential.accountName}")
                    print('\n')
            else:
                print('\n')
                print("You dont seem to have any credentials saved yet")
                print('\n')
        elif short_code =='dl':
            # Delete by name via the Credential class helper (bypasses the
            # module-level delCredential wrapper above).
            print("Are your sure you want to delete this credential\n Please insert the name of the credential:")
            searchName = input()
            deleteCredential = Credential.deleteCredential(searchName)
        elif short_code == 'fc':
            # Look a credential up by name and show its details.
            print("Enter the name you want to search for")
            searchName = input()
            searchCredential = findCredential(searchName)
            print(f" {searchCredential.lastName}")
            print('-' * 20)
            print(f"accountName........{searchCredential.accountName}")
        elif short_code == "ex":
            print("Bye ......")
            break
        else:
            print("I really didn't get that. Please use the short codes")
# Run the interactive password-locker loop when executed as a script.
if __name__ == '__main__':
    main()
| [
"credential.Credential",
"credential.Credential.find_by_name",
"credential.Credential.deleteCredential",
"credential.Credential.saveCredential",
"credential.Credential.displayCredentials",
"credential.Credential.credential_exist",
"user.User"
] | [((182, 206), 'user.User', 'User', (['userName', 'password'], {}), '(userName, password)\n', (186, 206), False, 'from user import User\n'), ((395, 449), 'credential.Credential', 'Credential', (['firstName', 'lastName', 'accountName', 'password'], {}), '(firstName, lastName, accountName, password)\n', (405, 449), False, 'from credential import Credential\n'), ((569, 606), 'credential.Credential.saveCredential', 'Credential.saveCredential', (['credential'], {}), '(credential)\n', (594, 606), False, 'from credential import Credential\n'), ((849, 878), 'credential.Credential.find_by_name', 'Credential.find_by_name', (['name'], {}), '(name)\n', (872, 878), False, 'from credential import Credential\n'), ((1029, 1062), 'credential.Credential.credential_exist', 'Credential.credential_exist', (['name'], {}), '(name)\n', (1056, 1062), False, 'from credential import Credential\n'), ((1170, 1201), 'credential.Credential.displayCredentials', 'Credential.displayCredentials', ([], {}), '()\n', (1199, 1201), False, 'from credential import Credential\n'), ((3186, 3225), 'credential.Credential.deleteCredential', 'Credential.deleteCredential', (['searchName'], {}), '(searchName)\n', (3213, 3225), False, 'from credential import Credential\n')] |
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
import InsightToolkit as itk
import sys
# Segmentation pipeline: read -> anisotropic smoothing -> gradient
# magnitude -> watershed -> write the label image.
sourceReader = itk.itkImageFileReaderF2_New()
sourceReader.SetFileName(sys.argv[1])

smoother = itk.itkGradientAnisotropicDiffusionImageFilterF2F2_New()
smoother.SetInput(sourceReader.GetOutput())
smoother.SetTimeStep(0.0625)
smoother.SetConductanceParameter(9.0)
smoother.SetNumberOfIterations(5)

gradientMagnitude = itk.itkGradientMagnitudeImageFilterF2F2_New()
gradientMagnitude.SetInput(smoother.GetOutput())

segmenter = itk.itkWatershedImageFilterF2_New()
segmenter.SetInput(gradientMagnitude.GetOutput())
segmenter.SetThreshold(0.01)
segmenter.SetLevel(0.2)

labelWriter = itk.itkImageFileWriterUL2_New()
labelWriter.SetFileName(sys.argv[2])
labelWriter.SetInput(segmenter.GetOutput())
labelWriter.Update()
| [
"InsightToolkit.itkGradientMagnitudeImageFilterF2F2_New",
"InsightToolkit.itkWatershedImageFilterF2_New",
"InsightToolkit.itkImageFileWriterUL2_New",
"InsightToolkit.itkGradientAnisotropicDiffusionImageFilterF2F2_New",
"InsightToolkit.itkImageFileReaderF2_New"
] | [((843, 873), 'InsightToolkit.itkImageFileReaderF2_New', 'itk.itkImageFileReaderF2_New', ([], {}), '()\n', (871, 873), True, 'import InsightToolkit as itk\n'), ((924, 980), 'InsightToolkit.itkGradientAnisotropicDiffusionImageFilterF2F2_New', 'itk.itkGradientAnisotropicDiffusionImageFilterF2F2_New', ([], {}), '()\n', (978, 980), True, 'import InsightToolkit as itk\n'), ((1146, 1191), 'InsightToolkit.itkGradientMagnitudeImageFilterF2F2_New', 'itk.itkGradientMagnitudeImageFilterF2F2_New', ([], {}), '()\n', (1189, 1191), True, 'import InsightToolkit as itk\n'), ((1249, 1284), 'InsightToolkit.itkWatershedImageFilterF2_New', 'itk.itkWatershedImageFilterF2_New', ([], {}), '()\n', (1282, 1284), True, 'import InsightToolkit as itk\n'), ((1394, 1425), 'InsightToolkit.itkImageFileWriterUL2_New', 'itk.itkImageFileWriterUL2_New', ([], {}), '()\n', (1423, 1425), True, 'import InsightToolkit as itk\n')] |
import os
import sys
# Make the local monk checkout importable before pulling in its modules.
sys.path.append("../../../monk/")

import psutil
from gluon_prototype import prototype

# Set up a sample experiment on the cats-vs-dogs dataset with a frozen
# resnet18_v1 backbone.
session = prototype(verbose=1)
session.Prototype("sample-project-1", "sample-experiment-1")
session.Default(dataset_path="../../../monk/system_check_tests/datasets/dataset_cats_dogs_train",
                model_name="resnet18_v1", freeze_base_network=True, num_epochs=2)

# Print the experiment/network summary.
session.Summary()

# Exploratory data checks: per-class image counts, then missing and
# corrupted files.
session.EDA(show_img=True, save_img=True)
session.EDA(check_missing=True, check_corrupt=True)

# Rough estimate of the wall-clock time for a 50-epoch run.
session.Estimate_Train_Time(num_epochs=50)
########################################################################################################################### | [
"gluon_prototype.prototype",
"sys.path.append"
] | [((21, 54), 'sys.path.append', 'sys.path.append', (['"""../../../monk/"""'], {}), "('../../../monk/')\n", (36, 54), False, 'import sys\n'), ((118, 138), 'gluon_prototype.prototype', 'prototype', ([], {'verbose': '(1)'}), '(verbose=1)\n', (127, 138), False, 'from gluon_prototype import prototype\n')] |
"""
Train
=====
Defines functions which train models and write model artifacts to disk.
"""
from __future__ import print_function
import os
import tempfile
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow_mnist import model, paths
def train(path):
    """
    Train a classifier on MNIST and write the model artifacts to disk.

    Arguments:
        path (str): The path indicating where to save the final model artifacts
    """
    # Construct the model graph.
    graph, x, y, step, initializer, accuracy, prediction = model.build()

    # Start a training session.
    with tf.Session(graph=graph) as sess:
        # Initialize the graph variables.
        sess.run(initializer)

        # Download MNIST into a throwaway directory and train for 1000 steps
        # of 100-example mini-batches.
        mnist = input_data.read_data_sets(tempfile.mkdtemp(), one_hot=True)
        for _ in range(1000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            sess.run(step, feed_dict={x: batch_xs, y: batch_ys})

        # Display the accuracy measurement on the held-out test set.
        print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                           y: mnist.test.labels}))

        # Save the variable data to disk.
        # Fix: tolerate a pre-existing artifact directory so re-running the
        # training job does not crash with "File exists".
        if not os.path.isdir(path):
            os.makedirs(path)
        saver = tf.train.Saver()
        saver.save(sess, path)

    print('Success!')
def main():
    """
    Resolve the artifact location for the 'mnist' model and run the full
    training loop against it.

    Note: This is the training entrypoint used by baklava!
    """
    train(paths.model('mnist'))
| [
"tensorflow_mnist.paths.model",
"os.makedirs",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow_mnist.model.build",
"tempfile.mkdtemp"
] | [((627, 640), 'tensorflow_mnist.model.build', 'model.build', ([], {}), '()\n', (638, 640), False, 'from tensorflow_mnist import model, paths\n'), ((1570, 1590), 'tensorflow_mnist.paths.model', 'paths.model', (['"""mnist"""'], {}), "('mnist')\n", (1581, 1590), False, 'from tensorflow_mnist import model, paths\n'), ((682, 705), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (692, 705), True, 'import tensorflow as tf\n'), ((1274, 1291), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1285, 1291), False, 'import os\n'), ((1308, 1324), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1322, 1324), True, 'import tensorflow as tf\n'), ((861, 879), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (877, 879), False, 'import tempfile\n')] |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# Register this module's message classes with proto-plus under the
# google.cloud.binaryauthorization.v1 proto package.
__protobuf__ = proto.module(
    package='google.cloud.binaryauthorization.v1',
    manifest={
        'Policy',
        'AdmissionWhitelistPattern',
        'AdmissionRule',
        'Attestor',
        'UserOwnedGrafeasNote',
        'PkixPublicKey',
        'AttestorPublicKey',
    },
)
class Policy(proto.Message):
    # NOTE(review): proto-plus message — appears machine-generated from the
    # binaryauthorization.v1 .proto; confirm before hand-editing field numbers.
    r"""A [policy][google.cloud.binaryauthorization.v1.Policy] for container
    image binary authorization.
    Attributes:
        name (str):
            Output only. The resource name, in the format
            ``projects/*/policy``. There is at most one policy per
            project.
        description (str):
            Optional. A descriptive comment.
        global_policy_evaluation_mode (google.cloud.binaryauthorization_v1.types.Policy.GlobalPolicyEvaluationMode):
            Optional. Controls the evaluation of a
            Google-maintained global admission policy for
            common system-level images. Images not covered
            by the global policy will be subject to the
            project admission policy. This setting has no
            effect when specified inside a global admission
            policy.
        admission_whitelist_patterns (Sequence[google.cloud.binaryauthorization_v1.types.AdmissionWhitelistPattern]):
            Optional. Admission policy allowlisting. A
            matching admission request will always be
            permitted. This feature is typically used to
            exclude Google or third-party infrastructure
            images from Binary Authorization policies.
        cluster_admission_rules (Sequence[google.cloud.binaryauthorization_v1.types.Policy.ClusterAdmissionRulesEntry]):
            Optional. Per-cluster admission rules. Cluster spec format:
            ``location.clusterId``. There can be at most one admission
            rule per cluster spec. A ``location`` is either a compute
            zone (e.g. us-central1-a) or a region (e.g. us-central1).
            For ``clusterId`` syntax restrictions see
            https://cloud.google.com/container-engine/reference/rest/v1/projects.zones.clusters.
        kubernetes_namespace_admission_rules (Sequence[google.cloud.binaryauthorization_v1.types.Policy.KubernetesNamespaceAdmissionRulesEntry]):
            Optional. Per-kubernetes-namespace admission rules. K8s
            namespace spec format: [a-z.-]+, e.g. 'some-namespace'
        kubernetes_service_account_admission_rules (Sequence[google.cloud.binaryauthorization_v1.types.Policy.KubernetesServiceAccountAdmissionRulesEntry]):
            Optional. Per-kubernetes-service-account admission rules.
            Service account spec format: ``namespace:serviceaccount``.
            e.g. 'test-ns:default'
        istio_service_identity_admission_rules (Sequence[google.cloud.binaryauthorization_v1.types.Policy.IstioServiceIdentityAdmissionRulesEntry]):
            Optional. Per-istio-service-identity
            admission rules. Istio service identity spec
            format:
            spiffe://<domain>/ns/<namespace>/sa/<serviceaccount>
            or <domain>/ns/<namespace>/sa/<serviceaccount>
            e.g. spiffe://example.com/ns/test-ns/sa/default
        default_admission_rule (google.cloud.binaryauthorization_v1.types.AdmissionRule):
            Required. Default admission rule for a
            cluster without a per-cluster, per- kubernetes-
            service-account, or per-istio-service-identity
            admission rule.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when the policy was last
            updated.
    """
    class GlobalPolicyEvaluationMode(proto.Enum):
        r"""Evaluation modes for the Google-maintained global admission
        policy (see ``global_policy_evaluation_mode`` above).
        """
        GLOBAL_POLICY_EVALUATION_MODE_UNSPECIFIED = 0
        ENABLE = 1
        DISABLE = 2

    # Field descriptors; ``number=`` is the protobuf field tag.
    name = proto.Field(
        proto.STRING,
        number=1,
    )
    description = proto.Field(
        proto.STRING,
        number=6,
    )
    global_policy_evaluation_mode = proto.Field(
        proto.ENUM,
        number=7,
        enum=GlobalPolicyEvaluationMode,
    )
    admission_whitelist_patterns = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message='AdmissionWhitelistPattern',
    )
    # Map fields: string key -> AdmissionRule value.
    cluster_admission_rules = proto.MapField(
        proto.STRING,
        proto.MESSAGE,
        number=3,
        message='AdmissionRule',
    )
    kubernetes_namespace_admission_rules = proto.MapField(
        proto.STRING,
        proto.MESSAGE,
        number=10,
        message='AdmissionRule',
    )
    kubernetes_service_account_admission_rules = proto.MapField(
        proto.STRING,
        proto.MESSAGE,
        number=8,
        message='AdmissionRule',
    )
    istio_service_identity_admission_rules = proto.MapField(
        proto.STRING,
        proto.MESSAGE,
        number=9,
        message='AdmissionRule',
    )
    default_admission_rule = proto.Field(
        proto.MESSAGE,
        number=4,
        message='AdmissionRule',
    )
    update_time = proto.Field(
        proto.MESSAGE,
        number=5,
        message=timestamp_pb2.Timestamp,
    )
class AdmissionWhitelistPattern(proto.Message):
    # NOTE(review): proto-plus message — appears machine-generated; confirm
    # before hand-editing field numbers.
    r"""An [admission allowlist
    pattern][google.cloud.binaryauthorization.v1.AdmissionWhitelistPattern]
    exempts images from checks by [admission
    rules][google.cloud.binaryauthorization.v1.AdmissionRule].
    Attributes:
        name_pattern (str):
            An image name pattern to allowlist, in the form
            ``registry/path/to/image``. This supports a trailing ``*``
            wildcard, but this is allowed only in text after the
            ``registry/`` part. This also supports a trailing ``**``
            wildcard which matches subdirectories of a given entry.
    """

    name_pattern = proto.Field(
        proto.STRING,
        number=1,
    )
class AdmissionRule(proto.Message):
    # NOTE(review): proto-plus message — appears machine-generated; confirm
    # before hand-editing field numbers.
    r"""An [admission
    rule][google.cloud.binaryauthorization.v1.AdmissionRule] specifies
    either that all container images used in a pod creation request must
    be attested to by one or more
    [attestors][google.cloud.binaryauthorization.v1.Attestor], that all
    pod creations will be allowed, or that all pod creations will be
    denied.
    Images matching an [admission allowlist
    pattern][google.cloud.binaryauthorization.v1.AdmissionWhitelistPattern]
    are exempted from admission rules and will never block a pod
    creation.
    Attributes:
        evaluation_mode (google.cloud.binaryauthorization_v1.types.AdmissionRule.EvaluationMode):
            Required. How this admission rule will be
            evaluated.
        require_attestations_by (Sequence[str]):
            Optional. The resource names of the attestors that must
            attest to a container image, in the format
            ``projects/*/attestors/*``. Each attestor must exist before
            a policy can reference it. To add an attestor to a policy
            the principal issuing the policy change request must be able
            to read the attestor resource.
            Note: this field must be non-empty when the evaluation_mode
            field specifies REQUIRE_ATTESTATION, otherwise it must be
            empty.
        enforcement_mode (google.cloud.binaryauthorization_v1.types.AdmissionRule.EnforcementMode):
            Required. The action when a pod creation is
            denied by the admission rule.
    """
    class EvaluationMode(proto.Enum):
        r"""How an admission rule is evaluated (always allow, require
        attestation, or always deny).
        """
        EVALUATION_MODE_UNSPECIFIED = 0
        ALWAYS_ALLOW = 1
        REQUIRE_ATTESTATION = 2
        ALWAYS_DENY = 3

    class EnforcementMode(proto.Enum):
        r"""Defines the possible actions when a pod creation is denied by
        an admission rule.
        """
        ENFORCEMENT_MODE_UNSPECIFIED = 0
        ENFORCED_BLOCK_AND_AUDIT_LOG = 1
        DRYRUN_AUDIT_LOG_ONLY = 2

    # Field descriptors; ``number=`` is the protobuf field tag.
    evaluation_mode = proto.Field(
        proto.ENUM,
        number=1,
        enum=EvaluationMode,
    )
    require_attestations_by = proto.RepeatedField(
        proto.STRING,
        number=2,
    )
    enforcement_mode = proto.Field(
        proto.ENUM,
        number=3,
        enum=EnforcementMode,
    )
class Attestor(proto.Message):
    # NOTE(review): proto-plus message — appears machine-generated; confirm
    # before hand-editing field numbers.
    r"""An [attestor][google.cloud.binaryauthorization.v1.Attestor] that
    attests to container image artifacts. An existing attestor cannot be
    modified except where indicated.
    Attributes:
        name (str):
            Required. The resource name, in the format:
            ``projects/*/attestors/*``. This field may not be updated.
        description (str):
            Optional. A descriptive comment. This field
            may be updated. The field may be displayed in
            chooser dialogs.
        user_owned_grafeas_note (google.cloud.binaryauthorization_v1.types.UserOwnedGrafeasNote):
            This specifies how an attestation will be
            read, and how it will be used during policy
            enforcement.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when the attestor was last
            updated.
    """

    name = proto.Field(
        proto.STRING,
        number=1,
    )
    description = proto.Field(
        proto.STRING,
        number=6,
    )
    # Member of the 'attestor_type' oneof.
    user_owned_grafeas_note = proto.Field(
        proto.MESSAGE,
        number=3,
        oneof='attestor_type',
        message='UserOwnedGrafeasNote',
    )
    update_time = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )
class UserOwnedGrafeasNote(proto.Message):
    # NOTE(review): proto-plus message — appears machine-generated; confirm
    # before hand-editing field numbers.
    r"""An [user owned Grafeas
    note][google.cloud.binaryauthorization.v1.UserOwnedGrafeasNote]
    references a Grafeas Attestation.Authority Note created by the user.
    Attributes:
        note_reference (str):
            Required. The Grafeas resource name of a
            Attestation.Authority Note, created by the user, in the
            format: ``projects/*/notes/*``. This field may not be
            updated.
            An attestation by this attestor is stored as a Grafeas
            Attestation.Authority Occurrence that names a container
            image and that links to this Note. Grafeas is an external
            dependency.
        public_keys (Sequence[google.cloud.binaryauthorization_v1.types.AttestorPublicKey]):
            Optional. Public keys that verify
            attestations signed by this attestor. This
            field may be updated.
            If this field is non-empty, one of the specified
            public keys must verify that an attestation was
            signed by this attestor for the image specified
            in the admission request.
            If this field is empty, this attestor always
            returns that no valid attestations exist.
        delegation_service_account_email (str):
            Output only. This field will contain the service account
            email address that this Attestor will use as the principal
            when querying Container Analysis. Attestor administrators
            must grant this service account the IAM role needed to read
            attestations from the [note_reference][Note] in Container
            Analysis (``containeranalysis.notes.occurrences.viewer``).
            This email address is fixed for the lifetime of the
            Attestor, but callers should not make any other assumptions
            about the service account email; future versions may use an
            email based on a different naming pattern.
    """

    note_reference = proto.Field(
        proto.STRING,
        number=1,
    )
    public_keys = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message='AttestorPublicKey',
    )
    delegation_service_account_email = proto.Field(
        proto.STRING,
        number=3,
    )
class PkixPublicKey(proto.Message):
    # NOTE(review): proto-plus message — appears machine-generated; confirm
    # before hand-editing field numbers.
    r"""A public key in the PkixPublicKey format (see
    https://tools.ietf.org/html/rfc5280#section-4.1.2.7 for
    details). Public keys of this type are typically textually
    encoded using the PEM format.
    Attributes:
        public_key_pem (str):
            A PEM-encoded public key, as described in
            https://tools.ietf.org/html/rfc7468#section-13
        signature_algorithm (google.cloud.binaryauthorization_v1.types.PkixPublicKey.SignatureAlgorithm):
            The signature algorithm used to verify a message against a
            signature using this key. These signature algorithm must
            match the structure and any object identifiers encoded in
            ``public_key_pem`` (i.e. this algorithm must match that of
            the public key).
    """
    class SignatureAlgorithm(proto.Enum):
        r"""Represents a signature algorithm and other information
        necessary to verify signatures with a given public key. This is
        based primarily on the public key types supported by Tink's
        PemKeyType, which is in turn based on KMS's supported signing
        algorithms. See https://cloud.google.com/kms/docs/algorithms. In
        the future, BinAuthz might support additional public key types
        independently of Tink and/or KMS.
        """
        # allow_alias lets several ECDSA_*/EC_SIGN_* names share one value
        # (9, 10, 11 below).
        _pb_options = {'allow_alias': True}
        SIGNATURE_ALGORITHM_UNSPECIFIED = 0
        RSA_PSS_2048_SHA256 = 1
        RSA_PSS_3072_SHA256 = 2
        RSA_PSS_4096_SHA256 = 3
        RSA_PSS_4096_SHA512 = 4
        RSA_SIGN_PKCS1_2048_SHA256 = 5
        RSA_SIGN_PKCS1_3072_SHA256 = 6
        RSA_SIGN_PKCS1_4096_SHA256 = 7
        RSA_SIGN_PKCS1_4096_SHA512 = 8
        ECDSA_P256_SHA256 = 9
        EC_SIGN_P256_SHA256 = 9
        ECDSA_P384_SHA384 = 10
        EC_SIGN_P384_SHA384 = 10
        ECDSA_P521_SHA512 = 11
        EC_SIGN_P521_SHA512 = 11

    public_key_pem = proto.Field(
        proto.STRING,
        number=1,
    )
    signature_algorithm = proto.Field(
        proto.ENUM,
        number=2,
        enum=SignatureAlgorithm,
    )
class AttestorPublicKey(proto.Message):
    # NOTE(review): proto-plus message — appears machine-generated; confirm
    # before hand-editing field numbers.
    r"""An [attestor public
    key][google.cloud.binaryauthorization.v1.AttestorPublicKey] that
    will be used to verify attestations signed by this attestor.
    Attributes:
        comment (str):
            Optional. A descriptive comment. This field
            may be updated.
        id (str):
            The ID of this public key. Signatures verified by BinAuthz
            must include the ID of the public key that can be used to
            verify them, and that ID must match the contents of this
            field exactly. Additional restrictions on this field can be
            imposed based on which public key type is encapsulated. See
            the documentation on ``public_key`` cases below for details.
        ascii_armored_pgp_public_key (str):
            ASCII-armored representation of a PGP public key, as the
            entire output by the command
            ``gpg --export --armor <EMAIL>`` (either LF or CRLF
            line endings). When using this field, ``id`` should be left
            blank. The BinAuthz API handlers will calculate the ID and
            fill it in automatically. BinAuthz computes this ID as the
            OpenPGP RFC4880 V4 fingerprint, represented as upper-case
            hex. If ``id`` is provided by the caller, it will be
            overwritten by the API-calculated ID.
        pkix_public_key (google.cloud.binaryauthorization_v1.types.PkixPublicKey):
            A raw PKIX SubjectPublicKeyInfo format public key.
            NOTE: ``id`` may be explicitly provided by the caller when
            using this type of public key, but it MUST be a valid
            RFC3986 URI. If ``id`` is left blank, a default one will be
            computed based on the digest of the DER encoding of the
            public key.
    """

    comment = proto.Field(
        proto.STRING,
        number=1,
    )
    id = proto.Field(
        proto.STRING,
        number=2,
    )
    # The next two fields are alternatives within the 'public_key' oneof.
    ascii_armored_pgp_public_key = proto.Field(
        proto.STRING,
        number=3,
        oneof='public_key',
    )
    pkix_public_key = proto.Field(
        proto.MESSAGE,
        number=5,
        oneof='public_key',
        message='PkixPublicKey',
    )
# Export exactly the message names registered in the proto-plus manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"proto.RepeatedField",
"proto.Field",
"proto.module",
"proto.MapField"
] | [((705, 914), 'proto.module', 'proto.module', ([], {'package': '"""google.cloud.binaryauthorization.v1"""', 'manifest': "{'Policy', 'AdmissionWhitelistPattern', 'AdmissionRule', 'Attestor',\n 'UserOwnedGrafeasNote', 'PkixPublicKey', 'AttestorPublicKey'}"}), "(package='google.cloud.binaryauthorization.v1', manifest={\n 'Policy', 'AdmissionWhitelistPattern', 'AdmissionRule', 'Attestor',\n 'UserOwnedGrafeasNote', 'PkixPublicKey', 'AttestorPublicKey'})\n", (717, 914), False, 'import proto\n'), ((4484, 4519), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (4495, 4519), False, 'import proto\n'), ((4561, 4596), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(6)'}), '(proto.STRING, number=6)\n', (4572, 4596), False, 'import proto\n'), ((4656, 4722), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(7)', 'enum': 'GlobalPolicyEvaluationMode'}), '(proto.ENUM, number=7, enum=GlobalPolicyEvaluationMode)\n', (4667, 4722), False, 'import proto\n'), ((4789, 4875), 'proto.RepeatedField', 'proto.RepeatedField', (['proto.MESSAGE'], {'number': '(2)', 'message': '"""AdmissionWhitelistPattern"""'}), "(proto.MESSAGE, number=2, message=\n 'AdmissionWhitelistPattern')\n", (4808, 4875), False, 'import proto\n'), ((4932, 5010), 'proto.MapField', 'proto.MapField', (['proto.STRING', 'proto.MESSAGE'], {'number': '(3)', 'message': '"""AdmissionRule"""'}), "(proto.STRING, proto.MESSAGE, number=3, message='AdmissionRule')\n", (4946, 5010), False, 'import proto\n'), ((5093, 5172), 'proto.MapField', 'proto.MapField', (['proto.STRING', 'proto.MESSAGE'], {'number': '(10)', 'message': '"""AdmissionRule"""'}), "(proto.STRING, proto.MESSAGE, number=10, message='AdmissionRule')\n", (5107, 5172), False, 'import proto\n'), ((5261, 5339), 'proto.MapField', 'proto.MapField', (['proto.STRING', 'proto.MESSAGE'], {'number': '(8)', 'message': '"""AdmissionRule"""'}), "(proto.STRING, proto.MESSAGE, number=8, 
message='AdmissionRule')\n", (5275, 5339), False, 'import proto\n'), ((5424, 5502), 'proto.MapField', 'proto.MapField', (['proto.STRING', 'proto.MESSAGE'], {'number': '(9)', 'message': '"""AdmissionRule"""'}), "(proto.STRING, proto.MESSAGE, number=9, message='AdmissionRule')\n", (5438, 5502), False, 'import proto\n'), ((5571, 5632), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(4)', 'message': '"""AdmissionRule"""'}), "(proto.MESSAGE, number=4, message='AdmissionRule')\n", (5582, 5632), False, 'import proto\n'), ((5682, 5751), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(5)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp)\n', (5693, 5751), False, 'import proto\n'), ((6455, 6490), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (6466, 6490), False, 'import proto\n'), ((8562, 8616), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(1)', 'enum': 'EvaluationMode'}), '(proto.ENUM, number=1, enum=EvaluationMode)\n', (8573, 8616), False, 'import proto\n'), ((8678, 8721), 'proto.RepeatedField', 'proto.RepeatedField', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (8697, 8721), False, 'import proto\n'), ((8768, 8823), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(3)', 'enum': 'EnforcementMode'}), '(proto.ENUM, number=3, enum=EnforcementMode)\n', (8779, 8823), False, 'import proto\n'), ((9800, 9835), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (9811, 9835), False, 'import proto\n'), ((9877, 9912), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(6)'}), '(proto.STRING, number=6)\n', (9888, 9912), False, 'import proto\n'), ((9966, 10062), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(3)', 'oneof': '"""attestor_type"""', 'message': '"""UserOwnedGrafeasNote"""'}), "(proto.MESSAGE, number=3, oneof='attestor_type', 
message=\n 'UserOwnedGrafeasNote')\n", (9977, 10062), False, 'import proto\n'), ((10115, 10184), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(4)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp)\n', (10126, 10184), False, 'import proto\n'), ((12243, 12278), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (12254, 12278), False, 'import proto\n'), ((12320, 12393), 'proto.RepeatedField', 'proto.RepeatedField', (['proto.MESSAGE'], {'number': '(2)', 'message': '"""AttestorPublicKey"""'}), "(proto.MESSAGE, number=2, message='AttestorPublicKey')\n", (12339, 12393), False, 'import proto\n'), ((12464, 12499), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (12475, 12499), False, 'import proto\n'), ((14457, 14492), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (14468, 14492), False, 'import proto\n'), ((14542, 14600), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(2)', 'enum': 'SignatureAlgorithm'}), '(proto.ENUM, number=2, enum=SignatureAlgorithm)\n', (14553, 14600), False, 'import proto\n'), ((16493, 16528), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (16504, 16528), False, 'import proto\n'), ((16561, 16596), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (16572, 16596), False, 'import proto\n'), ((16655, 16710), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)', 'oneof': '"""public_key"""'}), "(proto.STRING, number=3, oneof='public_key')\n", (16666, 16710), False, 'import proto\n'), ((16764, 16850), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(5)', 'oneof': '"""public_key"""', 'message': '"""PkixPublicKey"""'}), "(proto.MESSAGE, number=5, oneof='public_key', message=\n 'PkixPublicKey')\n", (16775, 
16850), False, 'import proto\n')] |
# General imports
import os, json, logging
import click
from pathlib import Path
import yaml
# From common
from luna_core.common.custom_logger import init_logger
from luna_core.common.DataStore import DataStore_v2
from luna_core.common.Node import Node
from luna_core.common.config import ConfigSet
from luna_core.common.sparksession import SparkConfig
@click.command()
@click.option('-a', '--app_config', required=True,
help="application configuration yaml file. See config.yaml.template for details.")
@click.option('-s', '--datastore_id', required=True,
help='datastore name. usually a slide id.')
@click.option('-m', '--method_param_path', required=True,
help='json parameter file with path to a WSI delta table.')
def cli(app_config, datastore_id, method_param_path):
"""Load a slide to the datastore from the whole slide image table.
app_config - application configuration yaml file. See config.yaml.template for details.
datastore_id - datastore name. usually a slide id.
method_param_path - json parameter file with path to a WSI delta table.
- job_tag: job tag to use for loading the slide
- table_path: path to the whole slide image table
- datastore_path: path to store data
"""
init_logger()
with open(method_param_path, 'r') as yaml_file:
method_data = yaml.safe_load(yaml_file)
load_slide_with_datastore(app_config, datastore_id, method_data)
def load_slide_with_datastore(app_config, datastore_id, method_data):
"""Load a slide to the datastore from the whole slide image table.
Args:
app_config (string): path to application configuration file.
datastore_id (string): datastore name. usually a slide id.
method_data (dict): method parameters including input, output details.
Returns:
None
"""
logger = logging.getLogger(f"[datastore={datastore_id}]")
# Do some setup
cfg = ConfigSet("APP_CFG", config_file=app_config)
datastore = DataStore_v2(method_data["datastore_path"])
method_id = method_data["job_tag"]
# fetch patient_id column
patient_id_column = method_data.get("patient_id_column_name", None)
if patient_id_column == "": patient_id_column = None
try:
spark = SparkConfig().spark_session("APP_CFG", "query_slide")
slide_id = datastore_id
if patient_id_column:
# assumes if patient_id column, source is parquet from dremio
# right now has nested row-type into dict, todo: account for map type representation of dict in dremio
df = spark.read.parquet(method_data['table_path'])\
.where(f"UPPER(slide_id)='{slide_id}'")\
.select("path", "metadata", patient_id_column)\
.toPandas()
if not len(df) == 1:
print(df)
raise ValueError(f"Resulting query record is not singular, multiple scan's exist given the container address {slide_id}")
record = df.loc[0]
properties = record['metadata']
properties['patient_id'] = str(record[patient_id_column])
else:
df = spark.read.format("delta").load(method_data['table_path'])\
.where(f"UPPER(slide_id)='{slide_id}'")\
.select("path", "metadata")\
.toPandas()
if not len(df) == 1:
print(df)
raise ValueError(f"Resulting query record is not singular, multiple scan's exist given the container address {slide_id}")
record = df.loc[0]
properties = record['metadata']
spark.stop()
except Exception as e:
logger.exception (f"{e}, stopping job execution...")
raise e
# Put results in the data store
data_path = Path(record['path'].split(':')[-1])
print(data_path)
datastore.put(data_path, datastore_id, method_id, "WholeSlideImage", symlink=True)
with open(os.path.join(method_data["datastore_path"], datastore_id, method_id, "WholeSlideImage", "metadata.json"), "w") as fp:
json.dump(properties, fp)
if __name__ == "__main__":
cli()
| [
"logging.getLogger",
"luna_core.common.custom_logger.init_logger",
"click.option",
"os.path.join",
"luna_core.common.DataStore.DataStore_v2",
"yaml.safe_load",
"luna_core.common.config.ConfigSet",
"luna_core.common.sparksession.SparkConfig",
"click.command",
"json.dump"
] | [((390, 405), 'click.command', 'click.command', ([], {}), '()\n', (403, 405), False, 'import click\n'), ((407, 549), 'click.option', 'click.option', (['"""-a"""', '"""--app_config"""'], {'required': '(True)', 'help': '"""application configuration yaml file. See config.yaml.template for details."""'}), "('-a', '--app_config', required=True, help=\n 'application configuration yaml file. See config.yaml.template for details.'\n )\n", (419, 549), False, 'import click\n'), ((555, 655), 'click.option', 'click.option', (['"""-s"""', '"""--datastore_id"""'], {'required': '(True)', 'help': '"""datastore name. usually a slide id."""'}), "('-s', '--datastore_id', required=True, help=\n 'datastore name. usually a slide id.')\n", (567, 655), False, 'import click\n'), ((666, 787), 'click.option', 'click.option', (['"""-m"""', '"""--method_param_path"""'], {'required': '(True)', 'help': '"""json parameter file with path to a WSI delta table."""'}), "('-m', '--method_param_path', required=True, help=\n 'json parameter file with path to a WSI delta table.')\n", (678, 787), False, 'import click\n'), ((1311, 1324), 'luna_core.common.custom_logger.init_logger', 'init_logger', ([], {}), '()\n', (1322, 1324), False, 'from luna_core.common.custom_logger import init_logger\n'), ((1911, 1959), 'logging.getLogger', 'logging.getLogger', (['f"""[datastore={datastore_id}]"""'], {}), "(f'[datastore={datastore_id}]')\n", (1928, 1959), False, 'import os, json, logging\n'), ((1991, 2035), 'luna_core.common.config.ConfigSet', 'ConfigSet', (['"""APP_CFG"""'], {'config_file': 'app_config'}), "('APP_CFG', config_file=app_config)\n", (2000, 2035), False, 'from luna_core.common.config import ConfigSet\n'), ((2052, 2095), 'luna_core.common.DataStore.DataStore_v2', 'DataStore_v2', (["method_data['datastore_path']"], {}), "(method_data['datastore_path'])\n", (2064, 2095), False, 'from luna_core.common.DataStore import DataStore_v2\n'), ((1400, 1425), 'yaml.safe_load', 'yaml.safe_load', (['yaml_file'], 
{}), '(yaml_file)\n', (1414, 1425), False, 'import yaml\n'), ((4149, 4174), 'json.dump', 'json.dump', (['properties', 'fp'], {}), '(properties, fp)\n', (4158, 4174), False, 'import os, json, logging\n'), ((4023, 4131), 'os.path.join', 'os.path.join', (["method_data['datastore_path']", 'datastore_id', 'method_id', '"""WholeSlideImage"""', '"""metadata.json"""'], {}), "(method_data['datastore_path'], datastore_id, method_id,\n 'WholeSlideImage', 'metadata.json')\n", (4035, 4131), False, 'import os, json, logging\n'), ((2325, 2338), 'luna_core.common.sparksession.SparkConfig', 'SparkConfig', ([], {}), '()\n', (2336, 2338), False, 'from luna_core.common.sparksession import SparkConfig\n')] |
import numpy as np
import cv2
from matplotlib import pyplot as plt
image = cv2.imread('champaigneditedcompressed.png')
kernel = np.ones((20, 20), np.float32) / 25
img = cv2.filter2D(image, -1, kernel)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray,10,0.01,10)
corners = np.int0(corners)
print(corners)
for i in corners:
x,y = i.ravel()
cv2.circle(img,(x,y),3,255,-1)
plt.imshow(img),plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.ones",
"cv2.goodFeaturesToTrack",
"numpy.int0",
"cv2.filter2D",
"cv2.circle",
"cv2.cvtColor",
"cv2.imread",
"matplotlib.pyplot.show"
] | [((77, 120), 'cv2.imread', 'cv2.imread', (['"""champaigneditedcompressed.png"""'], {}), "('champaigneditedcompressed.png')\n", (87, 120), False, 'import cv2\n'), ((171, 202), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel'], {}), '(image, -1, kernel)\n', (183, 202), False, 'import cv2\n'), ((210, 247), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (222, 247), False, 'import cv2\n'), ((258, 301), 'cv2.goodFeaturesToTrack', 'cv2.goodFeaturesToTrack', (['gray', '(10)', '(0.01)', '(10)'], {}), '(gray, 10, 0.01, 10)\n', (281, 301), False, 'import cv2\n'), ((309, 325), 'numpy.int0', 'np.int0', (['corners'], {}), '(corners)\n', (316, 325), True, 'import numpy as np\n'), ((130, 159), 'numpy.ones', 'np.ones', (['(20, 20)', 'np.float32'], {}), '((20, 20), np.float32)\n', (137, 159), True, 'import numpy as np\n'), ((383, 418), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', '(3)', '(255)', '(-1)'], {}), '(img, (x, y), 3, 255, -1)\n', (393, 418), False, 'import cv2\n'), ((415, 430), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (425, 430), True, 'from matplotlib import pyplot as plt\n'), ((431, 441), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (439, 441), True, 'from matplotlib import pyplot as plt\n')] |
'''
Created on 12 Jun, 2015
@author: artur.mrowca
'''
from enum import Enum
from PyQt5.Qt import QObject
from PyQt5 import QtCore
from tools.ecu_logging import ECULogger
import copy
class AbstractInputHandler(QObject):
publish_infos_sig = QtCore.pyqtSignal(list)
def __init__(self):
QObject.__init__(self)
self.next = None
self._recs = []
def set_next(self, input_handler):
self.next = input_handler
def subscribe(self, obj, func_name):
'''
all objects that subscribe to this function
publish their information here
'''
self._recs.append(obj)
exec('self.publish_infos_sig.connect(obj.%s)' % func_name)
def publish(self, cur_time, monitor_inputs):
# emit the signal to all connected receivers then call next publish
try:
res = [[monitor_input.time_called, str(monitor_input.mon_id), str(monitor_input.asc_id), str(monitor_input.tag), monitor_input.msg_id, str(monitor_input.message), \
monitor_input.msg_size, monitor_input.stream_id, str(monitor_input.unique_id), str(monitor_input.data)] \
for monitor_input in monitor_inputs.get() if monitor_input.tag in self._get_tags()]
# if there is a result only:
if res:
self.publish_infos_sig.emit(copy.deepcopy(res))
except:
ECULogger().log_traceback()
if self.next != None:
self.next.publish(cur_time, monitor_inputs)
def _get_tags(self):
return []
class BufferHandler(AbstractInputHandler):
def __init__(self):
AbstractInputHandler.__init__(self)
def _get_tags(self):
return [MonitorTags.BT_ECU_RECEIVE_BUFFER, MonitorTags.BT_ECU_TRANSMIT_BUFFER]
class CanBusHandler(AbstractInputHandler):
def __init__(self):
AbstractInputHandler.__init__(self)
def publish(self, cur_time, monitor_inputs):
# emit the signal to all connected receivers then call next publish
try:
res = [[monitor_input.time_called, str(monitor_input.mon_id), str(monitor_input.asc_id), str(monitor_input.tag), monitor_input.msg_id, str(monitor_input.message), \
monitor_input.msg_size, monitor_input.stream_id, str(monitor_input.unique_id), str(monitor_input.data)] \
for monitor_input in monitor_inputs.get() if monitor_input.tag in self._get_tags()]
if res:
self.publish_infos_sig.emit(copy.deepcopy([cur_time, res]))
except:
ECULogger().log_traceback()
if self.next != None:
self.next.publish(cur_time, monitor_inputs)
def _get_tags(self):
return [MonitorTags.CB_DONE_PROCESSING_MESSAGE, MonitorTags.CB_PROCESSING_MESSAGE]
class ConstellationHandler(AbstractInputHandler):
def __init__(self):
AbstractInputHandler.__init__(self)
self.first = True
def publish(self, values, monitor_inputs):
''' pushes the initial constellation exactly once
'''
try:
if self.first:
self.publish_infos_sig.emit(values)
self.first = False
except:
pass
if self.next != None:
self.next.publish(values, monitor_inputs)
def _get_tags(self):
return [MonitorTags.CONSELLATION_INFORMATION]
class EventlineHandler(AbstractInputHandler):
def __init__(self):
AbstractInputHandler.__init__(self)
self._tags_list =[MonitorTags.CP_SEC_INIT_AUTHENTICATION, \
MonitorTags.CP_SEC_RECEIVE_REG_MESSAGE, \
MonitorTags.CP_SEC_DECRYPTED_INNER_REG_MESSAGE, \
MonitorTags.CP_SEC_DECRYPTED_OUTER_REG_MESSAGE, \
MonitorTags.CP_SEC_VALIDATED_ECU_CERTIFICATE, \
MonitorTags.CP_SEC_CREATED_CMP_HASH_REG_MSG, \
MonitorTags.CP_SEC_COMPARED_HASH_REG_MSG, \
MonitorTags.CP_SEC_RECEIVE_REQ_MESSAGE, \
MonitorTags.CP_SEC_DECRYPTED_REQ_MESSAGE, \
MonitorTags.CP_SEC_ECNRYPTED_CONFIRMATION_MESSAGE, \
MonitorTags.CP_SEC_GENERATED_SESSION_KEY, \
MonitorTags.CP_SEC_ENCRYPTED_DENY_MESSAGE, \
MonitorTags.CP_SEC_ENCRYPTED_GRANT_MESSAGE, \
MonitorTags.CP_ECU_RECEIVE_SIMPLE_MESSAGE, \
MonitorTags.CP_ECU_DECRYPTED_SIMPLE_MESSAGE, \
MonitorTags.CP_ECU_INTENT_SEND_SIMPLE_MESSAGE, \
MonitorTags.CP_ECU_ENCRYPTED_SEND_SIMPLE_MESSAGE, \
MonitorTags.CP_ECU_RECEIVE_SEC_MOD_ADVERTISEMENT, \
MonitorTags.CP_ECU_VALIDATED_SEC_MOD_CERTIFICATE, \
MonitorTags.CP_ECU_START_CREATION_REG_MESSAGE, \
MonitorTags.CP_ECU_CREATED_ECU_KEY_REG_MESSAGE, \
MonitorTags.CP_ECU_ENCRYPTED_INNER_REG_MESSAGE, \
MonitorTags.CP_ECU_HASHED_INNER_REG_MESSAGE, \
MonitorTags.CP_ECU_ENCRYPTED_OUTER_REG_MESSAGE, \
MonitorTags.CP_ECU_SEND_REG_MESSAGE, \
MonitorTags.CP_ECU_RECEIVE_CONF_MESSAGE, \
MonitorTags.CP_ECU_DECRYPTED_CONF_MESSAGE, \
MonitorTags.CP_ECU_START_CREATE_REQ_MESSAGE, \
MonitorTags.CP_ECU_ENCRYPTED_REQ_MESSAGE, \
MonitorTags.CP_ECU_RECEIVE_DENY_MESSAGE, \
MonitorTags.CP_ECU_DECRYPTED_DENY_MESSAGE, \
MonitorTags.CP_ECU_RECEIVE_GRANT_MESSAGE, \
MonitorTags.CP_ECU_DECRYPTED_GRANT_MESSAGE, \
MonitorTags.CP_SESSION_AVAILABLE_SEND_MESSAGE, \
MonitorTags.CP_SEND_CLIENT_HELLO, \
MonitorTags.CP_RECEIVE_CLIENT_HELLO, \
MonitorTags.CP_SEND_ALERT_NO_CIPHERSUITE, \
MonitorTags.CP_SEND_SERVER_HELLO, \
MonitorTags.CP_SEND_SERVER_CERTIFICATE, \
MonitorTags.CP_SEND_SERVER_KEYEXCHANGE,
MonitorTags.CP_SEND_CERTIFICATE_REQUEST , \
MonitorTags.CP_SEND_SERVER_HELLO_DONE , \
MonitorTags.CP_RECEIVE_SERVER_HELLO , \
MonitorTags.CP_RECEIVE_SERVER_CERTIFICATE , \
MonitorTags.CP_RECEIVE_SERVER_KEYEXCHANGE , \
MonitorTags.CP_RECEIVE_CERTIFICATE_REQUEST , \
MonitorTags.CP_RECEIVE_SERVER_HELLO_DONE , \
MonitorTags.CP_SERVER_HELLO_DONE_VALIDATED_CERT , \
MonitorTags.CP_SEND_CLIENT_CERTIFICATE , \
MonitorTags.CP_INIT_SEND_CLIENT_KEYEXCHANGE , \
MonitorTags.CP_ENCRYPTED_CLIENT_KEYEXCHANGE , \
MonitorTags.CP_GENERATED_MASTERSEC_CLIENT_KEYEXCHANGE , \
MonitorTags.CP_INIT_SEND_CERTIFICATE_VERIFY , \
MonitorTags.CP_ENCRYPTED_CERTIFICATE_VERIFY , \
MonitorTags.CP_SEND_CIPHER_SPEC , \
MonitorTags.CP_INIT_CLIENT_FINISHED , \
MonitorTags.CP_HASHED_CLIENT_FINISHED , \
MonitorTags.CP_GENERATED_HASH_FROM_PRF_CLIENT_FINISHED , \
MonitorTags.CP_RECEIVE_CLIENT_CERTIFICATE , \
MonitorTags.CP_CLIENT_CERTIFICATE_VALIDATED , \
MonitorTags.CP_RECEIVE_CLIENT_KEYEXCHANGE , \
MonitorTags.CP_DECRYPTED_CLIENT_KEYEXCHANGE , \
MonitorTags.CP_RECEIVE_CERTIFICATE_VERIFY , \
MonitorTags.CP_DECRYPTED_CERTIFICATE_VERIFY , \
MonitorTags.CP_GENERATED_MASTER_SECRET_CERT_VERIFY , \
MonitorTags.CP_RECEIVED_CHANGE_CIPHER_SPEC , \
MonitorTags.CP_RECEIVE_CLIENT_FINISHED , \
MonitorTags.CP_CLIENT_FINISHED_HASHED_COMPARISON_HASH , \
MonitorTags.CP_CLIENT_FINISHED_GENERATED_HASH_PRF , \
MonitorTags.CP_RECEIVE_SERVER_FINISHED , \
MonitorTags.CP_SERVER_FINISHED_HASHED_COMPARISON_HASH , \
MonitorTags.CP_SERVER_FINISHED_GENERATED_HASH_PRF , \
MonitorTags.CP_INIT_SERVER_FINISHED , \
MonitorTags.CP_HASHED_SERVER_FINISHED , \
MonitorTags.CP_GENERATED_HASH_FROM_PRF_SERVER_FINISHED , \
MonitorTags.CP_SERVER_AUTHENTICATED , \
MonitorTags.CP_CLIENT_AUTHENTICATED, \
MonitorTags.CP_RECEIVE_SIMPLE_MESSAGE, \
MonitorTags.CP_INIT_EXCHANGE_FIRST_KEY_KN, \
MonitorTags.CP_ENCRYPTED_EXCHANGE_FIRST_KEY_KN, \
MonitorTags.CP_SETUP_INIT_CREATE_KEYS, \
MonitorTags.CP_SETUP_FINISHED_CREATE_KEYS, \
MonitorTags.CP_INIT_TRANSMIT_MESSAGE, \
MonitorTags.CP_MACED_TRANSMIT_MESSAGE, \
MonitorTags.CP_RECEIVED_SIMPLE_MESSAGE, \
MonitorTags.CP_BUFFERED_SIMPLE_MESSAGE, \
MonitorTags.CP_RETURNED_AUTHENTICATED_SIMPLE_MESSAGE, \
MonitorTags.CP_RECEIVED_EXCHANGE_FIRST_KEY_KN, \
MonitorTags.CP_DECRYPTED_EXCHANGE_FIRST_KEY_KN, \
MonitorTags.CP_CHECKED_KEY_LEGID, \
MonitorTags.CP_INIT_CHECK_KEY_LEGID, \
MonitorTags.CP_INIT_VERIFYING_BUFFER_MESSAGE, \
MonitorTags.CP_FINISHED_VERIFYING_BUFFER_MESSAGE, \
MonitorTags.CP_SEND_SYNC_MESSAGE, \
MonitorTags.CP_SEND_SYNC_RESPONSE_MESSAGE, \
MonitorTags.CP_RECEIVE_SYNC_RESPONSE_MESSAGE]
def publish(self, values, monitor_inputs):
''' pushes the ecu ids or the
view
'''
try:
if values.tag == MonitorTags.ECU_ID_LIST:
self.publish_infos_sig.emit([ecu.ecu_id for ecu in values.data])
else:
AbstractInputHandler.publish(self, values, monitor_inputs)
except:
try:
AbstractInputHandler.publish(self, values, monitor_inputs)
except:
pass
if self.next != None:
self.next.publish(values, monitor_inputs)
def _get_tags(self):
return self._tags_list
def register_tag(self, tag):
self._tags_list += [tag]
def register_tags(self, tags_list):
self._tags_list += tags_list
self._tags_list = list(set(self._tags_list))
class CheckpointHandler(AbstractInputHandler):
''' reads and publishes all Checkpoint Monitor values'''
def __init__(self):
AbstractInputHandler.__init__(self)
self._tags_list = [MonitorTags.CP_SEC_INIT_AUTHENTICATION, \
MonitorTags.CP_SEC_RECEIVE_REG_MESSAGE, \
MonitorTags.CP_SEC_DECRYPTED_INNER_REG_MESSAGE, \
MonitorTags.CP_SEC_DECRYPTED_OUTER_REG_MESSAGE, \
MonitorTags.CP_SEC_VALIDATED_ECU_CERTIFICATE, \
MonitorTags.CP_SEC_CREATED_CMP_HASH_REG_MSG, \
MonitorTags.CP_SEC_COMPARED_HASH_REG_MSG, \
MonitorTags.CP_SEC_RECEIVE_REQ_MESSAGE, \
MonitorTags.CP_SEC_DECRYPTED_REQ_MESSAGE, \
MonitorTags.CP_SEC_ECNRYPTED_CONFIRMATION_MESSAGE, \
MonitorTags.CP_SEC_GENERATED_SESSION_KEY, \
MonitorTags.CP_SEC_ENCRYPTED_DENY_MESSAGE, \
MonitorTags.CP_SEC_ENCRYPTED_GRANT_MESSAGE, \
MonitorTags.CP_ECU_RECEIVE_SIMPLE_MESSAGE, \
MonitorTags.CP_ECU_DECRYPTED_SIMPLE_MESSAGE, \
MonitorTags.CP_ECU_INTENT_SEND_SIMPLE_MESSAGE, \
MonitorTags.CP_ECU_ENCRYPTED_SEND_SIMPLE_MESSAGE, \
MonitorTags.CP_ECU_RECEIVE_SEC_MOD_ADVERTISEMENT, \
MonitorTags.CP_ECU_VALIDATED_SEC_MOD_CERTIFICATE, \
MonitorTags.CP_ECU_START_CREATION_REG_MESSAGE, \
MonitorTags.CP_ECU_CREATED_ECU_KEY_REG_MESSAGE, \
MonitorTags.CP_ECU_ENCRYPTED_INNER_REG_MESSAGE, \
MonitorTags.CP_ECU_HASHED_INNER_REG_MESSAGE, \
MonitorTags.CP_ECU_ENCRYPTED_OUTER_REG_MESSAGE, \
MonitorTags.CP_ECU_SEND_REG_MESSAGE, \
MonitorTags.CP_ECU_RECEIVE_CONF_MESSAGE, \
MonitorTags.CP_ECU_DECRYPTED_CONF_MESSAGE, \
MonitorTags.CP_ECU_START_CREATE_REQ_MESSAGE, \
MonitorTags.CP_ECU_ENCRYPTED_REQ_MESSAGE, \
MonitorTags.CP_ECU_RECEIVE_DENY_MESSAGE, \
MonitorTags.CP_ECU_DECRYPTED_DENY_MESSAGE, \
MonitorTags.CP_ECU_RECEIVE_GRANT_MESSAGE, \
MonitorTags.CP_ECU_DECRYPTED_GRANT_MESSAGE, \
MonitorTags.CP_SESSION_AVAILABLE_SEND_MESSAGE, \
MonitorTags.CP_SEND_CLIENT_HELLO, \
MonitorTags.CP_RECEIVE_CLIENT_HELLO, \
MonitorTags.CP_SEND_ALERT_NO_CIPHERSUITE, \
MonitorTags.CP_SEND_SERVER_HELLO, \
MonitorTags.CP_SEND_SERVER_CERTIFICATE, \
MonitorTags.CP_SEND_SERVER_KEYEXCHANGE,
MonitorTags.CP_SEND_CERTIFICATE_REQUEST , \
MonitorTags.CP_SEND_SERVER_HELLO_DONE , \
MonitorTags.CP_RECEIVE_SERVER_HELLO , \
MonitorTags.CP_RECEIVE_SERVER_CERTIFICATE , \
MonitorTags.CP_RECEIVE_SERVER_KEYEXCHANGE , \
MonitorTags.CP_RECEIVE_CERTIFICATE_REQUEST , \
MonitorTags.CP_RECEIVE_SERVER_HELLO_DONE , \
MonitorTags.CP_SERVER_HELLO_DONE_VALIDATED_CERT , \
MonitorTags.CP_SEND_CLIENT_CERTIFICATE , \
MonitorTags.CP_INIT_SEND_CLIENT_KEYEXCHANGE , \
MonitorTags.CP_ENCRYPTED_CLIENT_KEYEXCHANGE , \
MonitorTags.CP_GENERATED_MASTERSEC_CLIENT_KEYEXCHANGE , \
MonitorTags.CP_INIT_SEND_CERTIFICATE_VERIFY , \
MonitorTags.CP_ENCRYPTED_CERTIFICATE_VERIFY , \
MonitorTags.CP_SEND_CIPHER_SPEC , \
MonitorTags.CP_INIT_CLIENT_FINISHED , \
MonitorTags.CP_HASHED_CLIENT_FINISHED , \
MonitorTags.CP_GENERATED_HASH_FROM_PRF_CLIENT_FINISHED , \
MonitorTags.CP_RECEIVE_CLIENT_CERTIFICATE , \
MonitorTags.CP_CLIENT_CERTIFICATE_VALIDATED , \
MonitorTags.CP_RECEIVE_CLIENT_KEYEXCHANGE , \
MonitorTags.CP_DECRYPTED_CLIENT_KEYEXCHANGE , \
MonitorTags.CP_RECEIVE_CERTIFICATE_VERIFY , \
MonitorTags.CP_DECRYPTED_CERTIFICATE_VERIFY , \
MonitorTags.CP_GENERATED_MASTER_SECRET_CERT_VERIFY , \
MonitorTags.CP_RECEIVED_CHANGE_CIPHER_SPEC , \
MonitorTags.CP_RECEIVE_CLIENT_FINISHED , \
MonitorTags.CP_CLIENT_FINISHED_HASHED_COMPARISON_HASH , \
MonitorTags.CP_CLIENT_FINISHED_GENERATED_HASH_PRF , \
MonitorTags.CP_RECEIVE_SERVER_FINISHED , \
MonitorTags.CP_SERVER_FINISHED_HASHED_COMPARISON_HASH , \
MonitorTags.CP_SERVER_FINISHED_GENERATED_HASH_PRF , \
MonitorTags.CP_INIT_SERVER_FINISHED , \
MonitorTags.CP_HASHED_SERVER_FINISHED , \
MonitorTags.CP_GENERATED_HASH_FROM_PRF_SERVER_FINISHED , \
MonitorTags.CP_SERVER_AUTHENTICATED , \
MonitorTags.CP_CLIENT_AUTHENTICATED, \
MonitorTags.CP_RECEIVE_SIMPLE_MESSAGE, \
MonitorTags.CP_INIT_EXCHANGE_FIRST_KEY_KN, \
MonitorTags.CP_ENCRYPTED_EXCHANGE_FIRST_KEY_KN, \
MonitorTags.CP_SETUP_INIT_CREATE_KEYS, \
MonitorTags.CP_SETUP_FINISHED_CREATE_KEYS, \
MonitorTags.CP_INIT_TRANSMIT_MESSAGE, \
MonitorTags.CP_MACED_TRANSMIT_MESSAGE, \
MonitorTags.CP_RECEIVED_SIMPLE_MESSAGE, \
MonitorTags.CP_BUFFERED_SIMPLE_MESSAGE, \
MonitorTags.CP_RETURNED_AUTHENTICATED_SIMPLE_MESSAGE, \
MonitorTags.CP_RECEIVED_EXCHANGE_FIRST_KEY_KN, \
MonitorTags.CP_DECRYPTED_EXCHANGE_FIRST_KEY_KN, \
MonitorTags.CP_INIT_CHECK_KEY_LEGID, \
MonitorTags.CP_CHECKED_KEY_LEGID, \
MonitorTags.CP_INIT_VERIFYING_BUFFER_MESSAGE, \
MonitorTags.CP_FINISHED_VERIFYING_BUFFER_MESSAGE, \
MonitorTags.CP_SEND_SYNC_MESSAGE, \
MonitorTags.CP_SEND_SYNC_RESPONSE_MESSAGE, \
MonitorTags.CP_RECEIVE_SYNC_RESPONSE_MESSAGE]
# override
def publish(self, cur_time, monitor_inputs):
# emit the signal to all connected receivers then call next publish
try:
res = [[monitor_input.time_called, str(monitor_input.mon_id), str(monitor_input.asc_id), str(monitor_input.tag), monitor_input.msg_id, str(monitor_input.message), \
monitor_input.msg_size, monitor_input.stream_id, str(monitor_input.unique_id), str(monitor_input.data)] \
for monitor_input in monitor_inputs.get() if monitor_input.tag in self._get_tags()]
self.publish_infos_sig.emit([None, None])
except:
ECULogger().log_traceback()
if self.next != None:
self.next.publish(cur_time, monitor_inputs)
def _get_tags(self):
return self._tags_list
def register_tag(self, tag):
self._tags_list += [tag]
def register_tags(self, tags_list):
self._tags_list += tags_list
class InputHandlerChain(object):
def add_handler(self, handler):
try:
self._next_handler.set_next(handler)
except:
self._handler = handler
self._next_handler = handler
return handler
def handler(self):
return self._handler
class MonitorInput(object):
'''
Keeps the input data of a monitor. Tag defines the type of data arriving
Possible Tags are MonitorTags
'''
def __init__(self, data, monitor_tag, mon_id=False, time_called=False, asc_id=None, \
msg_id=-1, message=None, msg_size=-1, stream_id=-1, unique_id=None):
self.data = data
self.tag = monitor_tag
self.mon_id = mon_id
self.time_called = time_called
self.calling_object = None
self.asc_id = asc_id
self.msg_id = msg_id
self.message = message
self.msg_size = msg_size
self.stream_id = stream_id
self.unique_id = unique_id
class MonitorTags(Enum):
# Buffer Tags
BT_ECU_TRANSMIT_BUFFER = 1
BT_ECU_RECEIVE_BUFFER = 2
# Receiving/Sending Times, Message Contents SPAETER MIT CHECKPOINTS ZUSAMMENFUEHREN
# Checkpoints - SEC MOD SIDE
CP_SEC_INIT_AUTHENTICATION = 7 # Sec Mod. initialized authentication
CP_SEC_RECEIVE_REG_MESSAGE = 8 # Sec Mod receive the registration message
CP_SEC_DECRYPTED_INNER_REG_MESSAGE = 10 # Sec Mod decrypted inner reg Msg
CP_SEC_DECRYPTED_OUTER_REG_MESSAGE = 11 # Sec Mod decrypted outer reg Msg
CP_SEC_VALIDATED_ECU_CERTIFICATE = 12 # Sec Mod. validated the ECU Certificate
CP_SEC_CREATED_CMP_HASH_REG_MSG = 13 # Create the comparision hash for validation of reg msg hash
CP_SEC_COMPARED_HASH_REG_MSG = 14 # Finished comparing the reg msg hash to the created hash and sends the message
CP_SEC_RECEIVE_REQ_MESSAGE = 15 # Sec Mod. received the request message
CP_SEC_DECRYPTED_REQ_MESSAGE = 16 # Sec Mod decrypted the request message
CP_SEC_ECNRYPTED_CONFIRMATION_MESSAGE = 9 # Sec Mod created confirmation message and sends it
CP_SEC_GENERATED_SESSION_KEY = 17 # Sec Mod. generated the session key
CP_SEC_ENCRYPTED_DENY_MESSAGE = 18 # Sec. Mod encrypted the deny message
CP_SEC_ENCRYPTED_GRANT_MESSAGE = 19 # Sec. Mod encrypted the grant message
# Checkpoints - ECU SIDE
CP_ECU_RECEIVE_SIMPLE_MESSAGE = 20 # ECU receives a encrypted simple message
CP_ECU_DECRYPTED_SIMPLE_MESSAGE = 21 # ECU decrypted the received simple message
CP_ECU_INTENT_SEND_SIMPLE_MESSAGE = 22 # ECU decides on comm. module that it wants to send a simple message
CP_ECU_ENCRYPTED_SEND_SIMPLE_MESSAGE = 23 # ECU encrypted message and sends it
CP_ECU_RECEIVE_SEC_MOD_ADVERTISEMENT = 24 # ECU receives the SEC Module advertisement
CP_ECU_VALIDATED_SEC_MOD_CERTIFICATE = 25 # ECU validated the sec. mod. certificate
CP_ECU_START_CREATION_REG_MESSAGE = 26 # ECU starts to create the registration message
CP_ECU_CREATED_ECU_KEY_REG_MESSAGE = 27 # ECU created the sym. ECU key
CP_ECU_ENCRYPTED_INNER_REG_MESSAGE = 28 # ECU encrypted inner reg. msg
CP_ECU_HASHED_INNER_REG_MESSAGE = 29 # ECU hashed inner reg. msg
CP_ECU_ENCRYPTED_OUTER_REG_MESSAGE = 30 # ECU encrypted the outer reg. msg
CP_ECU_SEND_REG_MESSAGE = 31 # ECU sends the reg. message
CP_ECU_RECEIVE_CONF_MESSAGE = 32 # ECU receives a confirmation message
CP_ECU_DECRYPTED_CONF_MESSAGE = 33 # ECU decrypted the confirmation message
CP_ECU_START_CREATE_REQ_MESSAGE = 34 # ECU Starts creation of request message
CP_ECU_ENCRYPTED_REQ_MESSAGE = 35 # ECU encrypted the request message and sends it
CP_ECU_RECEIVE_DENY_MESSAGE = 36 # ECU receives a deny message
CP_ECU_DECRYPTED_DENY_MESSAGE = 37 # ECU decrypted the deny message
CP_ECU_RECEIVE_GRANT_MESSAGE = 38 # ECU receives a grant message
CP_ECU_DECRYPTED_GRANT_MESSAGE = 39 # ECU decrypted the grant message
CP_ECU_ALREADY_AUTHENTICATED = 40 # The ECU is already authenticated and the message is discareded
# Checkpoints - TLS
CP_SESSION_AVAILABLE_SEND_MESSAGE = 43 # There is a session available for this stream and the message is transmitted
CP_SEND_CLIENT_HELLO = 44 # No session is available for that stream. Send the client hello message
CP_RECEIVE_CLIENT_HELLO = 45 # Receive the client hello and answer
CP_SEND_ALERT_NO_CIPHERSUITE = 46 # alert message if the wrong ciphersuite was chosen
CP_SEND_SERVER_HELLO = 47 # send the server Hello message
CP_SEND_SERVER_CERTIFICATE = 48 # send the server Certificate message
CP_SEND_SERVER_KEYEXCHANGE = 49 # send the server Keyexchange message
CP_SEND_CERTIFICATE_REQUEST = 50 # send the certificate request message
CP_SEND_SERVER_HELLO_DONE = 51 # send the server Hello done message
CP_RECEIVE_SERVER_HELLO = 52
CP_RECEIVE_SERVER_CERTIFICATE = 53
CP_RECEIVE_SERVER_KEYEXCHANGE = 54
CP_RECEIVE_CERTIFICATE_REQUEST = 55
CP_RECEIVE_SERVER_HELLO_DONE = 56
CP_SERVER_HELLO_DONE_VALIDATED_CERT = 57
CP_SEND_CLIENT_CERTIFICATE = 58
CP_INIT_SEND_CLIENT_KEYEXCHANGE = 59
CP_ENCRYPTED_CLIENT_KEYEXCHANGE = 60
CP_GENERATED_MASTERSEC_CLIENT_KEYEXCHANGE = 61
CP_INIT_SEND_CERTIFICATE_VERIFY = 62
CP_ENCRYPTED_CERTIFICATE_VERIFY = 63
CP_SEND_CIPHER_SPEC = 64 # Send the cipher spec message
CP_INIT_CLIENT_FINISHED = 65 # start to create client finished message
CP_HASHED_CLIENT_FINISHED = 66 # finished to hash the client finished message data
CP_GENERATED_HASH_FROM_PRF_CLIENT_FINISHED = 67 # Hash generated and sending message started
CP_RECEIVE_CLIENT_CERTIFICATE = 68 # receive the client certificate
CP_CLIENT_CERTIFICATE_VALIDATED = 69 # Finished validation of client certificate
CP_RECEIVE_CLIENT_KEYEXCHANGE = 70
CP_DECRYPTED_CLIENT_KEYEXCHANGE = 71
CP_RECEIVE_CERTIFICATE_VERIFY = 72
CP_DECRYPTED_CERTIFICATE_VERIFY = 73
CP_GENERATED_MASTER_SECRET_CERT_VERIFY = 74
CP_RECEIVED_CHANGE_CIPHER_SPEC = 75
# CP_RECEIVED_CLIENT_FINISHED = 76
CP_RECEIVE_CLIENT_FINISHED = 83
CP_CLIENT_FINISHED_HASHED_COMPARISON_HASH = 84
CP_CLIENT_FINISHED_GENERATED_HASH_PRF = 85
CP_RECEIVE_SERVER_FINISHED = 80
CP_SERVER_FINISHED_HASHED_COMPARISON_HASH = 81
CP_SERVER_FINISHED_GENERATED_HASH_PRF = 82
CP_INIT_SERVER_FINISHED = 77 # start to create SERVER finished message
CP_HASHED_SERVER_FINISHED = 78 # finished to hash the SERVER finished message data
CP_GENERATED_HASH_FROM_PRF_SERVER_FINISHED = 79 # Hash generated and sending message started
CP_SERVER_AUTHENTICATED = 86
CP_CLIENT_AUTHENTICATED = 87
CP_RECEIVE_SIMPLE_MESSAGE = 88
# Checkpoints - TESLA
CP_INIT_EXCHANGE_FIRST_KEY_KN = 89 # Intention to send the Key K N to receiver xy
CP_ENCRYPTED_EXCHANGE_FIRST_KEY_KN = 90 # Encryption finished for Key K_N to receiver xy
CP_SETUP_INIT_CREATE_KEYS = 91 # Start the creation of keys
CP_SETUP_FINISHED_CREATE_KEYS = 92 # Finished creating keys
CP_INIT_TRANSMIT_MESSAGE = 93 # Intention to send a simple message
CP_MACED_TRANSMIT_MESSAGE = 94 # Finished MAC Creation of message now send it
CP_RECEIVED_SIMPLE_MESSAGE = 95 # Received a simple message
CP_BUFFERED_SIMPLE_MESSAGE = 96 # Added simple message to buffer
CP_RETURNED_AUTHENTICATED_SIMPLE_MESSAGE = 97 # Authenticated messages are returned
CP_RECEIVED_EXCHANGE_FIRST_KEY_KN = 98 # received first key message
CP_DECRYPTED_EXCHANGE_FIRST_KEY_KN = 99 # decrypted first key message
CP_INIT_CHECK_KEY_LEGID = 99.5 # start to check if key legid
CP_CHECKED_KEY_LEGID = 100 # checked the key legidity
CP_INIT_VERIFYING_BUFFER_MESSAGE = 101 # Start validation of message in buffer
CP_FINISHED_VERIFYING_BUFFER_MESSAGE = 102 # Done validation of message in buffer
CP_SEND_SYNC_MESSAGE = 103 # send the time sync message from the ECU
CP_SEND_SYNC_RESPONSE_MESSAGE = 104
CP_RECEIVE_SYNC_RESPONSE_MESSAGE = 105 # End message
CP_RECEIVE_SYNC_MESSAGE = 106 # sync message was received
# CAN BUS TAGS
CB_DONE_PROCESSING_MESSAGE = 41
CB_PROCESSING_MESSAGE = 42
# Constellation Handler
CONSELLATION_INFORMATION = 107
ECU_ID_LIST = 108
| [
"PyQt5.Qt.QObject.__init__",
"PyQt5.QtCore.pyqtSignal",
"tools.ecu_logging.ECULogger",
"copy.deepcopy"
] | [((258, 281), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['list'], {}), '(list)\n', (275, 281), False, 'from PyQt5 import QtCore\n'), ((322, 344), 'PyQt5.Qt.QObject.__init__', 'QObject.__init__', (['self'], {}), '(self)\n', (338, 344), False, 'from PyQt5.Qt import QObject\n'), ((1429, 1447), 'copy.deepcopy', 'copy.deepcopy', (['res'], {}), '(res)\n', (1442, 1447), False, 'import copy\n'), ((2673, 2703), 'copy.deepcopy', 'copy.deepcopy', (['[cur_time, res]'], {}), '([cur_time, res])\n', (2686, 2703), False, 'import copy\n'), ((1479, 1490), 'tools.ecu_logging.ECULogger', 'ECULogger', ([], {}), '()\n', (1488, 1490), False, 'from tools.ecu_logging import ECULogger\n'), ((2735, 2746), 'tools.ecu_logging.ECULogger', 'ECULogger', ([], {}), '()\n', (2744, 2746), False, 'from tools.ecu_logging import ECULogger\n'), ((16655, 16666), 'tools.ecu_logging.ECULogger', 'ECULogger', ([], {}), '()\n', (16664, 16666), False, 'from tools.ecu_logging import ECULogger\n')] |
################################################################################
# Copyright: <NAME> 2019
#
# Apache 2.0 License
#
# This file contains all code related to pid check objects
#
################################################################################
import re
import requests
from breadp.checks import Check
from breadp.checks.result import BooleanResult
class IsValidDoiCheck(Check):
    """ Checks whether an RDP has a valid DOI as PID

    Methods
    -------
    _do_check(self, rdp)
        returns a BooleanResult
    """
    def __init__(self):
        Check.__init__(self)
        self.id = 0
        self.version = "0.0.1"

    def _do_check(self, rdp):
        pid = rdp.pid
        # a missing PID means the check could not really be run
        if not pid:
            return BooleanResult(False, "RDP has no PID", False)
        # valid DOIs start with "10." plus a 4+ digit registrant and a suffix
        if re.match(r"^10\.\d{4}\d*/.*", pid):
            return BooleanResult(True, "", True)
        return BooleanResult(False, "{} is not a valid DOI".format(pid), True)
class DoiResolvesCheck(Check):
    """ Checks whether the DOI of an RDP resolves

    Methods
    -------
    _do_check(self, rdp)
        returns a BooleanResult
    """
    def __init__(self):
        Check.__init__(self)
        self.id = 1
        self.version = "0.0.1"

    def _do_check(self, rdp):
        if not rdp.pid:
            return BooleanResult(False, "RDP has no PID", False)
        try:
            response = requests.head('https://doi.org/' + rdp.pid)
        except Exception as e:
            return BooleanResult(
                False, "{}: {}".format(type(e).__name__, e), False)
        # doi.org answers a resolvable DOI with a 302 redirect
        if response.status_code == 302:
            msg = "Location of resolved doi: {}".format(
                response.headers.get('Location'))
            return BooleanResult(True, msg, True)
        msg = "Could not resolve {}, status code: {}".format(
            rdp.pid, response.status_code)
        return BooleanResult(False, msg, True)
| [
"breadp.checks.Check.__init__",
"requests.head",
"re.match",
"breadp.checks.result.BooleanResult"
] | [((586, 606), 'breadp.checks.Check.__init__', 'Check.__init__', (['self'], {}), '(self)\n', (600, 606), False, 'from breadp.checks import Check\n'), ((811, 851), 're.match', 're.match', (['"""^10\\\\.\\\\d{4}\\\\d*/.*"""', 'rdp.pid'], {}), "('^10\\\\.\\\\d{4}\\\\d*/.*', rdp.pid)\n", (819, 851), False, 'import re\n'), ((969, 1000), 'breadp.checks.result.BooleanResult', 'BooleanResult', (['(False)', 'msg', '(True)'], {}), '(False, msg, True)\n', (982, 1000), False, 'from breadp.checks.result import BooleanResult\n'), ((1205, 1225), 'breadp.checks.Check.__init__', 'Check.__init__', (['self'], {}), '(self)\n', (1219, 1225), False, 'from breadp.checks import Check\n'), ((1957, 1987), 'breadp.checks.result.BooleanResult', 'BooleanResult', (['(True)', 'msg', '(True)'], {}), '(True, msg, True)\n', (1970, 1987), False, 'from breadp.checks.result import BooleanResult\n'), ((767, 799), 'breadp.checks.result.BooleanResult', 'BooleanResult', (['(False)', 'msg', '(False)'], {}), '(False, msg, False)\n', (780, 799), False, 'from breadp.checks.result import BooleanResult\n'), ((870, 899), 'breadp.checks.result.BooleanResult', 'BooleanResult', (['(True)', '""""""', '(True)'], {}), "(True, '', True)\n", (883, 899), False, 'from breadp.checks.result import BooleanResult\n'), ((1386, 1418), 'breadp.checks.result.BooleanResult', 'BooleanResult', (['(False)', 'msg', '(False)'], {}), '(False, msg, False)\n', (1399, 1418), False, 'from breadp.checks.result import BooleanResult\n'), ((1455, 1498), 'requests.head', 'requests.head', (["('https://doi.org/' + rdp.pid)"], {}), "('https://doi.org/' + rdp.pid)\n", (1468, 1498), False, 'import requests\n'), ((1810, 1841), 'breadp.checks.result.BooleanResult', 'BooleanResult', (['(False)', 'msg', '(True)'], {}), '(False, msg, True)\n', (1823, 1841), False, 'from breadp.checks.result import BooleanResult\n'), ((1604, 1636), 'breadp.checks.result.BooleanResult', 'BooleanResult', (['(False)', 'msg', '(False)'], {}), '(False, msg, False)\n', (1617, 
1636), False, 'from breadp.checks.result import BooleanResult\n')] |
from unittest import TestCase
from src.domain.holiday import Holiday
import src.infrastructure.persistence.holiday_dynamo_repository as repository
# Shared fixture: the holiday instance handed to the repository translator
HOLIDAY = Holiday(
    date='2021-12-25',
    name='Natal',
    category='NATIONAL'
)
class TestHolidayDynamoRepository(TestCase):
    """Unit tests for the holiday DynamoDB repository translation layer."""

    def test_holiday_must_be_dict_when_given_as_object(self):
        translated = repository.translate_holiday_to_dynamo(HOLIDAY)
        self.assertIsInstance(
            translated, dict, 'Holiday was not returned as a dict')
| [
"src.domain.holiday.Holiday",
"src.infrastructure.persistence.holiday_dynamo_repository.translate_holiday_to_dynamo"
] | [((158, 219), 'src.domain.holiday.Holiday', 'Holiday', ([], {'date': '"""2021-12-25"""', 'name': '"""Natal"""', 'category': '"""NATIONAL"""'}), "(date='2021-12-25', name='Natal', category='NATIONAL')\n", (165, 219), False, 'from src.domain.holiday import Holiday\n'), ((363, 410), 'src.infrastructure.persistence.holiday_dynamo_repository.translate_holiday_to_dynamo', 'repository.translate_holiday_to_dynamo', (['HOLIDAY'], {}), '(HOLIDAY)\n', (401, 410), True, 'import src.infrastructure.persistence.holiday_dynamo_repository as repository\n')] |
import logging
import tensorflow as tf
import recsys.util.proto.config_pb2 as config
def int64_feature(val):
    """Wrap a single integer in a tf.train.Feature holding an Int64List."""
    int_list = tf.train.Int64List(value=[val])
    return tf.train.Feature(int64_list=int_list)
def int64_list_feature(val_list):
    """Wrap a list of integers in a tf.train.Feature holding an Int64List."""
    int_list = tf.train.Int64List(value=val_list)
    return tf.train.Feature(int64_list=int_list)
def bytes_feature(val):
    """Wrap a single bytes value in a tf.train.Feature holding a BytesList."""
    byte_list = tf.train.BytesList(value=[val])
    return tf.train.Feature(bytes_list=byte_list)
def bytes_list_feature(val_list):
    """Wrap a list of bytes values in a tf.train.Feature holding a BytesList."""
    byte_list = tf.train.BytesList(value=val_list)
    return tf.train.Feature(bytes_list=byte_list)
def float_feature(val):
    """Wrap a single float in a tf.train.Feature holding a FloatList."""
    float_list = tf.train.FloatList(value=[val])
    return tf.train.Feature(float_list=float_list)
def float_list_feature(val_list):
    """Wrap a list of floats in a tf.train.Feature holding a FloatList."""
    float_list = tf.train.FloatList(value=val_list)
    return tf.train.Feature(float_list=float_list)
def string_feature(str):
    """Wrap a unicode string (encoded as utf-8 bytes) in a tf.train.Feature."""
    encoded = str.encode('utf-8')
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[encoded]))
def string_list_feature(str_list):
    """Wrap a list of unicode strings (utf-8 encoded) in a tf.train.Feature."""
    return bytes_list_feature([s.encode('utf-8') for s in str_list])
# Constructs a tf.Example with feature dictionary where key is feature name and
# value is tf.train.Feature
def example_from_feature_dict(feature_dict):
    """Build a tf.train.Example from a {feature_name: tf.train.Feature} dict."""
    wrapped = tf.train.Features(feature=feature_dict)
    return tf.train.Example(features=wrapped)
def get_int64_feature(example, feature_name):
    """Return the single int64 value stored under feature_name."""
    feature = example.features.feature[feature_name]
    return feature.int64_list.value[0]
def get_int64_list_feature(example, feature_name):
    """Return all int64 values stored under feature_name as a plain list."""
    values = example.features.feature[feature_name].int64_list.value
    return list(values)
def get_float_feature(example, feature_name):
    """Return the single float value stored under feature_name."""
    feature = example.features.feature[feature_name]
    return feature.float_list.value[0]
def get_float_list_feature(example, feature_name):
    """Return all float values stored under feature_name as a plain list."""
    values = example.features.feature[feature_name].float_list.value
    return list(values)
def get_bytes_feature(example, feature_name):
    """Return the single bytes value stored under feature_name."""
    feature = example.features.feature[feature_name]
    return feature.bytes_list.value[0]
def get_bytes_list_feature(example, feature_name):
    """Return the repeated bytes container stored under feature_name.

    NOTE: unlike the int64/float list getters, this returns the underlying
    value container as-is (no list() copy), matching historical behavior.
    """
    feature = example.features.feature[feature_name]
    return feature.bytes_list.value
def get_string_feature(example, feature_name):
    """Return the single bytes value under feature_name, decoded as utf-8."""
    raw = example.features.feature[feature_name].bytes_list.value[0]
    return raw.decode('utf-8')
def get_string_list_feature(example, feature_name):
    """Return all bytes values under feature_name, decoded as utf-8 strings."""
    raw_values = example.features.feature[feature_name].bytes_list.value
    return [raw.decode('utf-8') for raw in raw_values]
# Reads batched features and labels from given files, and consumes them
# through callback function "consume_batch_fn".
# feature_spec: dictionary specifying the type of each feature.
# input_config: settings for generating batched features and labels.
# consume_batch_fn: callback function that defines the consumption of the
#   batched features and labels.
def fetch_and_process_features(filenames, feature_spec, input_config, consume_batch_fn):
    """Stream shuffled TFRecord batches into consume_batch_fn (TF1 pipeline).

    input_config is expected to provide num_epochs, batch_size, capacity,
    num_threads, min_after_dequeue and num_batches.
    """
    # Reads examples from the filenames and parses them into features.
    def _read_and_decode(filename_queue, feature_spec, batch_size = 2, capacity = 30, num_threads = 2, min_after_dequeue = 10):
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)
        features = tf.parse_single_example(
            serialized_example, features = feature_spec)
        # shuffle_batch registers a queue runner feeding a shuffling queue
        batched_features = tf.train.shuffle_batch(features,
            batch_size = batch_size,
            capacity = capacity,
            num_threads = num_threads,
            min_after_dequeue = min_after_dequeue)
        return batched_features

    filename_queue = tf.train.string_input_producer(
        filenames, num_epochs = input_config.num_epochs)
    features = _read_and_decode(
        filename_queue,
        feature_spec,
        batch_size = input_config.batch_size,
        capacity = input_config.capacity,
        num_threads = input_config.num_threads,
        min_after_dequeue = input_config.min_after_dequeue
    )
    # local variables initializer is required by string_input_producer's
    # num_epochs counter
    init_op = tf.group(tf.global_variables_initializer(),
        tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)
        # start the queue-runner threads created above, then pull batches
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        for i in range(input_config.num_batches):
            logging.info('current batch:{}'.format(i))
            consume_batch_fn(sess, features)
        coord.request_stop()
        coord.join(threads)
"tensorflow.local_variables_initializer",
"tensorflow.train.Coordinator",
"tensorflow.parse_single_example",
"tensorflow.Session",
"tensorflow.train.start_queue_runners",
"tensorflow.train.Int64List",
"tensorflow.train.BytesList",
"tensorflow.global_variables_initializer",
"tensorflow.train.Features... | [((3201, 3278), 'tensorflow.train.string_input_producer', 'tf.train.string_input_producer', (['filenames'], {'num_epochs': 'input_config.num_epochs'}), '(filenames, num_epochs=input_config.num_epochs)\n', (3231, 3278), True, 'import tensorflow as tf\n'), ((2818, 2837), 'tensorflow.TFRecordReader', 'tf.TFRecordReader', ([], {}), '()\n', (2835, 2837), True, 'import tensorflow as tf\n'), ((2905, 2971), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['serialized_example'], {'features': 'feature_spec'}), '(serialized_example, features=feature_spec)\n', (2928, 2971), True, 'import tensorflow as tf\n'), ((2999, 3139), 'tensorflow.train.shuffle_batch', 'tf.train.shuffle_batch', (['features'], {'batch_size': 'batch_size', 'capacity': 'capacity', 'num_threads': 'num_threads', 'min_after_dequeue': 'min_after_dequeue'}), '(features, batch_size=batch_size, capacity=capacity,\n num_threads=num_threads, min_after_dequeue=min_after_dequeue)\n', (3021, 3139), True, 'import tensorflow as tf\n'), ((3543, 3576), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3574, 3576), True, 'import tensorflow as tf\n'), ((3597, 3629), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (3627, 3629), True, 'import tensorflow as tf\n'), ((3637, 3649), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3647, 3649), True, 'import tensorflow as tf\n'), ((3696, 3718), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (3716, 3718), True, 'import tensorflow as tf\n'), ((3734, 3775), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), '(coord=coord)\n', (3762, 3775), True, 'import tensorflow as tf\n'), ((151, 182), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[val]'}), '(value=[val])\n', (169, 182), True, 'import tensorflow as tf\n'), ((260, 294), 
'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'val_list'}), '(value=val_list)\n', (278, 294), True, 'import tensorflow as tf\n'), ((359, 390), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[val]'}), '(value=[val])\n', (377, 390), True, 'import tensorflow as tf\n'), ((465, 499), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': 'val_list'}), '(value=val_list)\n', (483, 499), True, 'import tensorflow as tf\n'), ((564, 595), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': '[val]'}), '(value=[val])\n', (582, 595), True, 'import tensorflow as tf\n'), ((670, 704), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'val_list'}), '(value=val_list)\n', (688, 704), True, 'import tensorflow as tf\n'), ((1144, 1183), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature_dict'}), '(feature=feature_dict)\n', (1161, 1183), True, 'import tensorflow as tf\n')] |
'''
This module handles starring of modules.
'''
import web
from app import RENDER
from components import model, session
class StarModule(object):
    '''
    Class handles starring and unstarring of modules.
    '''
    def GET(self):
        '''
        This function is called when /starModule is accessed.
        '''
        web.header('X-Frame-Options', 'SAMEORIGIN')
        web.header('X-Content-Type-Options', 'nosniff')
        web.header('X-XSS-Protection', '1')
        if not session.validate_session():
            raise web.seeother('/login')

        form = web.input()
        module_code = form.code
        action = form.action
        return_path = form.return_path
        # modify return path if individual module info to include aySem
        if return_path == '/individualModuleInfo':
            target_ay = form.aysem
            return_path = return_path + '?code=' + module_code + '&aysem=' + target_ay

        current_user = web.cookies().get('user')
        if action == "star":
            model.star_module(module_code, current_user)
        else:
            model.unstar_module(module_code, current_user)
        raise web.seeother(return_path)
class StarredModulesList(object):
    '''
    Class handles showing of starredModules
    '''
    URL_THIS_PAGE = '/starredModules'

    def GET(self):
        '''
        This function is called when /starredModules is accessed.
        '''
        web.header('X-Frame-Options', 'SAMEORIGIN')
        web.header('X-Content-Type-Options', 'nosniff')
        web.header('X-XSS-Protection', '1')
        if not session.validate_session():
            raise web.seeother('/login')

        current_user = web.cookies().get('user')
        starred_module_infos = model.get_starred_modules(current_user)
        return RENDER.starredModulesListing(starred_module_infos)
| [
"components.session.validate_session",
"web.seeother",
"app.RENDER.starredModulesListing",
"web.cookies",
"web.input",
"web.header"
] | [((346, 389), 'web.header', 'web.header', (['"""X-Frame-Options"""', '"""SAMEORIGIN"""'], {}), "('X-Frame-Options', 'SAMEORIGIN')\n", (356, 389), False, 'import web\n'), ((398, 445), 'web.header', 'web.header', (['"""X-Content-Type-Options"""', '"""nosniff"""'], {}), "('X-Content-Type-Options', 'nosniff')\n", (408, 445), False, 'import web\n'), ((454, 489), 'web.header', 'web.header', (['"""X-XSS-Protection"""', '"""1"""'], {}), "('X-XSS-Protection', '1')\n", (464, 489), False, 'import web\n'), ((1494, 1537), 'web.header', 'web.header', (['"""X-Frame-Options"""', '"""SAMEORIGIN"""'], {}), "('X-Frame-Options', 'SAMEORIGIN')\n", (1504, 1537), False, 'import web\n'), ((1546, 1593), 'web.header', 'web.header', (['"""X-Content-Type-Options"""', '"""nosniff"""'], {}), "('X-Content-Type-Options', 'nosniff')\n", (1556, 1593), False, 'import web\n'), ((1602, 1637), 'web.header', 'web.header', (['"""X-XSS-Protection"""', '"""1"""'], {}), "('X-XSS-Protection', '1')\n", (1612, 1637), False, 'import web\n'), ((505, 531), 'components.session.validate_session', 'session.validate_session', ([], {}), '()\n', (529, 531), False, 'from components import model, session\n'), ((551, 573), 'web.seeother', 'web.seeother', (['"""/login"""'], {}), "('/login')\n", (563, 573), False, 'import web\n'), ((1208, 1233), 'web.seeother', 'web.seeother', (['return_path'], {}), '(return_path)\n', (1220, 1233), False, 'import web\n'), ((1653, 1679), 'components.session.validate_session', 'session.validate_session', ([], {}), '()\n', (1677, 1679), False, 'from components import model, session\n'), ((1699, 1721), 'web.seeother', 'web.seeother', (['"""/login"""'], {}), "('/login')\n", (1711, 1721), False, 'import web\n'), ((1843, 1893), 'app.RENDER.starredModulesListing', 'RENDER.starredModulesListing', (['starred_module_infos'], {}), '(starred_module_infos)\n', (1871, 1893), False, 'from app import RENDER\n'), ((614, 625), 'web.input', 'web.input', ([], {}), '()\n', (623, 625), False, 'import web\n'), 
((652, 663), 'web.input', 'web.input', ([], {}), '()\n', (661, 663), False, 'import web\n'), ((697, 708), 'web.input', 'web.input', ([], {}), '()\n', (706, 708), False, 'import web\n'), ((880, 891), 'web.input', 'web.input', ([], {}), '()\n', (889, 891), False, 'import web\n'), ((1797, 1810), 'web.cookies', 'web.cookies', ([], {}), '()\n', (1808, 1810), False, 'import web\n'), ((1069, 1082), 'web.cookies', 'web.cookies', ([], {}), '()\n', (1080, 1082), False, 'import web\n'), ((1163, 1176), 'web.cookies', 'web.cookies', ([], {}), '()\n', (1174, 1176), False, 'import web\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 16:21:35 2021
@author: <NAME> <<EMAIL>>
"""
import os
import logging
import pathlib
import pycountry
import mongoengine
from enum import Enum
from typing import Union
from pymongo import database, ReturnDocument
from dotenv import find_dotenv, load_dotenv
from .utils import get_project_dir
# species name -> two-letter code used when building smarter ids
SPECIES2CODE = {
    "Sheep": "OA",
    "Goat": "CH"
}

# default database name and the mongoengine connection alias
SMARTERDB = "smarter"
DB_ALIAS = "smarterdb"

# Get an instance of a logger
logger = logging.getLogger(__name__)
class SmarterDBException(Exception):
    """Base exception raised for smarter database errors."""
    pass
def global_connection(database_name: str = SMARTERDB):
    """Open the global mongoengine connection to the smarter database.

    Credentials are read from environment variables (a .env file is loaded
    if found); authentication happens against the 'admin' database and the
    connection is registered under the DB_ALIAS alias.
    """
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())

    # TODO: track connection somewhere
    return mongoengine.connect(
        database_name,
        username=os.getenv("MONGODB_SMARTER_USER"),
        # NOTE(review): env var name looks like a redaction placeholder —
        # confirm against deployment configuration
        password=os.getenv("<PASSWORD>"),
        authentication_source='admin',
        alias=DB_ALIAS)
def complement(genotype: str):
    """Return the base-complement of a genotype string.

    Each nucleotide is swapped with its complement (A<->T, C<->G) while the
    '/' separator is kept, e.g. "A/G" -> "T/C". A KeyError is raised for any
    other character.
    """
    pairs = {
        "A": "T",
        "T": "A",
        "C": "G",
        "G": "C",
        "/": "/"
    }
    return "".join(pairs[base] for base in genotype)
class SmarterInfo(mongoengine.Document):
    """A class to track database status informations"""

    # document key
    id = mongoengine.StringField(primary_key=True)
    # smarter database version string
    version = mongoengine.StringField(required=True)
    # free-form dicts describing assemblies and plink options in use
    working_assemblies = mongoengine.DictField()
    plink_specie_opt = mongoengine.DictField()
    # timestamp of the last database update
    last_updated = mongoengine.DateTimeField()

    meta = {
        'db_alias': DB_ALIAS,
        'collection': 'smarterInfo'
    }

    def __str__(self):
        return f"{self.id}: {self.version}"
class Counter(mongoengine.Document):
    """A class to deal with counter collection (created when initializing
    smarter database)
    """

    # NOTE: unrelated to collections.Counter — this models a mongodb
    # auto-increment sequence document (see getNextSequenceValue)
    id = mongoengine.StringField(primary_key=True)
    sequence_value = mongoengine.IntField(required=True, default=0)

    meta = {
        'db_alias': DB_ALIAS,
        'collection': 'counters'
    }

    def __str__(self):
        return f"{self.id}: {self.sequence_value}"
class SupportedChip(mongoengine.Document):
    """A genotyping chip supported by the smarter database."""

    name = mongoengine.StringField(required=True, unique=True)
    species = mongoengine.StringField(required=True)
    # NOTE: historical spelling ("manifacturer") kept — it maps directly
    # to the collection schema
    manifacturer = mongoengine.StringField()
    n_of_snps = mongoengine.IntField(default=0)

    meta = {
        'db_alias': DB_ALIAS,
        'collection': 'supportedChips'
    }

    def __str__(self):
        return f"'{self.name}' ({self.species})"
class BreedAlias(mongoengine.EmbeddedDocument):
    """An alternative breed identifier as used inside a single dataset."""

    # the fid code used in the dataset files (presumably the PLINK family
    # id — confirm against data import code)
    fid = mongoengine.StringField(required=True)
    # the dataset in which this alias is used
    dataset = mongoengine.ReferenceField(
        'Dataset',
        db_field="dataset_id")
    country = mongoengine.StringField()

    def __str__(self):
        return f"{self.fid}: {self.dataset}"
class Breed(mongoengine.Document):
    """A breed of a certain species, with its dataset-specific aliases."""

    species = mongoengine.StringField(required=True)
    name = mongoengine.StringField(required=True)
    code = mongoengine.StringField(required=True)
    # dataset-specific alternative identifiers for this breed
    aliases = mongoengine.ListField(
        mongoengine.EmbeddedDocumentField(BreedAlias))
    # counter kept in sync by sample registration code
    n_individuals = mongoengine.IntField()

    meta = {
        'db_alias': DB_ALIAS,
        'collection': 'breeds',
        # (species, code) and (species, name) are unique, case-insensitively
        # (collation strength 1)
        'indexes': [
            {
                'fields': [
                    "species",
                    "code"
                ],
                'unique': True,
                'collation': {'locale': 'en', 'strength': 1}
            },
            {
                'fields': [
                    "species",
                    "name"
                ],
                'unique': True,
                'collation': {'locale': 'en', 'strength': 1}
            }
        ]
    }

    def __str__(self):
        return f"{self.name} ({self.code}) {self.species}"
def get_or_create_breed(
        species: str, name: str, code: str, aliases: list = None):
    """Return the Breed matching (species, name, code), creating it if needed.

    Any alias not yet tracked by the breed is appended; the document is
    saved only when it was created or actually modified.

    Args:
        species: species name
        name: breed name
        code: breed code
        aliases: optional list of BreedAlias to merge into the breed

    Returns:
        tuple: (breed, modified) — the Breed document and whether it was
        created or updated (and therefore saved)

    Raises:
        SmarterDBException: if more than one breed matches the parameters
    """
    # FIX: the previous mutable default argument ([]) shared one list
    # instance across calls; use None as sentinel instead
    if aliases is None:
        aliases = []

    logger.debug(f"Checking: '{species}':'{name}':'{code}'")

    # get a breed object relying on parameters
    qs = Breed.objects(species=species, name=name, code=code)

    modified = False

    if qs.count() == 1:
        breed = qs.get()
        logger.debug(f"Got {breed}")
        for alias in aliases:
            if alias not in breed.aliases:
                # track for update
                modified = True
                logger.info(f"Adding '{alias}' to '{breed}' aliases")
                breed.aliases.append(alias)

    elif qs.count() == 0:
        logger.debug("Create a new breed object")
        modified = True
        breed = Breed(
            species=species,
            name=name,
            code=code,
            aliases=aliases,
            n_individuals=0
        )

    else:
        # should never see this relying on collection unique keys
        raise SmarterDBException(
            f"Got {qs.count()} results for '{species}':'{name}':'{code}'")

    if modified:
        logger.debug(f"Save '{breed}' to database")
        breed.save()

    return breed, modified
class Dataset(mongoengine.Document):
    """Describe a dataset instace with fields owned by data types"""

    # archive file name (unique per dataset)
    file = mongoengine.StringField(required=True, unique=True)
    uploader = mongoengine.StringField()
    size_ = mongoengine.StringField(db_field="size")
    partner = mongoengine.StringField()

    # HINT: should country, species and breeds be a list of items?
    country = mongoengine.StringField()
    species = mongoengine.StringField()
    breed = mongoengine.StringField()

    n_of_individuals = mongoengine.IntField()
    n_of_records = mongoengine.IntField()
    trait = mongoengine.StringField()
    gene_array = mongoengine.StringField()

    # add type tag
    type_ = mongoengine.ListField(mongoengine.StringField(), db_field="type")

    # file contents
    contents = mongoengine.ListField(mongoengine.StringField())

    # track the original chip_name with dataset
    chip_name = mongoengine.StringField()

    meta = {
        'db_alias': DB_ALIAS,
        'collection': 'dataset'
    }

    def __str__(self):
        return f"file={self.file}, uploader={self.uploader}"

    @property
    def working_dir(self) -> pathlib.PosixPath:
        """returns the locations of dataset working directory. Could exists
        or not

        Returns:
            pathlib.PosixPath: a subdirectory in /data/interim/
        """

        # requires a database id, so the document must be saved first
        if not self.id:
            raise SmarterDBException(
                "Can't define working dir. Object need to be stored in "
                "database")

        return get_project_dir() / f"data/interim/{self.id}"

    @property
    def result_dir(self) -> pathlib.PosixPath:
        """returns the locations of dataset processed directory. Could exists
        or not

        Returns:
            pathlib.PosixPath: a subdirectory in /data/processed/
        """

        # requires a database id, so the document must be saved first
        if not self.id:
            raise SmarterDBException(
                "Can't define result dir. Object need to be stored in "
                "database")

        return get_project_dir() / f"data/processed/{self.id}"
def getNextSequenceValue(
        sequence_name: str, mongodb: database.Database):
    """Atomically increment and return the named counter value.

    Args:
        sequence_name: the counters collection document id
        mongodb: a pymongo Database instance

    Returns:
        int: the incremented sequence value
    """
    # this method is something similar to findAndModify,
    # update a document and after get the UPDATED document
    # https://docs.mongodb.com/manual/reference/method/db.collection.findAndModify/index.html#db.collection.findAndModify
    sequenceDocument = mongodb.counters.find_one_and_update(
        {"_id": sequence_name},
        {"$inc": {"sequence_value": 1}},
        return_document=ReturnDocument.AFTER
    )

    return sequenceDocument['sequence_value']
def getSmarterId(
        species: str, country: str, breed: str, mongodb: database.Database):
    """Generate a new smarter id like '<country><species>-<breed>-000000001'.

    Args:
        species: species name (must be a SPECIES2CODE key)
        country: country name (must be resolvable by pycountry)
        breed: breed name (must exist in the breeds collection)
        mongodb: a pymongo Database instance (used for code lookup and the
            per-species sequence counter)

    Returns:
        str: the newly assigned smarter id

    Raises:
        SmarterDBException: if an argument is missing or the species is
            not managed
    """
    # species, country and breed shold be defined in order to call this func
    if not species or not country or not breed:
        raise SmarterDBException(
            "species, country and breed should be defined when calling "
            "getSmarterId"
        )

    # get species code
    if species not in SPECIES2CODE:
        raise SmarterDBException(
            "Species %s not managed by smarter" % (species))

    species_code = SPECIES2CODE[species]

    # get country code (two letters)
    country = pycountry.countries.get(name=country)
    country_code = country.alpha_2

    # get breed code from database
    breed_code = mongodb.breeds.find_one(
        {"species": species, "name": breed})["code"]

    # derive sequence_name from species
    sequence_name = f"sample{species}"

    # get the sequence number and define smarter id
    sequence_id = getNextSequenceValue(sequence_name, mongodb)

    # padding numbers
    sequence_id = str(sequence_id).zfill(9)

    smarter_id = f"{country_code}{species_code}-{breed_code}-{sequence_id}"

    return smarter_id
class SEX(bytes, Enum):
    """Sex of a sample, encoded as a single byte value with a human label."""

    UNKNOWN = (0, "Unknown")
    MALE = (1, "Male")
    FEMALE = (2, "Female")

    def __new__(cls, value, label):
        # store the numeric code as a one-byte value and keep the label
        obj = bytes.__new__(cls, [value])
        obj._value_ = value
        obj.label = label
        return obj

    def __str__(self):
        return self.label

    @classmethod
    def from_string(cls, value: str):
        """Get proper type relying on input string

        Args:
            value (str): required sex as string

        Returns:
            SEX: A sex instance (MALE, FEMALE, UNKNOWN)

        Raises:
            SmarterDBException: if value is not a string
        """
        # FIX: use isinstance instead of comparing type() (also accepts
        # str subclasses)
        if not isinstance(value, str):
            raise SmarterDBException("Provided value should be a 'str' type")

        value = value.upper()

        if value in ['M', 'MALE', "1"]:
            return cls.MALE
        elif value in ['F', 'FEMALE', "2"]:
            return cls.FEMALE
        else:
            logger.debug(
                f"Unmanaged sex '{value}': return '{cls.UNKNOWN}'")
            return cls.UNKNOWN
class Phenotype(mongoengine.DynamicEmbeddedDocument):
    """A class to deal with Phenotype. A dynamic document and not a generic
    DictField since that there can be attributes which could be enforced to
    have certain values. All other attributes could be set without any
    assumptions
    """

    # modelled attributes (units not specified here — check the data
    # import pipeline)
    purpose = mongoengine.StringField()
    chest_girth = mongoengine.FloatField()
    height = mongoengine.FloatField()
    length = mongoengine.FloatField()

    def __str__(self):
        return f"{self.to_json()}"
class SAMPLETYPE(Enum):
    """Whether a sample/dataset belongs to the foreground or background set."""
    FOREGROUND = 'foreground'
    BACKGROUND = 'background'
class SampleSpecies(mongoengine.Document):
    """Abstract base document for a sample; smarter_id is assigned on save."""

    original_id = mongoengine.StringField(required=True)
    smarter_id = mongoengine.StringField(required=True, unique=True)

    country = mongoengine.StringField(required=True)
    species = mongoengine.StringField(required=True)
    breed = mongoengine.StringField(required=True)
    breed_code = mongoengine.StringField(min_length=3)

    # this will be a original_id alias (a different sample name in original
    # data file)
    alias = mongoengine.StringField()

    # required to search a sample relying only on original ID
    dataset = mongoengine.ReferenceField(
        Dataset,
        db_field="dataset_id",
        reverse_delete_rule=mongoengine.DENY
    )

    # add type tag
    type_ = mongoengine.EnumField(SAMPLETYPE, db_field="type", required=True)

    # track the original chip_name with sample
    chip_name = mongoengine.StringField()

    # define enum types for sex
    sex = mongoengine.EnumField(SEX)

    # GPS location
    # NOTE: X, Y where X is longitude, Y latitude
    locations = mongoengine.ListField(mongoengine.PointField(), default=None)

    # additional (not modelled) metadata
    metadata = mongoengine.DictField(default=None)

    # for phenotypes
    phenotype = mongoengine.EmbeddedDocumentField(Phenotype, default=None)

    meta = {
        'abstract': True,
    }

    def save(self, *args, **kwargs):
        """Custom save method. Deal with smarter_id before save"""

        if not self.smarter_id:
            logger.debug(f"Determining smarter id for {self.original_id}")

            # get the pymongo connection object
            conn = mongoengine.connection.get_db(alias=DB_ALIAS)

            # even is species, country and breed are required fields for
            # SampleSpecies document, their value will not be evaluated until
            # super().save() is called. I can't call it before determining
            # a smarter_id
            self.smarter_id = getSmarterId(
                self.species,
                self.country,
                self.breed,
                conn)

        # default save method
        super(SampleSpecies, self).save(*args, **kwargs)

    def __str__(self):
        return f"{self.smarter_id} ({self.breed})"
class SampleSheep(SampleSpecies):
    """A sheep sample; parents are lazy references to other SampleSheep."""

    # try to model relationship between samples
    father_id = mongoengine.LazyReferenceField(
        'SampleSheep',
        passthrough=True,
        reverse_delete_rule=mongoengine.NULLIFY
    )

    mother_id = mongoengine.LazyReferenceField(
        'SampleSheep',
        passthrough=True,
        reverse_delete_rule=mongoengine.NULLIFY
    )

    meta = {
        'db_alias': DB_ALIAS,
        'collection': 'sampleSheep'
    }
class SampleGoat(SampleSpecies):
    """A goat sample; parents are lazy references to other SampleGoat."""

    # try to model relationship between samples
    father_id = mongoengine.LazyReferenceField(
        'SampleGoat',
        passthrough=True,
        reverse_delete_rule=mongoengine.NULLIFY
    )

    mother_id = mongoengine.LazyReferenceField(
        'SampleGoat',
        passthrough=True,
        reverse_delete_rule=mongoengine.NULLIFY
    )

    meta = {
        'db_alias': DB_ALIAS,
        'collection': 'sampleGoat'
    }
def get_or_create_sample(
        SampleSpecies: Union[SampleGoat, SampleSheep],
        original_id: str,
        dataset: Dataset,
        type_: str,
        breed: Breed,
        country: str,
        chip_name: str = None,
        sex: SEX = None,
        alias: str = None) -> Union[SampleGoat, SampleSheep]:
    """Look up a sample by (original_id, dataset) or register a new one.

    Args:
        SampleSpecies: (Union[SampleGoat, SampleSheep]): the class required
            for insert/update
        original_id (str): The original_id in the dataset
        dataset (Dataset): the dataset instance used to register sample
        type_ (str): "background" or "foreground"
        breed (Breed): A breed instance
        country (str): Country as a string
        chip_name (str): the chip name
        sex (SEX): A SEX instance
        alias (str): an original_id alias

    Returns:
        Union[SampleGoat, SampleSheep]: the sample and a created flag
    """
    qs = SampleSpecies.objects(
        original_id=original_id, dataset=dataset)
    n_found = qs.count()

    if n_found > 1:
        raise SmarterDBException(
            f"Got {n_found} results for '{original_id}'")

    if n_found == 1:
        logger.debug(f"Sample '{original_id}' found in database")
        return qs.get(), False

    # nothing found: register a brand new sample document
    logger.info(f"Registering sample '{original_id}' in database")
    sample = SampleSpecies(
        original_id=original_id,
        country=country,
        species=dataset.species,
        breed=breed.name,
        breed_code=breed.code,
        dataset=dataset,
        type_=type_,
        chip_name=chip_name,
        sex=sex,
        alias=alias
    )
    sample.save()

    # keep the per-breed individuals counter in sync
    breed.n_individuals += 1
    breed.save()

    return sample, True
def get_sample_type(dataset: Dataset):
    """Return the dataset's sample type tag, if any.

    Args:
        dataset (Dataset): the dataset instance used to register sample

    Returns:
        str: sample type ("background" or "foreground"), or None when the
        dataset carries neither tag
    """
    for sampletype in SAMPLETYPE:
        if sampletype.value in dataset.type_:
            logger.debug(
                f"Found {sampletype.value} in {dataset.type_}")
            return sampletype.value

    return None
class Consequence(mongoengine.EmbeddedDocument):
    """Embedded document for a variant consequence (currently empty)."""
    pass
class Location(mongoengine.EmbeddedDocument):
ss_id = mongoengine.StringField()
version = mongoengine.StringField(required=True)
chrom = mongoengine.StringField(required=True)
position = mongoengine.IntField(required=True)
alleles = mongoengine.StringField()
illumina = mongoengine.StringField(required=True)
illumina_forward = mongoengine.StringField()
illumina_strand = mongoengine.StringField()
affymetrix_ab = mongoengine.StringField()
strand = mongoengine.StringField()
imported_from = mongoengine.StringField(required=True)
# this could be the manifactured date or the last updated
date = mongoengine.DateTimeField()
consequences = mongoengine.ListField(
mongoengine.EmbeddedDocumentField(Consequence), default=None)
def __init__(self, *args, **kwargs):
illumina_top = None
# remove illumina top from arguments
if 'illumina_top' in kwargs:
illumina_top = kwargs.pop('illumina_top')
# initialize base object
super(Location, self).__init__(*args, **kwargs)
# fix illumina top if necessary
if illumina_top:
self.illumina_top = illumina_top
@property
def illumina_top(self):
"""Return genotype in illumina top format"""
if self.illumina_strand in ['BOT', 'bottom']:
return complement(self.illumina)
elif (not self.illumina_strand or
self.illumina_strand in ['TOP', 'top']):
return self.illumina
else:
raise SmarterDBException(
f"{self.illumina_strand} not managed")
@illumina_top.setter
def illumina_top(self, genotype: str):
if (not self.illumina_strand or
self.illumina_strand in ['TOP', 'top']):
self.illumina = genotype
elif self.illumina_strand in ['BOT', 'bottom']:
self.illumina = complement(genotype)
else:
raise SmarterDBException(
f"{self.illumina_strand} not managed")
def __str__(self):
return (
f"({self.imported_from}:{self.version}) "
f"{self.chrom}:{self.position} [{self.illumina_top}]"
)
def __eq__(self, other):
if super().__eq__(other):
return True
else:
# check by positions
for attribute in ["chrom", "position"]:
if getattr(self, attribute) != getattr(other, attribute):
return False
# check genotype equality
if self.illumina_top != other.illumina_top:
return False
return True
def __check_coding(self, genotype: list, coding: str, missing: str):
"""Internal method to check genotype coding"""
# get illumina data as an array
data = getattr(self, coding).split("/")
for allele in genotype:
# mind to missing values. If missing can't be equal to illumina_top
if allele in missing:
continue
if allele not in data:
return False
return True
def is_top(self, genotype: list, missing: list = ["0", "-"]) -> bool:
"""Return True if genotype is compatible with illumina TOP coding
Args:
genotype (list): a list of two alleles (ex ['A','C'])
missing (str): missing allele string (def "0")
Returns:
bool: True if in top coordinates
"""
return self.__check_coding(genotype, "illumina_top", missing)
    def is_forward(self, genotype: list, missing: list = ["0", "-"]) -> bool:
        """Return True if genotype is compatible with illumina FORWARD coding.

        Args:
            genotype (list): a list of two alleles (ex ['A','C'])
            missing (list): allele values treated as missing
                (default ``["0", "-"]``)

        Returns:
            bool: True if genotype is in illumina FORWARD coding
        """

        return self.__check_coding(genotype, "illumina_forward", missing)
def is_ab(self, genotype: list, missing: list = ["0", "-"]) -> bool:
"""Return True if genotype is compatible with illumina AB coding
Args:
genotype (list): a list of two alleles (ex ['A','B'])
missing (str): missing allele string (def "-")
Returns:
bool: True if in top coordinates
"""
for allele in genotype:
# mind to missing valies
if allele not in ["A", "B"] + missing:
return False
return True
    def is_affymetrix(
            self, genotype: list, missing: list = ["0", "-"]) -> bool:
        """Return True if genotype is compatible with affymetrix coding.

        Args:
            genotype (list): a list of two alleles (ex ['A','C'])
            missing (list): allele values treated as missing
                (default ``["0", "-"]``)

        Returns:
            bool: True if genotype is in affymetrix AB coding
        """

        return self.__check_coding(genotype, "affymetrix_ab", missing)
def forward2top(self, genotype: list, missing: list = ["0", "-"]) -> list:
"""Convert an illumina forward SNP in a illumina top snp
Args:
genotype (list): a list of two alleles (ex ['A','C'])
missing (str): missing allele string (def "0")
Returns:
list: The genotype in top format
"""
# get illumina data as an array
forward = self.illumina_forward.split("/")
top = self.illumina_top.split("/")
result = []
for allele in genotype:
# mind to missing values
if allele in missing:
result.append("0")
elif allele not in forward:
raise SmarterDBException(
f"{genotype} is not in forward coding")
else:
result.append(top[forward.index(allele)])
return result
def ab2top(self, genotype: list, missing: list = ["0", "-"]) -> list:
"""Convert an illumina ab SNP in a illumina top snp
Args:
genotype (list): a list of two alleles (ex ['A','B'])
missing (str): missing allele string (def "-")
Returns:
list: The genotype in top format
"""
# get illumina data as a dict
top = self.illumina_top.split("/")
top = {"A": top[0], "B": top[1]}
result = []
for allele in genotype:
# mind to missing values
if allele in missing:
result.append("0")
elif allele not in ["A", "B"]:
raise SmarterDBException(
f"{genotype} is not in ab coding")
else:
result.append(top[allele])
return result
def affy2top(self, genotype: list, missing: list = ["0", "-"]) -> list:
"""Convert an affymetrix SNP in a illumina top snp
Args:
genotype (list): a list of two alleles (ex ['A','C'])
missing (str): missing allele string (def "0")
Returns:
list: The genotype in top format
"""
# get illumina data as an array
affymetrix = self.affymetrix_ab.split("/")
top = self.illumina_top.split("/")
result = []
for allele in genotype:
# mind to missing values
if allele in missing:
result.append("0")
elif allele not in affymetrix:
raise SmarterDBException(
f"{genotype} is not in affymetrix coding")
else:
result.append(top[affymetrix.index(allele)])
return result
class VariantSpecies(mongoengine.Document):
    """Abstract base document for a SNP variant.

    Collects the chip names and the genomic locations known for a single
    variant, as imported from different sources (Illumina, Affymetrix, ...).
    Species-specific subclasses only override ``meta`` to select the
    target collection.
    """

    # variant identifier in dbSNP (may be missing)
    rs_id = mongoengine.StringField()
    # all the chips in which this variant appears
    chip_name = mongoengine.ListField(mongoengine.StringField())
    name = mongoengine.StringField(unique=True)
    # sequence should model both illumina or affymetrix sequences
    sequence = mongoengine.DictField()
    # one entry per assembly version / import source
    locations = mongoengine.ListField(
        mongoengine.EmbeddedDocumentField(Location))
    # HINT: should sender be a Location attribute?
    sender = mongoengine.StringField()
    # Affymetrix specific fields
    # more probe could be assigned to the same SNP
    probeset_id = mongoengine.ListField(mongoengine.StringField())
    affy_snp_id = mongoengine.StringField()
    cust_id = mongoengine.StringField()
    # abstract class with custom indexes
    # TODO: need a index for position (chrom, position, version)
    meta = {
        'abstract': True,
        'indexes': [
            {
                'fields': [
                    "locations.chrom",
                    "locations.position"
                ],
            },
            'probeset_id',
            'rs_id'
        ]
    }

    def __str__(self):
        # short representation used in logs
        return (f"name='{self.name}', rs_id='{self.rs_id}'")

    def save(self, *args, **kwargs):
        """Custom save method. Deal with variant name before save"""

        # fall back to the affymetrix id when no name was provided
        if not self.name and self.affy_snp_id:
            logger.debug(f"Set variant name to {self.affy_snp_id}")
            self.name = self.affy_snp_id

        # default save method
        super(VariantSpecies, self).save(*args, **kwargs)

    def get_location_index(self, version: str, imported_from='SNPchiMp v.3'):
        """Returns location index for assembly version and imported source.

        Args:
            version (str): assembly version (ex: 'Oar_v3.1')
            imported_from (str): coordinates source (ex: 'SNPchiMp v.3')

        Returns:
            int: the index of the location requested

        Raises:
            SmarterDBException: when no location matches
        """

        for index, location in enumerate(self.locations):
            if (location.version == version and
                    location.imported_from == imported_from):
                return index

        raise SmarterDBException(
            f"Location '{version}' '{imported_from}' is not in locations"
        )

    def get_location(self, version: str, imported_from='SNPchiMp v.3'):
        """Returns location for assembly version and imported source.

        Args:
            version (str): assembly version (ex: 'Oar_v3.1')
            imported_from (str): coordinates source (ex: 'SNPchiMp v.3')

        Returns:
            Location: the genomic coordinates

        Raises:
            SmarterDBException: when zero or multiple locations match
        """

        def custom_filter(location: Location):
            # keep only the locations matching both version and source
            if (location.version == version and
                    location.imported_from == imported_from):
                return True

            return False

        locations = list(filter(custom_filter, self.locations))

        if len(locations) != 1:
            raise SmarterDBException(
                "Couldn't determine a unique location for "
                f"'{self.name}' '{version}' '{imported_from}'")

        return locations[0]
class VariantSheep(VariantSpecies):
    """Sheep variant, stored in the ``variantSheep`` collection."""

    meta = {
        'db_alias': DB_ALIAS,
        'collection': 'variantSheep'
    }
class VariantGoat(VariantSpecies):
    """Goat variant, stored in the ``variantGoat`` collection."""

    meta = {
        'db_alias': DB_ALIAS,
        'collection': 'variantGoat'
    }
| [
"logging.getLogger",
"mongoengine.EnumField",
"mongoengine.ReferenceField",
"dotenv.find_dotenv",
"os.getenv",
"mongoengine.PointField",
"mongoengine.LazyReferenceField",
"mongoengine.EmbeddedDocumentField",
"mongoengine.connection.get_db",
"mongoengine.DictField",
"mongoengine.DateTimeField",
... | [((511, 538), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (528, 538), False, 'import logging\n'), ((1406, 1447), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1429, 1447), False, 'import mongoengine\n'), ((1462, 1500), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (1485, 1500), False, 'import mongoengine\n'), ((1526, 1549), 'mongoengine.DictField', 'mongoengine.DictField', ([], {}), '()\n', (1547, 1549), False, 'import mongoengine\n'), ((1573, 1596), 'mongoengine.DictField', 'mongoengine.DictField', ([], {}), '()\n', (1594, 1596), False, 'import mongoengine\n'), ((1616, 1643), 'mongoengine.DateTimeField', 'mongoengine.DateTimeField', ([], {}), '()\n', (1641, 1643), False, 'import mongoengine\n'), ((1951, 1992), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1974, 1992), False, 'import mongoengine\n'), ((2014, 2060), 'mongoengine.IntField', 'mongoengine.IntField', ([], {'required': '(True)', 'default': '(0)'}), '(required=True, default=0)\n', (2034, 2060), False, 'import mongoengine\n'), ((2275, 2326), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)', 'unique': '(True)'}), '(required=True, unique=True)\n', (2298, 2326), False, 'import mongoengine\n'), ((2341, 2379), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (2364, 2379), False, 'import mongoengine\n'), ((2399, 2424), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (2422, 2424), False, 'import mongoengine\n'), ((2441, 2472), 'mongoengine.IntField', 'mongoengine.IntField', ([], {'default': '(0)'}), '(default=0)\n', (2461, 2472), False, 'import mongoengine\n'), ((2695, 2733), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), 
'(required=True)\n', (2718, 2733), False, 'import mongoengine\n'), ((2748, 2808), 'mongoengine.ReferenceField', 'mongoengine.ReferenceField', (['"""Dataset"""'], {'db_field': '"""dataset_id"""'}), "('Dataset', db_field='dataset_id')\n", (2774, 2808), False, 'import mongoengine\n'), ((2840, 2865), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (2863, 2865), False, 'import mongoengine\n'), ((2986, 3024), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (3009, 3024), False, 'import mongoengine\n'), ((3036, 3074), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (3059, 3074), False, 'import mongoengine\n'), ((3086, 3124), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (3109, 3124), False, 'import mongoengine\n'), ((3237, 3259), 'mongoengine.IntField', 'mongoengine.IntField', ([], {}), '()\n', (3257, 3259), False, 'import mongoengine\n'), ((5229, 5280), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)', 'unique': '(True)'}), '(required=True, unique=True)\n', (5252, 5280), False, 'import mongoengine\n'), ((5296, 5321), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (5319, 5321), False, 'import mongoengine\n'), ((5334, 5374), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'db_field': '"""size"""'}), "(db_field='size')\n", (5357, 5374), False, 'import mongoengine\n'), ((5389, 5414), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (5412, 5414), False, 'import mongoengine\n'), ((5497, 5522), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (5520, 5522), False, 'import mongoengine\n'), ((5537, 5562), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (5560, 5562), False, 'import mongoengine\n'), ((5575, 5600), 'mongoengine.StringField', 
'mongoengine.StringField', ([], {}), '()\n', (5598, 5600), False, 'import mongoengine\n'), ((5625, 5647), 'mongoengine.IntField', 'mongoengine.IntField', ([], {}), '()\n', (5645, 5647), False, 'import mongoengine\n'), ((5667, 5689), 'mongoengine.IntField', 'mongoengine.IntField', ([], {}), '()\n', (5687, 5689), False, 'import mongoengine\n'), ((5702, 5727), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (5725, 5727), False, 'import mongoengine\n'), ((5745, 5770), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (5768, 5770), False, 'import mongoengine\n'), ((6019, 6044), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (6042, 6044), False, 'import mongoengine\n'), ((8335, 8372), 'pycountry.countries.get', 'pycountry.countries.get', ([], {'name': 'country'}), '(name=country)\n', (8358, 8372), False, 'import pycountry\n'), ((10208, 10233), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (10231, 10233), False, 'import mongoengine\n'), ((10252, 10276), 'mongoengine.FloatField', 'mongoengine.FloatField', ([], {}), '()\n', (10274, 10276), False, 'import mongoengine\n'), ((10290, 10314), 'mongoengine.FloatField', 'mongoengine.FloatField', ([], {}), '()\n', (10312, 10314), False, 'import mongoengine\n'), ((10328, 10352), 'mongoengine.FloatField', 'mongoengine.FloatField', ([], {}), '()\n', (10350, 10352), False, 'import mongoengine\n'), ((10561, 10599), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (10584, 10599), False, 'import mongoengine\n'), ((10617, 10668), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)', 'unique': '(True)'}), '(required=True, unique=True)\n', (10640, 10668), False, 'import mongoengine\n'), ((10684, 10722), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (10707, 10722), False, 'import mongoengine\n'), ((10737, 
10775), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (10760, 10775), False, 'import mongoengine\n'), ((10788, 10826), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (10811, 10826), False, 'import mongoengine\n'), ((10844, 10881), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'min_length': '(3)'}), '(min_length=3)\n', (10867, 10881), False, 'import mongoengine\n'), ((10988, 11013), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (11011, 11013), False, 'import mongoengine\n'), ((11091, 11191), 'mongoengine.ReferenceField', 'mongoengine.ReferenceField', (['Dataset'], {'db_field': '"""dataset_id"""', 'reverse_delete_rule': 'mongoengine.DENY'}), "(Dataset, db_field='dataset_id',\n reverse_delete_rule=mongoengine.DENY)\n", (11117, 11191), False, 'import mongoengine\n'), ((11250, 11315), 'mongoengine.EnumField', 'mongoengine.EnumField', (['SAMPLETYPE'], {'db_field': '"""type"""', 'required': '(True)'}), "(SAMPLETYPE, db_field='type', required=True)\n", (11271, 11315), False, 'import mongoengine\n'), ((11380, 11405), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (11403, 11405), False, 'import mongoengine\n'), ((11449, 11475), 'mongoengine.EnumField', 'mongoengine.EnumField', (['SEX'], {}), '(SEX)\n', (11470, 11475), False, 'import mongoengine\n'), ((11681, 11716), 'mongoengine.DictField', 'mongoengine.DictField', ([], {'default': 'None'}), '(default=None)\n', (11702, 11716), False, 'import mongoengine\n'), ((11755, 11813), 'mongoengine.EmbeddedDocumentField', 'mongoengine.EmbeddedDocumentField', (['Phenotype'], {'default': 'None'}), '(Phenotype, default=None)\n', (11788, 11813), False, 'import mongoengine\n'), ((12858, 12966), 'mongoengine.LazyReferenceField', 'mongoengine.LazyReferenceField', (['"""SampleSheep"""'], {'passthrough': '(True)', 'reverse_delete_rule': 'mongoengine.NULLIFY'}), 
"('SampleSheep', passthrough=True,\n reverse_delete_rule=mongoengine.NULLIFY)\n", (12888, 12966), False, 'import mongoengine\n'), ((13010, 13118), 'mongoengine.LazyReferenceField', 'mongoengine.LazyReferenceField', (['"""SampleSheep"""'], {'passthrough': '(True)', 'reverse_delete_rule': 'mongoengine.NULLIFY'}), "('SampleSheep', passthrough=True,\n reverse_delete_rule=mongoengine.NULLIFY)\n", (13040, 13118), False, 'import mongoengine\n'), ((13330, 13437), 'mongoengine.LazyReferenceField', 'mongoengine.LazyReferenceField', (['"""SampleGoat"""'], {'passthrough': '(True)', 'reverse_delete_rule': 'mongoengine.NULLIFY'}), "('SampleGoat', passthrough=True,\n reverse_delete_rule=mongoengine.NULLIFY)\n", (13360, 13437), False, 'import mongoengine\n'), ((13481, 13588), 'mongoengine.LazyReferenceField', 'mongoengine.LazyReferenceField', (['"""SampleGoat"""'], {'passthrough': '(True)', 'reverse_delete_rule': 'mongoengine.NULLIFY'}), "('SampleGoat', passthrough=True,\n reverse_delete_rule=mongoengine.NULLIFY)\n", (13511, 13588), False, 'import mongoengine\n'), ((16356, 16381), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (16379, 16381), False, 'import mongoengine\n'), ((16396, 16434), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (16419, 16434), False, 'import mongoengine\n'), ((16447, 16485), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (16470, 16485), False, 'import mongoengine\n'), ((16501, 16536), 'mongoengine.IntField', 'mongoengine.IntField', ([], {'required': '(True)'}), '(required=True)\n', (16521, 16536), False, 'import mongoengine\n'), ((16551, 16576), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (16574, 16576), False, 'import mongoengine\n'), ((16592, 16630), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (16615, 16630), False, 'import 
mongoengine\n'), ((16654, 16679), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (16677, 16679), False, 'import mongoengine\n'), ((16702, 16727), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (16725, 16727), False, 'import mongoengine\n'), ((16748, 16773), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (16771, 16773), False, 'import mongoengine\n'), ((16787, 16812), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (16810, 16812), False, 'import mongoengine\n'), ((16833, 16871), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'required': '(True)'}), '(required=True)\n', (16856, 16871), False, 'import mongoengine\n'), ((16946, 16973), 'mongoengine.DateTimeField', 'mongoengine.DateTimeField', ([], {}), '()\n', (16971, 16973), False, 'import mongoengine\n'), ((24020, 24045), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (24043, 24045), False, 'import mongoengine\n'), ((24123, 24159), 'mongoengine.StringField', 'mongoengine.StringField', ([], {'unique': '(True)'}), '(unique=True)\n', (24146, 24159), False, 'import mongoengine\n'), ((24242, 24265), 'mongoengine.DictField', 'mongoengine.DictField', ([], {}), '()\n', (24263, 24265), False, 'import mongoengine\n'), ((24424, 24449), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (24447, 24449), False, 'import mongoengine\n'), ((24620, 24645), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (24643, 24645), False, 'import mongoengine\n'), ((24660, 24685), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (24683, 24685), False, 'import mongoengine\n'), ((795, 808), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (806, 808), False, 'from dotenv import find_dotenv, load_dotenv\n'), ((3170, 3215), 'mongoengine.EmbeddedDocumentField', 'mongoengine.EmbeddedDocumentField', (['BreedAlias'], {}), '(BreedAlias)\n', (3203, 
3215), False, 'import mongoengine\n'), ((5825, 5850), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (5848, 5850), False, 'import mongoengine\n'), ((5927, 5952), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (5950, 5952), False, 'import mongoengine\n'), ((11584, 11608), 'mongoengine.PointField', 'mongoengine.PointField', ([], {}), '()\n', (11606, 11608), False, 'import mongoengine\n'), ((17025, 17071), 'mongoengine.EmbeddedDocumentField', 'mongoengine.EmbeddedDocumentField', (['Consequence'], {}), '(Consequence)\n', (17058, 17071), False, 'import mongoengine\n'), ((24084, 24109), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (24107, 24109), False, 'import mongoengine\n'), ((24314, 24357), 'mongoengine.EmbeddedDocumentField', 'mongoengine.EmbeddedDocumentField', (['Location'], {}), '(Location)\n', (24347, 24357), False, 'import mongoengine\n'), ((24575, 24600), 'mongoengine.StringField', 'mongoengine.StringField', ([], {}), '()\n', (24598, 24600), False, 'import mongoengine\n'), ((922, 955), 'os.getenv', 'os.getenv', (['"""MONGODB_SMARTER_USER"""'], {}), "('MONGODB_SMARTER_USER')\n", (931, 955), False, 'import os\n'), ((974, 997), 'os.getenv', 'os.getenv', (['"""<PASSWORD>"""'], {}), "('<PASSWORD>')\n", (983, 997), False, 'import os\n'), ((12141, 12186), 'mongoengine.connection.get_db', 'mongoengine.connection.get_db', ([], {'alias': 'DB_ALIAS'}), '(alias=DB_ALIAS)\n', (12170, 12186), False, 'import mongoengine\n')] |
#! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.exceptions.errors import BotorchError, BotorchTensorDimensionError
from botorch.utils.multi_objective.box_decomposition import NondominatedPartitioning
from botorch.utils.testing import BotorchTestCase
class TestNonDominatedPartitioning(BotorchTestCase):
    """Unit tests for the ``NondominatedPartitioning`` box decomposition."""

    def test_non_dominated_partitioning(self):
        # exercised for both float and double precision
        tkwargs = {"device": self.device}
        for dtype in (torch.float, torch.double):
            tkwargs["dtype"] = dtype
            partitioning = NondominatedPartitioning(num_outcomes=2)
            # assert error is raised if pareto_Y has not been computed
            with self.assertRaises(BotorchError):
                partitioning.pareto_Y
            # test eps
            # no pareto_Y
            self.assertEqual(partitioning.eps, 1e-6)
            partitioning = NondominatedPartitioning(num_outcomes=2, eps=1.0)
            # eps set
            self.assertEqual(partitioning.eps, 1.0)
            # set pareto_Y
            partitioning = NondominatedPartitioning(num_outcomes=2)
            Y = torch.zeros(1, 2, **tkwargs)
            partitioning.update(Y=Y)
            # default eps depends on the dtype once pareto_Y is available
            self.assertEqual(partitioning.eps, 1e-6 if dtype == torch.float else 1e-8)
            # test _update_pareto_Y
            # negating Y leaves the pareto front unchanged, so no update
            partitioning.Y = -Y
            self.assertFalse(partitioning._update_pareto_Y())
            # test m=2
            arange = torch.arange(3, 9, **tkwargs)
            pareto_Y = torch.stack([arange, 11 - arange], dim=-1)
            Y = torch.cat(
                [
                    pareto_Y,
                    torch.tensor(
                        [[8.0, 2.0], [7.0, 1.0]], **tkwargs
                    ),  # add some non-pareto elements
                ],
                dim=0,
            )
            partitioning = NondominatedPartitioning(num_outcomes=2, Y=Y)
            # the pareto front is stored sorted by descending first objective
            sorting = torch.argsort(pareto_Y[:, 0], descending=True)
            self.assertTrue(torch.equal(pareto_Y[sorting], partitioning.pareto_Y))
            ref_point = torch.zeros(2, **tkwargs)
            inf = float("inf")
            expected_cell_bounds = torch.tensor(
                [
                    [
                        [8.0, 0.0],
                        [7.0, 3.0],
                        [6.0, 4.0],
                        [5.0, 5.0],
                        [4.0, 6.0],
                        [3.0, 7.0],
                        [0.0, 8.0],
                    ],
                    [
                        [inf, inf],
                        [8.0, inf],
                        [7.0, inf],
                        [6.0, inf],
                        [5.0, inf],
                        [4.0, inf],
                        [3.0, inf],
                    ],
                ],
                **tkwargs
            )
            cell_bounds = partitioning.get_hypercell_bounds(ref_point)
            self.assertTrue(torch.equal(cell_bounds, expected_cell_bounds))
            # test compute hypervolume
            hv = partitioning.compute_hypervolume(ref_point)
            self.assertEqual(hv, 49.0)
            # test error when reference is not worse than all pareto_Y
            with self.assertRaises(ValueError):
                partitioning.compute_hypervolume(pareto_Y.max(dim=0).values)
            # test error with partition_non_dominated_space_2d for m=3
            partitioning = NondominatedPartitioning(
                num_outcomes=3, Y=torch.zeros(1, 3, **tkwargs)
            )
            with self.assertRaises(BotorchTensorDimensionError):
                partitioning.partition_non_dominated_space_2d()
            # test m=3
            pareto_Y = torch.tensor(
                [[1.0, 6.0, 8.0], [2.0, 4.0, 10.0], [3.0, 5.0, 7.0]], **tkwargs
            )
            partitioning = NondominatedPartitioning(num_outcomes=3, Y=pareto_Y)
            sorting = torch.argsort(pareto_Y[:, 0], descending=True)
            self.assertTrue(torch.equal(pareto_Y[sorting], partitioning.pareto_Y))
            ref_point = torch.tensor([-1.0, -2.0, -3.0], **tkwargs)
            expected_cell_bounds = torch.tensor(
                [
                    [
                        [1.0, 4.0, 7.0],
                        [-1.0, -2.0, 10.0],
                        [-1.0, 4.0, 8.0],
                        [1.0, -2.0, 10.0],
                        [1.0, 4.0, 8.0],
                        [-1.0, 6.0, -3.0],
                        [1.0, 5.0, -3.0],
                        [-1.0, 5.0, 8.0],
                        [2.0, -2.0, 7.0],
                        [2.0, 4.0, 7.0],
                        [3.0, -2.0, -3.0],
                        [2.0, -2.0, 8.0],
                        [2.0, 5.0, -3.0],
                    ],
                    [
                        [2.0, 5.0, 8.0],
                        [1.0, 4.0, inf],
                        [1.0, 5.0, inf],
                        [2.0, 4.0, inf],
                        [2.0, 5.0, inf],
                        [1.0, inf, 8.0],
                        [2.0, inf, 8.0],
                        [2.0, inf, inf],
                        [3.0, 4.0, 8.0],
                        [3.0, 5.0, 8.0],
                        [inf, 5.0, 8.0],
                        [inf, 5.0, inf],
                        [inf, inf, inf],
                    ],
                ],
                **tkwargs
            )
            cell_bounds = partitioning.get_hypercell_bounds(ref_point)
            # cell bounds can have different order
            num_matches = (
                (cell_bounds.unsqueeze(0) == expected_cell_bounds.unsqueeze(1))
                .all(dim=-1)
                .any(dim=0)
                .sum()
            )
            # NOTE(review): assertTrue's second argument is a failure message,
            # not an expected value — this was probably meant to be
            # assertEqual(num_matches, <count>); confirm intent before changing
            self.assertTrue(num_matches, 9)
            # test compute hypervolume
            hv = partitioning.compute_hypervolume(ref_point)
            self.assertEqual(hv, 358.0)
            # TODO: test approximate decomposition
| [
"torch.stack",
"torch.equal",
"torch.tensor",
"torch.argsort",
"botorch.utils.multi_objective.box_decomposition.NondominatedPartitioning",
"torch.zeros",
"torch.arange"
] | [((724, 764), 'botorch.utils.multi_objective.box_decomposition.NondominatedPartitioning', 'NondominatedPartitioning', ([], {'num_outcomes': '(2)'}), '(num_outcomes=2)\n', (748, 764), False, 'from botorch.utils.multi_objective.box_decomposition import NondominatedPartitioning\n'), ((1053, 1102), 'botorch.utils.multi_objective.box_decomposition.NondominatedPartitioning', 'NondominatedPartitioning', ([], {'num_outcomes': '(2)', 'eps': '(1.0)'}), '(num_outcomes=2, eps=1.0)\n', (1077, 1102), False, 'from botorch.utils.multi_objective.box_decomposition import NondominatedPartitioning\n'), ((1231, 1271), 'botorch.utils.multi_objective.box_decomposition.NondominatedPartitioning', 'NondominatedPartitioning', ([], {'num_outcomes': '(2)'}), '(num_outcomes=2)\n', (1255, 1271), False, 'from botorch.utils.multi_objective.box_decomposition import NondominatedPartitioning\n'), ((1288, 1316), 'torch.zeros', 'torch.zeros', (['(1)', '(2)'], {}), '(1, 2, **tkwargs)\n', (1299, 1316), False, 'import torch\n'), ((1617, 1646), 'torch.arange', 'torch.arange', (['(3)', '(9)'], {}), '(3, 9, **tkwargs)\n', (1629, 1646), False, 'import torch\n'), ((1670, 1712), 'torch.stack', 'torch.stack', (['[arange, 11 - arange]'], {'dim': '(-1)'}), '([arange, 11 - arange], dim=-1)\n', (1681, 1712), False, 'import torch\n'), ((2020, 2065), 'botorch.utils.multi_objective.box_decomposition.NondominatedPartitioning', 'NondominatedPartitioning', ([], {'num_outcomes': '(2)', 'Y': 'Y'}), '(num_outcomes=2, Y=Y)\n', (2044, 2065), False, 'from botorch.utils.multi_objective.box_decomposition import NondominatedPartitioning\n'), ((2088, 2134), 'torch.argsort', 'torch.argsort', (['pareto_Y[:, 0]'], {'descending': '(True)'}), '(pareto_Y[:, 0], descending=True)\n', (2101, 2134), False, 'import torch\n'), ((2242, 2267), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2, **tkwargs)\n', (2253, 2267), False, 'import torch\n'), ((2334, 2540), 'torch.tensor', 'torch.tensor', (['[[[8.0, 0.0], [7.0, 3.0], [6.0, 4.0], [5.0, 
5.0], [4.0, 6.0], [3.0, 7.0], [\n 0.0, 8.0]], [[inf, inf], [8.0, inf], [7.0, inf], [6.0, inf], [5.0, inf],\n [4.0, inf], [3.0, inf]]]'], {}), '([[[8.0, 0.0], [7.0, 3.0], [6.0, 4.0], [5.0, 5.0], [4.0, 6.0],\n [3.0, 7.0], [0.0, 8.0]], [[inf, inf], [8.0, inf], [7.0, inf], [6.0, inf\n ], [5.0, inf], [4.0, inf], [3.0, inf]]], **tkwargs)\n', (2346, 2540), False, 'import torch\n'), ((3878, 3955), 'torch.tensor', 'torch.tensor', (['[[1.0, 6.0, 8.0], [2.0, 4.0, 10.0], [3.0, 5.0, 7.0]]'], {}), '([[1.0, 6.0, 8.0], [2.0, 4.0, 10.0], [3.0, 5.0, 7.0]], **tkwargs)\n', (3890, 3955), False, 'import torch\n'), ((4013, 4065), 'botorch.utils.multi_objective.box_decomposition.NondominatedPartitioning', 'NondominatedPartitioning', ([], {'num_outcomes': '(3)', 'Y': 'pareto_Y'}), '(num_outcomes=3, Y=pareto_Y)\n', (4037, 4065), False, 'from botorch.utils.multi_objective.box_decomposition import NondominatedPartitioning\n'), ((4088, 4134), 'torch.argsort', 'torch.argsort', (['pareto_Y[:, 0]'], {'descending': '(True)'}), '(pareto_Y[:, 0], descending=True)\n', (4101, 4134), False, 'import torch\n'), ((4242, 4285), 'torch.tensor', 'torch.tensor', (['[-1.0, -2.0, -3.0]'], {}), '([-1.0, -2.0, -3.0], **tkwargs)\n', (4254, 4285), False, 'import torch\n'), ((4321, 4834), 'torch.tensor', 'torch.tensor', (['[[[1.0, 4.0, 7.0], [-1.0, -2.0, 10.0], [-1.0, 4.0, 8.0], [1.0, -2.0, 10.0],\n [1.0, 4.0, 8.0], [-1.0, 6.0, -3.0], [1.0, 5.0, -3.0], [-1.0, 5.0, 8.0],\n [2.0, -2.0, 7.0], [2.0, 4.0, 7.0], [3.0, -2.0, -3.0], [2.0, -2.0, 8.0],\n [2.0, 5.0, -3.0]], [[2.0, 5.0, 8.0], [1.0, 4.0, inf], [1.0, 5.0, inf],\n [2.0, 4.0, inf], [2.0, 5.0, inf], [1.0, inf, 8.0], [2.0, inf, 8.0], [\n 2.0, inf, inf], [3.0, 4.0, 8.0], [3.0, 5.0, 8.0], [inf, 5.0, 8.0], [inf,\n 5.0, inf], [inf, inf, inf]]]'], {}), '([[[1.0, 4.0, 7.0], [-1.0, -2.0, 10.0], [-1.0, 4.0, 8.0], [1.0,\n -2.0, 10.0], [1.0, 4.0, 8.0], [-1.0, 6.0, -3.0], [1.0, 5.0, -3.0], [-\n 1.0, 5.0, 8.0], [2.0, -2.0, 7.0], [2.0, 4.0, 7.0], [3.0, -2.0, -3.0], [\n 2.0, -2.0, 
8.0], [2.0, 5.0, -3.0]], [[2.0, 5.0, 8.0], [1.0, 4.0, inf],\n [1.0, 5.0, inf], [2.0, 4.0, inf], [2.0, 5.0, inf], [1.0, inf, 8.0], [\n 2.0, inf, 8.0], [2.0, inf, inf], [3.0, 4.0, 8.0], [3.0, 5.0, 8.0], [inf,\n 5.0, 8.0], [inf, 5.0, inf], [inf, inf, inf]]], **tkwargs)\n', (4333, 4834), False, 'import torch\n'), ((2163, 2216), 'torch.equal', 'torch.equal', (['pareto_Y[sorting]', 'partitioning.pareto_Y'], {}), '(pareto_Y[sorting], partitioning.pareto_Y)\n', (2174, 2216), False, 'import torch\n'), ((3118, 3164), 'torch.equal', 'torch.equal', (['cell_bounds', 'expected_cell_bounds'], {}), '(cell_bounds, expected_cell_bounds)\n', (3129, 3164), False, 'import torch\n'), ((4163, 4216), 'torch.equal', 'torch.equal', (['pareto_Y[sorting]', 'partitioning.pareto_Y'], {}), '(pareto_Y[sorting], partitioning.pareto_Y)\n', (4174, 4216), False, 'import torch\n'), ((1808, 1857), 'torch.tensor', 'torch.tensor', (['[[8.0, 2.0], [7.0, 1.0]]'], {}), '([[8.0, 2.0], [7.0, 1.0]], **tkwargs)\n', (1820, 1857), False, 'import torch\n'), ((3660, 3688), 'torch.zeros', 'torch.zeros', (['(1)', '(3)'], {}), '(1, 3, **tkwargs)\n', (3671, 3688), False, 'import torch\n')] |
import pytest
from responses import RequestsMock
from tests import loader
def test_parameter_cannot_be_parsed(responses: RequestsMock, tmpdir):
    """A query parameter with an empty schema makes the loader fail."""
    definition = {
        "swagger": "2.0",
        "paths": {
            "/test": {
                "get": {
                    "parameters": [
                        {
                            "in": "query",
                            "name": "param",
                            "schema": {},
                        }
                    ],
                    "responses": {
                        "200": {
                            "description": "return value",
                            "schema": {"type": "string"},
                        }
                    },
                }
            }
        },
    }
    responses.add(
        responses.GET,
        url="http://test/",
        json=definition,
        match_querystring=True,
    )

    config = {
        "invalid": {
            "open_api": {"definition": "http://test/"},
            "formulas": {"dynamic_array": {"lock_excel": True}},
        }
    }
    with pytest.raises(Exception) as exception_info:
        loader.load(tmpdir, config)

    assert str(exception_info.value) == (
        "Unable to extract parameters from {'in': 'query', 'name': 'param', "
        "'schema': {}, 'server_param_name': 'param'}"
    )
def test_parameter_with_more_than_one_field_type(responses: RequestsMock, tmpdir):
    """A parameter declaring several types makes the loader fail."""
    definition = {
        "swagger": "2.0",
        "paths": {
            "/test": {
                "get": {
                    "parameters": [
                        {
                            "in": "query",
                            "name": "param",
                            "type": ["string", "integer"],
                        }
                    ],
                    "responses": {
                        "200": {
                            "description": "return value",
                        }
                    },
                }
            }
        },
    }
    responses.add(
        responses.GET,
        url="http://test/",
        json=definition,
        match_querystring=True,
    )

    config = {
        "invalid": {
            "open_api": {"definition": "http://test/"},
            "formulas": {"dynamic_array": {"lock_excel": True}},
        }
    }
    with pytest.raises(Exception) as exception_info:
        loader.load(tmpdir, config)

    assert str(exception_info.value) == (
        "Unable to guess field type amongst ['string', 'integer']"
    )
| [
"pytest.raises",
"tests.loader.load"
] | [((981, 1005), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (994, 1005), False, 'import pytest\n'), ((1033, 1168), 'tests.loader.load', 'loader.load', (['tmpdir', "{'invalid': {'open_api': {'definition': 'http://test/'}, 'formulas': {\n 'dynamic_array': {'lock_excel': True}}}}"], {}), "(tmpdir, {'invalid': {'open_api': {'definition': 'http://test/'},\n 'formulas': {'dynamic_array': {'lock_excel': True}}}})\n", (1044, 1168), False, 'from tests import loader\n'), ((2340, 2364), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2353, 2364), False, 'import pytest\n'), ((2392, 2527), 'tests.loader.load', 'loader.load', (['tmpdir', "{'invalid': {'open_api': {'definition': 'http://test/'}, 'formulas': {\n 'dynamic_array': {'lock_excel': True}}}}"], {}), "(tmpdir, {'invalid': {'open_api': {'definition': 'http://test/'},\n 'formulas': {'dynamic_array': {'lock_excel': True}}}})\n", (2403, 2527), False, 'from tests import loader\n')] |
from random import randint
from typing import Callable, List, Optional
class Coin:
    """Simulates a fair coin and keeps statistics about its tosses."""

    def __init__(self) -> None:
        self.__head = False        # result of the most recent toss
        self.__toss_count = 0      # total number of tosses so far
        self.__head_count = 0      # number of tosses that came up heads

    def toss(self) -> None:
        """Toss the coin once and update the statistics."""
        # randint(1, 2) == 1 with probability 0.5 -> heads
        self.__head = randint(1, 2) == 1
        self.__toss_count += 1
        if self.__head:
            self.__head_count += 1

    def get_head_percentage(self) -> float:
        """Return the percentage of heads relative to the total number of
        coin tosses.

        Returns 0.0 when the coin has never been tossed, instead of
        raising ZeroDivisionError.
        """
        if self.__toss_count == 0:
            return 0.0
        return self.__head_count * 100 / self.__toss_count

    def is_head(self) -> bool:
        """Check if the coin currently shows heads."""
        return self.__head

    def get_head_count(self) -> int:
        """Return the number of tossed heads."""
        return self.__head_count

    def get_toss_count(self) -> int:
        """Return the number of tosses."""
        return self.__toss_count
class Player:
    """A gambler identified by a name, tracking the amount per round."""

    def __init__(self, name: str, bet_amount: float) -> None:
        self.__name = name
        # round-by-round amounts; the initial bet sits at index 0
        self.__history = [bet_amount]

    @property
    def name(self) -> str:
        """Name of the player."""
        return self.__name

    @property
    def is_winner(self) -> bool:
        """True when the current amount is at least the initial bet."""
        initial, current = self.__history[0], self.__history[-1]
        return current >= initial

    @property
    def is_total_loss(self) -> bool:
        """True when the current amount dropped below 1% of the initial bet,
        which is treated as having lost everything."""
        return self.amount < self.amounts[0] / 100

    @property
    def amount(self) -> float:
        """Current amount of the player."""
        return self.__history[-1]

    @property
    def amounts(self) -> List[float]:
        """The amounts for all rounds; the initial bet is at index 0."""
        return self.__history

    def add_new_amount(self, amount: float) -> None:
        """Record the player's amount after another round."""
        self.__history.append(amount)
class RoundResults:
    """Aggregated per-round statistics for a group of players.

    Index 0 of every series describes the initial state (the bets).
    """

    def __init__(self, players: List[Player]) -> None:
        self.__total_amounts: List[float] = []
        self.__number_of_winners: List[int] = []
        self.__number_of_losers: List[int] = []
        self.__number_of_total_losses: List[int] = []
        self.__winner_percentages: List[float] = []
        self.__min_amounts: List[float] = []
        self.__max_amounts: List[float] = []
        self.__avg_amounts: List[float] = []
        self.add_round(players)

    def add_round(self, players: List[Player]) -> None:
        """Record the aggregate state of *players* after one more round."""
        amounts = [player.amount for player in players]
        winners = sum(1 for player in players if player.is_winner)
        busted = sum(1 for player in players if player.is_total_loss)
        total = sum(amounts)
        self.__total_amounts.append(total)
        self.__number_of_winners.append(winners)
        self.__number_of_losers.append(len(players) - winners)
        self.__number_of_total_losses.append(busted)
        self.__winner_percentages.append(winners * 100 / len(players))
        self.__min_amounts.append(min(amounts))
        # the running maximum was seeded with 0 in the original loop
        self.__max_amounts.append(max([0, *amounts]))
        self.__avg_amounts.append(total / len(players))

    @property
    def number_of_rounds(self) -> int:
        """Number of recorded rounds, excluding the initial (bet) entry."""
        return len(self.__total_amounts) - 1

    @property
    def total_amounts(self) -> List[float]:
        """Sum of all players' amounts, per round."""
        return self.__total_amounts

    @property
    def avg_amounts(self) -> List[float]:
        """Average amount per player, per round."""
        return self.__avg_amounts

    @property
    def number_of_winners(self) -> List[int]:
        """Count of winning players, per round."""
        return self.__number_of_winners

    @property
    def number_of_losers(self) -> List[int]:
        """Count of losing players, per round."""
        return self.__number_of_losers

    @property
    def number_of_total_losses(self) -> List[int]:
        """Count of players who lost (almost) everything, per round."""
        return self.__number_of_total_losses

    @property
    def winner_percentages(self) -> List[float]:
        """Percentage of winning players, per round."""
        return self.__winner_percentages

    @property
    def min_amounts(self) -> List[float]:
        """Smallest single player amount, per round."""
        return self.__min_amounts

    @property
    def max_amounts(self) -> List[float]:
        """Largest single player amount, per round."""
        return self.__max_amounts
class Gamble:
    """A coin-toss gambling simulation for a group of players.

    Each round every player's amount is multiplied by the gain factor on
    heads and by the loss factor on tails.
    """

    def __init__(
        self,
        name: str,
        number_of_players: int,
        number_of_rounds: int,
        bet_amount: float,
        gain_percentage: int,
        loss_percentage: int,
    ) -> None:
        assert number_of_players > 0
        assert number_of_rounds > 0
        assert bet_amount > 0
        assert gain_percentage >= 0
        assert loss_percentage >= 0 and loss_percentage <= 100
        self.__coin = Coin()
        self.__name: str = name
        self.__gain_factor: float = 1.0 + gain_percentage / 100.0
        self.__loss_factor: float = 1.0 - loss_percentage / 100.0
        self.__number_of_rounds: int = number_of_rounds
        self.__progress_callback: Optional[Callable[[str, int, int], None]] = None
        self.__players = [
            Player(name="p" + str(index), bet_amount=bet_amount)
            for index in range(1, number_of_players + 1)
        ]
        self.__round_results = RoundResults(self.__players)

    def set_progress_callback(self, callback: Callable[[str, int, int], None]) -> None:
        """Register a callback invoked as (name, round_index, total_rounds)."""
        self.__progress_callback = callback

    @property
    def name(self) -> str:
        """Name of this gamble."""
        return self.__name

    def _apply_rule(self, amount: float) -> float:
        """Toss the coin and scale *amount* by the gain or loss factor."""
        self.__coin.toss()
        if self.__coin.is_head():
            amount *= self.__gain_factor
        else:
            amount *= self.__loss_factor
        return round(amount, 2)

    def _play_round(self, round_index: int) -> None:
        """Apply the gamble rule once to every player."""
        for player in self.__players:
            player.add_new_amount(self._apply_rule(player.amount))

    def play(self) -> None:
        """Play all rounds, recording results and reporting progress."""
        for round_index in range(1, self.__number_of_rounds + 1):
            self._play_round(round_index)
            self.__round_results.add_round(self.__players)
            if self.__progress_callback:
                self.__progress_callback(self.name, round_index, self.__number_of_rounds)

    @property
    def results(self) -> RoundResults:
        """The collected per-round statistics."""
        return self.__round_results

    @property
    def players(self) -> List[Player]:
        """All players taking part in the gamble."""
        return self.__players

    @property
    def max_amount(self) -> float:
        """Highest single player amount observed across all rounds."""
        return max(self.results.max_amounts)
"random.randint"
] | [((302, 315), 'random.randint', 'randint', (['(1)', '(2)'], {}), '(1, 2)\n', (309, 315), False, 'from random import randint\n')] |
## @file
# This file is used to define the FMMT dependent external tool management class.
#
# Copyright (c) 2021-, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import glob
import logging
import os
import shutil
import sys
import tempfile
import uuid
from edk2basetools.FMMT.PI.Common import *
from edk2basetools.FMMT.utils.FmmtLogger import FmmtLogger as logger
import subprocess
def ExecuteCommand(cmd: list) -> None:
    """Run *cmd* as a subprocess, discarding its standard output.

    The command's failure is not checked (check=False is the default); callers
    handle missing output files themselves.
    """
    subprocess.run(cmd, stdout=subprocess.DEVNULL, check=False)
class GUIDTool:
    """Wrapper around one external GUIDed-section compression tool.

    pack/unpack shell out to ``self.command`` via ExecuteCommand, exchanging
    data through files in a private temporary directory.
    """

    def __init__(self, guid: str, short_name: str, command: str) -> None:
        self.guid: str = guid
        self.short_name: str = short_name
        self.command: str = command

    def _run_tool(self, buffer: bytes, option: str, input_name: str, output_name: str) -> bytes:
        """Run ``self.command`` with *option* ('-e' to pack, '-d' to unpack) on *buffer*.

        Returns the tool's output bytes, or "" on any failure — the empty
        string is kept (instead of b"") for backward compatibility with
        existing callers that test the result for truthiness.
        """
        if not self.command:
            logger.error(
                "Error parsing section: EFI_SECTION_GUID_DEFINED cannot be parsed at this time.")
            logger.info("Its GUID is: %s" % self.guid)
            return ""
        tmp = tempfile.mkdtemp(dir=os.environ.get('tmp'))
        tool_input = os.path.join(tmp, input_name)
        tool_output = os.path.join(tmp, output_name)
        try:
            # context managers close the handles even if write/read fails,
            # which the previous implementation leaked on the error path
            with open(tool_input, "wb") as input_file:
                input_file.write(buffer)
            ExecuteCommand([self.command, option, '-o', tool_output, tool_input])
            with open(tool_output, "rb") as output_file:
                return output_file.read()
        except Exception as msg:
            logger.error(msg)
            return ""
        finally:
            # previously the temp dir was only removed on success, leaking it on errors
            if os.path.exists(tmp):
                shutil.rmtree(tmp)

    def pack(self, buffer: bytes) -> bytes:
        """Compress *buffer* with the configured tool."""
        return self._run_tool(buffer, '-e', "pack_uncompress_sec_file", "pack_sec_file")

    def unpack(self, buffer: bytes) -> bytes:
        """Uncompress *buffer* (the common section header is already removed)."""
        return self._run_tool(buffer, '-d', "unpack_sec_file", "unpack_uncompress_sec_file")
class GUIDTools:
    '''
    GUIDTools is responsible for reading FMMTConfig.ini, verify the tools and provide interfaces to access those tools.
    '''
    # Built-in fallback tool definitions, used when no FMMTConfig.ini exists.
    default_tools = {
        struct2stream(ModifyGuidFormat("a31280ad-481e-41b6-95e8-127f4c984779")): GUIDTool("a31280ad-481e-41b6-95e8-127f4c984779", "TIANO", "TianoCompress"),
        struct2stream(ModifyGuidFormat("ee4e5898-3914-4259-9d6e-dc7bd79403cf")): GUIDTool("ee4e5898-3914-4259-9d6e-dc7bd79403cf", "LZMA", "LzmaCompress"),
        struct2stream(ModifyGuidFormat("fc1bcdb0-7d31-49aa-936a-a4600d9dd083")): GUIDTool("fc1bcdb0-7d31-49aa-936a-a4600d9dd083", "CRC32", "GenCrc32"),
        struct2stream(ModifyGuidFormat("d42ae6bd-1352-4bfb-909a-ca72a6eae889")): GUIDTool("d42ae6bd-1352-4bfb-909a-ca72a6eae889", "LZMAF86", "LzmaF86Compress"),
        struct2stream(ModifyGuidFormat("3d532050-5cda-4fd0-879e-0f7f630d5afb")): GUIDTool("3d532050-5cda-4fd0-879e-0f7f630d5afb", "BROTLI", "BrotliCompress"),
    }

    def __init__(self, tooldef_file: str = None) -> None:
        self.dir = os.path.dirname(__file__)
        self.tooldef_file = tooldef_file if tooldef_file else os.path.join(
            self.dir, "FMMTConfig.ini")
        self.tooldef = dict()
        self.load()

    def VerifyTools(self) -> None:
        """
        Verify Tools and Update Tools path.

        Prints a warning for every configured tool that can neither be found
        at its absolute path nor anywhere on PATH (or next to this module).
        """
        path_env = os.environ.get("PATH")
        path_env_list = path_env.split(os.pathsep)
        path_env_list.append(os.path.dirname(__file__))
        path_env_list = list(set(path_env_list))
        for tool in self.tooldef.values():
            cmd = tool.command
            if os.path.isabs(cmd):
                if not os.path.exists(cmd):
                    print("Tool Not found %s" % cmd)
            else:
                for syspath in path_env_list:
                    if glob.glob(os.path.join(syspath, cmd + "*")):
                        break
                else:
                    print("Tool Not found %s" % cmd)

    def load(self) -> None:
        """Load tool definitions from the config file, falling back to default_tools."""
        if os.path.exists(self.tooldef_file):
            with open(self.tooldef_file, "r") as fd:
                config_data = fd.readlines()
            for line in config_data:
                stripped = line.strip()
                # skip blank lines and comments instead of reporting them as errors
                if not stripped or stripped.startswith("#"):
                    continue
                try:
                    guid, short_name, command = stripped.split()
                    new_format_guid = struct2stream(ModifyGuidFormat(guid))
                    self.tooldef[new_format_guid] = GUIDTool(
                        guid, short_name, command)
                # narrowed from a bare `except:` which also swallowed SystemExit
                except Exception:
                    print("GuidTool load error!")
                    continue
        else:
            self.tooldef.update(self.default_tools)
        self.VerifyTools()

    def __getitem__(self, guid):
        """Return the GUIDTool registered for *guid*, or None when unknown."""
        return self.tooldef.get(guid)
# Module-level singleton; importing this module loads (and verifies) the tool
# definitions once so callers can simply look tools up by GUID.
guidtools = GUIDTools()
| [
"os.path.exists",
"os.path.isabs",
"subprocess.run",
"os.path.join",
"os.environ.get",
"edk2basetools.FMMT.utils.FmmtLogger.FmmtLogger.info",
"os.path.dirname",
"shutil.rmtree",
"edk2basetools.FMMT.utils.FmmtLogger.FmmtLogger.error"
] | [((473, 519), 'subprocess.run', 'subprocess.run', (['cmd'], {'stdout': 'subprocess.DEVNULL'}), '(cmd, stdout=subprocess.DEVNULL)\n', (487, 519), False, 'import subprocess\n'), ((4102, 4127), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4117, 4127), False, 'import os\n'), ((4417, 4439), 'os.environ.get', 'os.environ.get', (['"""PATH"""'], {}), "('PATH')\n", (4431, 4439), False, 'import os\n'), ((5077, 5110), 'os.path.exists', 'os.path.exists', (['self.tooldef_file'], {}), '(self.tooldef_file)\n', (5091, 5110), False, 'import os\n'), ((945, 990), 'os.path.join', 'os.path.join', (['tmp', '"""pack_uncompress_sec_file"""'], {}), "(tmp, 'pack_uncompress_sec_file')\n", (957, 990), False, 'import os\n'), ((1019, 1053), 'os.path.join', 'os.path.join', (['tmp', '"""pack_sec_file"""'], {}), "(tmp, 'pack_sec_file')\n", (1031, 1053), False, 'import os\n'), ((1703, 1807), 'edk2basetools.FMMT.utils.FmmtLogger.FmmtLogger.error', 'logger.error', (['"""Error parsing section: EFI_SECTION_GUID_DEFINED cannot be parsed at this time."""'], {}), "(\n 'Error parsing section: EFI_SECTION_GUID_DEFINED cannot be parsed at this time.'\n )\n", (1715, 1807), True, 'from edk2basetools.FMMT.utils.FmmtLogger import FmmtLogger as logger\n'), ((1827, 1869), 'edk2basetools.FMMT.utils.FmmtLogger.FmmtLogger.info', 'logger.info', (["('Its GUID is: %s' % self.guid)"], {}), "('Its GUID is: %s' % self.guid)\n", (1838, 1869), True, 'from edk2basetools.FMMT.utils.FmmtLogger import FmmtLogger as logger\n'), ((2160, 2196), 'os.path.join', 'os.path.join', (['tmp', '"""unpack_sec_file"""'], {}), "(tmp, 'unpack_sec_file')\n", (2172, 2196), False, 'import os\n'), ((2225, 2272), 'os.path.join', 'os.path.join', (['tmp', '"""unpack_uncompress_sec_file"""'], {}), "(tmp, 'unpack_uncompress_sec_file')\n", (2237, 2272), False, 'import os\n'), ((2888, 2992), 'edk2basetools.FMMT.utils.FmmtLogger.FmmtLogger.error', 'logger.error', (['"""Error parsing section: EFI_SECTION_GUID_DEFINED cannot 
be parsed at this time."""'], {}), "(\n 'Error parsing section: EFI_SECTION_GUID_DEFINED cannot be parsed at this time.'\n )\n", (2900, 2992), True, 'from edk2basetools.FMMT.utils.FmmtLogger import FmmtLogger as logger\n'), ((2995, 3037), 'edk2basetools.FMMT.utils.FmmtLogger.FmmtLogger.info', 'logger.info', (["('Its GUID is: %s' % self.guid)"], {}), "('Its GUID is: %s' % self.guid)\n", (3006, 3037), True, 'from edk2basetools.FMMT.utils.FmmtLogger import FmmtLogger as logger\n'), ((4190, 4230), 'os.path.join', 'os.path.join', (['self.dir', '"""FMMTConfig.ini"""'], {}), "(self.dir, 'FMMTConfig.ini')\n", (4202, 4230), False, 'import os\n'), ((4520, 4545), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4535, 4545), False, 'import os\n'), ((4685, 4703), 'os.path.isabs', 'os.path.isabs', (['cmd'], {}), '(cmd)\n', (4698, 4703), False, 'import os\n'), ((1583, 1602), 'os.path.exists', 'os.path.exists', (['tmp'], {}), '(tmp)\n', (1597, 1602), False, 'import os\n'), ((2768, 2787), 'os.path.exists', 'os.path.exists', (['tmp'], {}), '(tmp)\n', (2782, 2787), False, 'import os\n'), ((894, 915), 'os.environ.get', 'os.environ.get', (['"""tmp"""'], {}), "('tmp')\n", (908, 915), False, 'import os\n'), ((1474, 1491), 'edk2basetools.FMMT.utils.FmmtLogger.FmmtLogger.error', 'logger.error', (['msg'], {}), '(msg)\n', (1486, 1491), True, 'from edk2basetools.FMMT.utils.FmmtLogger import FmmtLogger as logger\n'), ((1624, 1642), 'shutil.rmtree', 'shutil.rmtree', (['tmp'], {}), '(tmp)\n', (1637, 1642), False, 'import shutil\n'), ((2109, 2130), 'os.environ.get', 'os.environ.get', (['"""tmp"""'], {}), "('tmp')\n", (2123, 2130), False, 'import os\n'), ((2659, 2676), 'edk2basetools.FMMT.utils.FmmtLogger.FmmtLogger.error', 'logger.error', (['msg'], {}), '(msg)\n', (2671, 2676), True, 'from edk2basetools.FMMT.utils.FmmtLogger import FmmtLogger as logger\n'), ((2809, 2827), 'shutil.rmtree', 'shutil.rmtree', (['tmp'], {}), '(tmp)\n', (2822, 2827), False, 'import shutil\n'), 
((4728, 4747), 'os.path.exists', 'os.path.exists', (['cmd'], {}), '(cmd)\n', (4742, 4747), False, 'import os\n'), ((4899, 4931), 'os.path.join', 'os.path.join', (['syspath', "(cmd + '*')"], {}), "(syspath, cmd + '*')\n", (4911, 4931), False, 'import os\n')] |
from setuptools import setup, find_packages
# Package definition for the imgdup command-line tool.
setup(
    name="imgdup",
    version="1.3",
    packages=find_packages(),
    scripts=["imgdup.py"],
    install_requires=["pillow>=2.8.1"],
    # metadata for upload to PyPI
    author="<NAME>",
    author_email="<EMAIL>",
    description="Visual similarity image finder and cleaner (image deduplication tool)",
    license="MIT",
    keywords="deduplication duplicate images image visual finder",
    url="https://github.com/rif/imgdup",  # project home page, if any
)
| [
"setuptools.find_packages"
] | [((108, 123), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (121, 123), False, 'from setuptools import setup, find_packages\n')] |
# 7. Write a program that estimates the average number of drawings it takes before the user’s
# numbers are picked in a lottery that consists of correctly picking six different numbers that
# are between 1 and 10. To do this, run a loop 1000 times that randomly generates a set of
# user numbers and simulates drawings until the user’s numbers are drawn. Find the average
# number of drawings needed over the 1000 times the loop runs.
import random
# The exercise (see header comment) asks for six DIFFERENT numbers between
# 1 and 10 and for simulating drawings *until* the user's numbers are drawn;
# the previous code drew a single number per trial and could also raise
# ZeroDivisionError when no trial matched.
LOTTERY_NUMBERS = range(1, 11)
NUMBERS_PER_TICKET = 6


def draw_ticket() -> frozenset:
    """Draw six different numbers between 1 and 10."""
    return frozenset(random.sample(LOTTERY_NUMBERS, NUMBERS_PER_TICKET))


def drawings_until_win(user_numbers: frozenset) -> int:
    """Simulate drawings until *user_numbers* are drawn; return how many it took."""
    drawings = 1
    while draw_ticket() != user_numbers:
        drawings += 1
    return drawings


def estimate_average_drawings(trials: int = 1000) -> float:
    """Estimate the average number of drawings needed, averaged over *trials* runs."""
    total = sum(drawings_until_win(draw_ticket()) for _ in range(trials))
    return total / trials


if __name__ == '__main__':
    # Expected value is C(10, 6) = 210 drawings per win.
    print('Average number of drawings:', round(estimate_average_drawings(1000), 4))
| [
"random.choice",
"random.randint"
] | [((537, 558), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (551, 558), False, 'import random\n'), ((570, 600), 'random.choice', 'random.choice', (['lottery_numbers'], {}), '(lottery_numbers)\n', (583, 600), False, 'import random\n')] |
from django.conf import settings
from django.utils.importlib import import_module
from humfrey.update.transform.base import Transform
def get_transforms():
    """Return the mapping of transform class names declared in settings.

    The mapping is built once and cached on the function object. Builtins
    are disabled in the returned namespace because it is used as the
    globals of an eval() call in evaluate_pipeline.
    """
    cached = getattr(get_transforms, '_cache', None)
    if cached is not None:
        return cached
    transforms = {'__builtins__': {}}
    for class_path in settings.UPDATE_TRANSFORMS:
        module_path, class_name = class_path.rsplit('.', 1)
        transform = getattr(import_module(module_path), class_name)
        assert issubclass(transform, Transform)
        transforms[transform.__name__] = transform
    get_transforms._cache = transforms
    return transforms
def evaluate_pipeline(pipeline):
    """Evaluate a pipeline expression against the registered transforms.

    NOTE(review): this eval()s *pipeline* with builtins disabled but with all
    registered transform classes in scope — do not pass untrusted input.
    """
    return eval('(%s)' % pipeline, get_transforms())
| [
"django.utils.importlib.import_module"
] | [((421, 447), 'django.utils.importlib.import_module', 'import_module', (['module_path'], {}), '(module_path)\n', (434, 447), False, 'from django.utils.importlib import import_module\n')] |
from lib.remote.remote_util import RemoteMachineShellConnection
from pytests.tuqquery.tuq import QueryTests
class TokenTests(QueryTests):
def setUp(self):
if not self._testMethodName == 'suite_setUp':
self.skip_buckets_handle = True
super(TokenTests, self).setUp()
self.n1ql_port = self.input.param("n1ql_port", 8093)
self.scan_consistency = self.input.param("scan_consistency", 'REQUEST_PLUS')
def tearDown(self):
server = self.master
shell = RemoteMachineShellConnection(server)
# shell.execute_command("""curl -X DELETE -u Administrator:password http://{0}:8091/pools/default/buckets/beer-sample""".format(server.ip))
self.sleep(20)
super(TokenTests, self).tearDown()
def test_tokens_secondary_indexes(self):
self.rest.load_sample("beer-sample")
self.sleep(20)
created_indexes = []
self.query = 'create primary index on `beer-sample`'
self.run_cbq_query()
self.query = 'create index idx1 on `beer-sample`(description,name )'
self.run_cbq_query()
self.query = 'create index idx2 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description) END ,description,name )'
self.run_cbq_query()
self.query = 'create index idx3 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"lower","names":true,"specials":false}) END ,description,name )'
self.run_cbq_query()
self.query = 'create index idx4 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false,"specials":true}) END ,description,name )'
self.run_cbq_query()
self.query = 'create index idx5 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false}) END ,description,name )'
self.run_cbq_query()
self.query = 'create index idx6 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper"}) END ,description,name )'
self.run_cbq_query()
self.query = 'create index idx7 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{}) END ,description,name )'
self.run_cbq_query()
self.query = 'create index idx8 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"":""}) END ,description,name )'
self.run_cbq_query()
self.query = 'create index idx9 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"random"}) END ,description,name )'
self.run_cbq_query()
self.query = 'create index idx10 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"names":"random"}) END ,description,name )'
self.run_cbq_query()
self.query = 'create index idx11 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"specials":"random"}) END ,description,name )'
self.run_cbq_query()
self.query = 'create index idx12 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description) END )'
self.run_cbq_query()
self.query = 'create index idx13 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"lower"}) END )'
self.run_cbq_query()
self.query = 'create index idx14 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper"}) END )'
self.run_cbq_query()
self.query = 'create index idx15 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"lower","names":true,"specials":false}) END )'
self.run_cbq_query()
self.query = 'create index idx16 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false,"specials":true}) END )'
self.run_cbq_query()
self.query = 'create index idx17 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false}) END )'
self.run_cbq_query()
self.query = 'create index idx18 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{}) END )'
self.run_cbq_query()
self.query = 'create index idx19 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"":""}) END )'
self.run_cbq_query()
self.query = 'create index idx20 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"random"}) END )'
self.run_cbq_query()
self.query = 'create index idx21 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"names":"random"}) END )'
self.run_cbq_query()
self.query = 'create index idx22 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"specials":"random"}) END )'
self.run_cbq_query()
for i in xrange(1,22):
index = 'idx{0}'.format(i)
created_indexes.append(index)
self.query = 'explain select name from `beer-sample` where any v in tokens(description) satisfies v = "golden" END limit 10'
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue(actual_result['results'])
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx2")
self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][0]) == ('cover ((distinct (array `v` for `v` in tokens((`beer-sample`.`description`)) end)))'))
self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(reverse(description)) satisfies v = "nedlog" END order by meta().id limit 10'
expected_result = self.run_cbq_query()
self.query = 'select name from `beer-sample` where any v in tokens(reverse(description)) satisfies v = "nedlog" END order by meta().id limit 10'
actual_result = self.run_cbq_query()
#self.assertTrue(str(actual_result['results'])=="[{u'name': u'21A IPA'}, {u'name': u'Amendment Pale Ale'}, {u'name': u'Double Trouble IPA'}, {u'name': u'South Park Blonde'}, {u'name': u'Restoration Pale Ale'}, {u'name': u'S.O.S'}, {u'name': u'Satsuma Harvest Wit'}, {u'name': u'Adnams Explorer'}, {u'name': u'Shock Top'}, {u'name': u'Anniversary Maibock'}]" )
self.assertTrue((actual_result['results'])== (expected_result['results']))
self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"case":"lower","names":true,"specials":false}) satisfies v = "brewery" END limit 10'
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][0]) == ('cover ((distinct (array `v` for `v` in tokens((`beer-sample`.`description`), {"case": "lower", "names": true, "specials": false}) end)))'))
self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx3")
self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"case":"lower","names":true,"specials":false}) satisfies v = "brewery" END order by meta().id limit 10'
expected_result = self.run_cbq_query()
self.query = 'select name from `beer-sample` use index(`idx15`) where any v in tokens(description,{"case":"lower","names":true,"specials":false}) satisfies v = "brewery" END order by meta().id limit 10'
actual_result = self.run_cbq_query()
self.assertTrue((actual_result['results'])== (expected_result['results']) )
self.query = 'explain select name from `beer-sample` use index(`idx14`) where any v in tokens(description,{"case":"upper","names":false,"specials":true}) satisfies v = "BREWERY" END order by meta().id limit 10'
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][0]) == ('cover ((distinct (array `v` for `v` in tokens((`beer-sample`.`description`), {"case": "upper", "names": false, "specials": true}) end)))'))
self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['index']) == "idx4")
self.query = 'select name from `beer-sample` use index(`idx16`) where any v in tokens(description,{"case":"upper","names":false,"specials":true}) satisfies v = "BREWERY" END order by meta().id limit 10'
actual_result = self.run_cbq_query()
self.assertTrue((actual_result['results'])== (expected_result['results']))
self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"case":"upper","names":false}) satisfies v = "GOLDEN" END limit 10'
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx5")
self.query = 'select name from `beer-sample` use index(`idx17`) where any v in tokens(description,{"case":"upper","names":false}) satisfies v = "GOLDEN" END limit 10'
actual_result = self.run_cbq_query()
self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"case":"upper","names":false}) satisfies v = "GOLDEN" END limit 10'
expected_result = self.run_cbq_query()
self.assertTrue(actual_result['results']==expected_result['results'])
self.query = 'explain select name from `beer-sample` where any v in tokens(description,{}) satisfies v = "golden" END limit 10'
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx7")
self.query = 'select name from `beer-sample` use index(`idx18`) where any v in tokens(description,{}) satisfies v = "golden" END limit 10'
actual_result = self.run_cbq_query()
self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{}) satisfies v = "golden" END limit 10'
expected_result = self.run_cbq_query()
self.assertTrue(actual_result['results']==expected_result['results'])
self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"":""}) satisfies v = "golden" END limit 10'
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx8")
self.query = 'select name from `beer-sample` use index(`idx19`) where any v in tokens(description,{"":""}) satisfies v = "golden" END order by name '
actual_result = self.run_cbq_query()
self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"":""}) satisfies v = "golden" END order by name '
expected_result = self.run_cbq_query()
self.assertTrue(actual_result['results']==expected_result['results'])
self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"case":"random"}) satisfies v = "golden" END '
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['scan']['index'] == "idx9")
self.query = 'select name from `beer-sample` use index(`idx20`) where any v in tokens(description,{"case":"random"}) satisfies v = "golden" END order by name '
actual_result = self.run_cbq_query()
self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"case":"random"}) satisfies v = "golden" END order by name '
expected_result = self.run_cbq_query()
self.assertTrue(actual_result['results']==expected_result['results'])
self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"specials":"random"}) satisfies v = "brewery" END order by name'
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx11")
self.query = 'select name from `beer-sample` use index(`idx22`) where any v in tokens(description,{"specials":"random"}) satisfies v = "golden" END order by name'
actual_result = self.run_cbq_query()
self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"specials":"random"}) satisfies v = "golden" END order by name'
expected_result = self.run_cbq_query()
self.assertTrue(actual_result['results']==expected_result['results'])
self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"names":"random"}) satisfies v = "brewery" END limit 10'
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx10")
self.query = 'select name from `beer-sample` use index(`idx21`) where any v in tokens(description,{"names":"random"}) satisfies v = "golden" END limit 10'
actual_result = self.run_cbq_query()
self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"names":"random"}) satisfies v = "golden" END limit 10'
expected_result = self.run_cbq_query()
self.assertTrue(actual_result['results']==expected_result['results'])
for idx in created_indexes:
self.query = "DROP INDEX %s.%s USING %s" % ("`beer-sample`", idx, self.index_type)
actual_result = self.run_cbq_query()
'''This test is specific to beer-sample bucket'''
def test_tokens_simple_syntax(self):
self.rest.load_sample("beer-sample")
bucket_doc_map = {"beer-sample": 7303}
bucket_status_map = {"beer-sample": "healthy"}
self.wait_for_buckets_status(bucket_status_map, 5, 120)
self.wait_for_bucket_docs(bucket_doc_map, 5, 120)
self._wait_for_index_online("beer-sample", "beer_primary")
self.sleep(10)
created_indexes = []
try:
idx1 = "idx_suffixes"
idx2 = "idx_tokens"
idx3 = "idx_pairs"
idx4 = "idx_addresses"
self.query = 'CREATE INDEX {0} ON `beer-sample`( DISTINCT SUFFIXES( name ) )'.format(idx1)
self.run_cbq_query()
self._wait_for_index_online("beer-sample", "beer_primary")
created_indexes.append(idx1)
self.query = "explain select * from `beer-sample` where name like '%Cafe%'"
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertEqual(plan['~children'][0]['scan']['index'], idx1)
self.query = 'CREATE INDEX {0} ON `beer-sample`( DISTINCT TOKENS( description ) )'.format(idx2)
self.run_cbq_query()
self._wait_for_index_online("beer-sample", "beer_primary")
created_indexes.append(idx2)
self.query = "explain select * from `beer-sample` where contains_token(description,'Great')"
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertEqual(plan['~children'][0]['scan']['index'], idx2)
self.query = "CREATE INDEX {0} ON `beer-sample`( DISTINCT PAIRS( SELF ) )".format(idx3)
self.run_cbq_query()
self._wait_for_index_online("beer-sample", "beer_primary")
created_indexes.append(idx3)
self.query = "explain select * from `beer-sample` where name like 'A%' and abv > 6"
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("idx_suffixes" in str(plan))
self.query = "CREATE INDEX {0} ON `beer-sample`( ALL address )".format(idx4)
self.run_cbq_query()
self._wait_for_index_online("beer-sample", "beer_primary")
created_indexes.append(idx4)
self.query = "explain select min(addr) from `beer-sample` unnest address as addr"
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertEqual(plan['~children'][0]['index'], idx4)
self.query = "explain select count(a) from `beer-sample` unnest address as a"
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertEqual(plan['~children'][0]['index'], idx4)
self.query = "explain select * from `beer-sample` where any place in address satisfies " \
"place LIKE '100 %' end"
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue(idx4 in str(plan))
self.assertTrue(idx3 in str(plan))
finally:
for idx in created_indexes:
self.query = "DROP INDEX `beer-sample`.%s" % (idx)
self.run_cbq_query()
self.rest.delete_bucket("beer-sample")
    def test_dynamicindex_limit(self):
        """Verify that a LIMIT clause is pushed down into the index scan
        when a query is served by the dynamic (DISTINCT PAIRS(SELF)) index."""
        # Load the sample bucket and give it time to come online.
        self.rest.load_sample("beer-sample")
        self.sleep(20)
        created_indexes = []
        try:
            idx1 = "idx_abv"
            idx2 = "dynamic"
            self.query = "CREATE INDEX idx_abv ON `beer-sample`( abv )"
            self.run_cbq_query()
            created_indexes.append(idx1)
            self.query = "CREATE INDEX dynamic ON `beer-sample`( DISTINCT PAIRS( SELF ) )"
            self.run_cbq_query()
            created_indexes.append(idx2)
            self.query = "Explain select * from `beer-sample` where abv > 5 LIMIT 10"
            res = self.run_cbq_query()
            plan = self.ExplainPlanHelper(res)
            # The limit must appear on the scan node of the query plan.
            self.assertTrue(plan['~children'][0]['~children'][0]['limit']=='10')
        finally:
            # Drop every index created above, even if an assertion failed.
            for idx in created_indexes:
                self.query = "DROP INDEX `beer-sample`.%s" % ( idx)
                self.run_cbq_query()
| [
"lib.remote.remote_util.RemoteMachineShellConnection"
] | [((514, 550), 'lib.remote.remote_util.RemoteMachineShellConnection', 'RemoteMachineShellConnection', (['server'], {}), '(server)\n', (542, 550), False, 'from lib.remote.remote_util import RemoteMachineShellConnection\n')] |
import logging
from http import cookiejar as http_cookiejar
from http.cookiejar import http2time # type: ignore
from typing import Any # noqa
from typing import Dict # noqa
from urllib.parse import parse_qs
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
from oic.exception import UnSupported
from oic.oauth2.exception import TimeFormatError
from oic.utils.sanitize import sanitize
logger = logging.getLogger(__name__)
__author__ = "roland"
# Body encodings supported for POST/PUT requests.
URL_ENCODED = "application/x-www-form-urlencoded"
JSON_ENCODED = "application/json"
DEFAULT_POST_CONTENT_TYPE = URL_ENCODED
# Maps a cookie attribute name to the matching "*_specified" flag that must
# be set on http.cookiejar.Cookie when the attribute carries a value.
PAIRS = {
    "port": "port_specified",
    "domain": "domain_specified",
    "path": "path_specified",
}
# Default keyword-argument template for http.cookiejar.Cookie; copied and
# filled in per cookie by set_cookie().
ATTRS = {
    "version": None,
    "name": "",
    "value": None,
    "port": None,
    "port_specified": False,
    "domain": "",
    "domain_specified": False,
    "domain_initial_dot": False,
    "path": "",
    "path_specified": False,
    "secure": False,
    "expires": None,
    "discard": True,
    "comment": None,
    "comment_url": None,
    "rest": "",
    "rfc2109": True,
}  # type: Dict[str, Any]
def get_or_post(
    uri, method, req, content_type=DEFAULT_POST_CONTENT_TYPE, accept=None, **kwargs
):
    """
    Construct an HTTP request from a request message.

    :param uri: Target URI.
    :param method: HTTP method ("GET", "DELETE", "POST" or "PUT").
    :param req: Request message; must support ``keys``/``copy``/
        ``to_urlencoded`` (and ``to_json`` for JSON bodies).
    :param content_type: Body encoding for POST/PUT requests.
    :param accept: Optional value for the Accept header.
    :param kwargs: Extra keyword arguments; ``kwargs["headers"]`` is extended
        in place when present.
    :return: Tuple of (path, body, kwargs).
    :raises UnSupported: On an unsupported method or content type.
    """
    if method in ["GET", "DELETE"]:
        if req.keys():
            _req = req.copy()
            comp = urlsplit(str(uri))
            if comp.query:
                # Merge query parameters already present in the URI.
                _req.update(parse_qs(comp.query))
            _query = str(_req.to_urlencoded())
            path = urlunsplit(
                (comp.scheme, comp.netloc, comp.path, _query, comp.fragment)
            )
        else:
            path = uri
        body = None
    elif method in ["POST", "PUT"]:
        path = uri
        if content_type == URL_ENCODED:
            body = req.to_urlencoded()
        elif content_type == JSON_ENCODED:
            body = req.to_json()
        else:
            raise UnSupported("Unsupported content type: '%s'" % content_type)
        header_ext = {"Content-Type": content_type}
        if accept:
            # BUG FIX: previously the Accept header *replaced* header_ext,
            # silently dropping the Content-Type header from the request.
            header_ext["Accept"] = accept
        if "headers" in kwargs.keys():
            kwargs["headers"].update(header_ext)
        else:
            kwargs["headers"] = header_ext
    else:
        raise UnSupported("Unsupported HTTP method: '%s'" % method)
    return path, body, kwargs
def set_cookie(cookiejar, kaka):
    """
    Place a cookie (an http.cookiejar.Cookie built from a Set-Cookie header
    morsel) in the cookie jar.

    :param cookiejar: The http.cookiejar.CookieJar to update.
    :param kaka: A cookie container (e.g. http.cookies.SimpleCookie) mapping
        cookie names to morsels.
    """
    # default rfc2109=False
    # max-age, httponly
    for cookie_name, morsel in kaka.items():
        # Start from the full Cookie kwargs template and fill it in.
        std_attr = ATTRS.copy()
        std_attr["name"] = cookie_name
        _tmp = morsel.coded_value
        if _tmp.startswith('"') and _tmp.endswith('"'):
            # Strip surrounding double quotes from the coded value.
            std_attr["value"] = _tmp[1:-1]
        else:
            std_attr["value"] = _tmp
        std_attr["version"] = 0
        attr = ""
        # copy attributes that have values
        try:
            for attr in morsel.keys():
                if attr in ATTRS:
                    if morsel[attr]:
                        if attr == "expires":
                            # Convert the HTTP date string to epoch seconds.
                            std_attr[attr] = http2time(morsel[attr])
                        else:
                            std_attr[attr] = morsel[attr]
                elif attr == "max-age":
                    # max-age is not a Cookie kwarg; map it onto expires.
                    if morsel[attr]:
                        std_attr["expires"] = http2time(morsel[attr])
        except TimeFormatError:
            # Ignore cookie
            logger.info(
                "Time format error on %s parameter in received cookie"
                % (sanitize(attr),)
            )
            continue
        # Flip the matching "*_specified" flags for attributes with values.
        for att, spec in PAIRS.items():
            if std_attr[att]:
                std_attr[spec] = True
        if std_attr["domain"] and std_attr["domain"].startswith("."):
            std_attr["domain_initial_dot"] = True
        if morsel["max-age"] == 0:
            # max-age == 0 means "delete this cookie".
            try:
                cookiejar.clear(
                    domain=std_attr["domain"],
                    path=std_attr["path"],
                    name=std_attr["name"],
                )
            except ValueError:
                pass
        else:
            # Fix for Microsoft cookie error
            if "version" in std_attr:
                try:
                    std_attr["version"] = std_attr["version"].split(",")[0]
                except (TypeError, AttributeError):
                    pass
            new_cookie = http_cookiejar.Cookie(**std_attr)  # type: ignore
            cookiejar.set_cookie(new_cookie)
def match_to_(val, vlist):
    """Return True when *vlist* — a string or an iterable of strings —
    contains an entry that starts with *val*."""
    if isinstance(vlist, str):
        return vlist.startswith(val)
    return any(entry.startswith(val) for entry in vlist)
def verify_header(reqresp, body_type):
    """Validate the response content-type against the expected *body_type*.

    An empty *body_type* is inferred from the content-type header instead.
    Returns the (possibly adjusted) body-type string; raises ValueError when
    the header contradicts the expectation or *body_type* is unknown.
    """
    logger.debug("resp.headers: %s" % (sanitize(reqresp.headers),))
    logger.debug("resp.txt: %s" % (sanitize(reqresp.text),))
    if body_type == "":
        ctype = reqresp.headers["content-type"]
        if match_to_("application/json", ctype):
            return "json"
        if match_to_("application/jwt", ctype):
            return "jwt"
        if match_to_(URL_ENCODED, ctype):
            return "urlencoded"
        return "txt"  # reasonable default ??
    if body_type == "json":
        ctype = reqresp.headers["content-type"]
        if match_to_("application/json", ctype):
            return "json"
        if match_to_("application/jwt", ctype):
            return "jwt"
        raise ValueError("content-type: %s" % (ctype,))
    if body_type == "jwt":
        if not match_to_("application/jwt", reqresp.headers["content-type"]):
            raise ValueError(
                "Wrong content-type in header, got: {} expected "
                "'application/jwt'".format(reqresp.headers["content-type"])
            )
        return "jwt"
    if body_type == "urlencoded":
        ctype = reqresp.headers["content-type"]
        if not match_to_(DEFAULT_POST_CONTENT_TYPE, ctype):
            if not match_to_("text/plain", ctype):
                raise ValueError("Wrong content-type")
        return "urlencoded"
    raise ValueError("Unknown return format: %s" % body_type)
| [
"logging.getLogger",
"oic.utils.sanitize.sanitize",
"oic.exception.UnSupported",
"urllib.parse.urlunsplit",
"urllib.parse.parse_qs",
"http.cookiejar.Cookie",
"http.cookiejar.http2time"
] | [((418, 445), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (435, 445), False, 'import logging\n'), ((1664, 1736), 'urllib.parse.urlunsplit', 'urlunsplit', (['(comp.scheme, comp.netloc, comp.path, _query, comp.fragment)'], {}), '((comp.scheme, comp.netloc, comp.path, _query, comp.fragment))\n', (1674, 1736), False, 'from urllib.parse import urlunsplit\n'), ((2413, 2466), 'oic.exception.UnSupported', 'UnSupported', (['("Unsupported HTTP method: \'%s\'" % method)'], {}), '("Unsupported HTTP method: \'%s\'" % method)\n', (2424, 2466), False, 'from oic.exception import UnSupported\n'), ((4701, 4734), 'http.cookiejar.Cookie', 'http_cookiejar.Cookie', ([], {}), '(**std_attr)\n', (4722, 4734), True, 'from http import cookiejar as http_cookiejar\n'), ((5108, 5133), 'oic.utils.sanitize.sanitize', 'sanitize', (['reqresp.headers'], {}), '(reqresp.headers)\n', (5116, 5133), False, 'from oic.utils.sanitize import sanitize\n'), ((5172, 5194), 'oic.utils.sanitize.sanitize', 'sanitize', (['reqresp.text'], {}), '(reqresp.text)\n', (5180, 5194), False, 'from oic.utils.sanitize import sanitize\n'), ((1575, 1595), 'urllib.parse.parse_qs', 'parse_qs', (['comp.query'], {}), '(comp.query)\n', (1583, 1595), False, 'from urllib.parse import parse_qs\n'), ((2066, 2126), 'oic.exception.UnSupported', 'UnSupported', (['("Unsupported content type: \'%s\'" % content_type)'], {}), '("Unsupported content type: \'%s\'" % content_type)\n', (2077, 2126), False, 'from oic.exception import UnSupported\n'), ((3399, 3422), 'http.cookiejar.http2time', 'http2time', (['morsel[attr]'], {}), '(morsel[attr])\n', (3408, 3422), False, 'from http.cookiejar import http2time\n'), ((3634, 3657), 'http.cookiejar.http2time', 'http2time', (['morsel[attr]'], {}), '(morsel[attr])\n', (3643, 3657), False, 'from http.cookiejar import http2time\n'), ((3833, 3847), 'oic.utils.sanitize.sanitize', 'sanitize', (['attr'], {}), '(attr)\n', (3841, 3847), False, 'from oic.utils.sanitize import 
sanitize\n')] |
#!/usr/bin/python
#
# Decoding a legacy chain ric
#
import pyrfa
p = pyrfa.Pyrfa()
p.createConfigDb("./pyrfa.cfg")
p.acquireSession("Session1")
p.createOMMConsumer()
p.login()
p.directoryRequest()
p.dictionaryRequest()
p.setInteractionType("snapshot")
def snapshotRequest(chainRIC):
    """Request a snapshot of *chainRIC* and return the first IMAGE event,
    or an empty tuple when none arrives within the dispatch window."""
    p.marketPriceRequest(chainRIC)
    events = p.dispatchEventQueue(1000) or ()
    for event in events:
        if event['MTYPE'] == 'IMAGE':
            return event
    return ()
# Candidate chain-link field names; expandChainRIC() collects the value of
# every one of these that is present and non-empty in a chain record.
fids = ['LINK_1', 'LINK_2', 'LINK_3', 'LINK_4', 'LINK_5', 'LINK_6', 'LINK_7', 'LINK_8',
        'LINK_9', 'LINK_10', 'LINK_11', 'LINK_12', 'LINK_13', 'LINK_14',
        'LONGLINK1', 'LONGLINK2', 'LONGLINK3', 'LONGLINK4', 'LONGLINK5', 'LONGLINK6', 'LONGLINK7',
        'LONGLINK8', 'LONGLINK9', 'LONGLINK10', 'LONGLINK11', 'LONGLINK12', 'LONGLINK13', 'LONGLINK14',
        'BR_LINK1', 'BR_LINK2', 'BR_LINK3', 'BR_LINK4', 'BR_LINK5', 'BR_LINK6', 'BR_LINK7', 'BR_LINK8',
        'BR_LINK9', 'BR_LINK10', 'BR_LINK11', 'BR_LINK12', 'BR_LINK13', 'BR_LINK14']
def expandChainRIC(chainRIC):
    """Walk a chain record and return the list of all constituent RICs.

    Every non-empty link field (see ``fids``) of each chain page is
    collected; pages are followed through the NEXT_LR / LONGNEXTLR /
    BR_NEXTLR continuation fields until the chain ends.

    BUG FIX: ``dict.has_key`` was removed in Python 3 (the script already
    uses Python 3 ``print()``); the ``in`` operator is used instead, which
    also works on Python 2.
    """
    expanded = []
    done = False
    snapshot = snapshotRequest(chainRIC)
    while not done:
        if not snapshot:
            break
        for fid in fids:
            if fid in snapshot and snapshot[fid]:
                expanded.append(snapshot[fid])
        # Follow whichever continuation field the template provides.
        if 'NEXT_LR' in snapshot and snapshot['NEXT_LR']:
            snapshot = snapshotRequest(snapshot['NEXT_LR'])
        elif 'LONGNEXTLR' in snapshot and snapshot['LONGNEXTLR']:
            snapshot = snapshotRequest(snapshot['LONGNEXTLR'])
        elif 'BR_NEXTLR' in snapshot and snapshot['BR_NEXTLR']:
            snapshot = snapshotRequest(snapshot['BR_NEXTLR'])
        else:
            done = True
    return expanded
# Expand the FTSE index chain and print its constituent RICs.
rics = expandChainRIC("0#.FTSE")
print(rics)
| [
"pyrfa.Pyrfa"
] | [((70, 83), 'pyrfa.Pyrfa', 'pyrfa.Pyrfa', ([], {}), '()\n', (81, 83), False, 'import pyrfa\n')] |
import requests as reqlib
import os
import re
import random
import time
import pickle
import abc
import hashlib
import threading
from urllib.parse import urlparse
from purifier import TEAgent
from purifier.logb import getLogger
from enum import IntEnum
from typing import Tuple, List, Dict, Optional
class ScraperTimeout(Exception):
    """Raised when the HTTP request fails at the transport level; wraps the
    original exception in ``ex``."""
    def __init__(self, ex):
        self.ex = ex
    def __str__(self):
        return f"Timeout: {self.ex}"
class ScraperNot200(Exception):
    """Raised when the server answers with a non-OK HTTP status code ``sc``."""
    def __init__(self, sc):
        self.sc = sc
    def __str__(self):
        return f"Unexpected Status Code={self.sc}!"
class UnsupportedMIME(Exception):
    """Raised when the response MIME type is not in the supported set."""
    def __init__(self, mime):
        self.mime = mime
    def __str__(self):
        return f"Unsupported MIME={self.mime}!"
class Scraper(metaclass=abc.ABCMeta):
    """Abstract interface for page scrapers."""
    @abc.abstractmethod
    def get(self, url):
        """Fetch *url* and return its body as text."""
        pass
class ReqScraper(object):
    """HTTP scraper backed by ``requests`` with a simple on-disk page cache.

    Fetched pages are stored under ``page_cache_path``, keyed by host plus
    the MD5 of the URL, so repeated ``get`` calls are served from disk.
    """

    def __init__(self,
                 page_cache_path="page_caches",
                 headers=None,
                 skip_cache=False,
                 supported_mime_set=None):
        """
        :param page_cache_path: Cache directory (created if missing).
        :param headers: Extra HTTP headers; defaults to a desktop User-Agent.
        :param skip_cache: When True, always re-fetch instead of reading cache.
        :param supported_mime_set: Accepted response MIME types.
        """
        # BUG FIX: ``headers`` and ``supported_mime_set`` were mutable
        # default arguments shared across instances; use None sentinels.
        self.page_cache_path = page_cache_path
        if not os.path.isdir(self.page_cache_path):
            os.makedirs(self.page_cache_path)
        self.headers = {'User-Agent': 'Mozilla/5.0'} if headers is None else headers
        self.logger = getLogger(os.path.basename(self.__class__.__name__))
        self.skip_cache = skip_cache
        self.supported_mime_set = (
            {"text/html"} if supported_mime_set is None else supported_mime_set
        )

    def _get_cache_path(self, url):
        """Return the cache file path for *url* (host + MD5 of the URL)."""
        test_url_host = urlparse(url).netloc
        url_md5 = hashlib.md5(url.encode('utf-8')).hexdigest()
        cache_file_name = f"{test_url_host}_{url_md5}.txt"
        cache_file_path = os.path.join(self.page_cache_path, cache_file_name)
        return cache_file_path

    def _del_from_cache(self, url):
        """Remove the cached copy of *url*, if any."""
        cache_file_path = self._get_cache_path(url)
        if os.path.isfile(cache_file_path):
            # BUG FIX: message previously lacked the f-prefix and logged the
            # literal "{cache_file_path}".
            self.logger.warning(f"Removing cache file={cache_file_path}...")
            os.remove(cache_file_path)

    def _get_from_cache(self, url):
        """Return the cached body of *url*, or None when not cached."""
        cache_file_path = self._get_cache_path(url)
        if os.path.isfile(cache_file_path):
            self.logger.debug(f"Return content of {url} from cache...")
            with open(cache_file_path, 'r', encoding='utf8') as fo:
                return fo.read()
        return None

    def _save2cache(self, url, html_content):
        """Persist *html_content* as the cached body of *url*."""
        cache_file_path = self._get_cache_path(url)
        with open(cache_file_path, 'w', encoding='utf8') as fw:
            fw.write(html_content)

    def get(self, url):
        """Fetch *url*, honouring the cache unless ``skip_cache`` is set.

        :raises ScraperNot200: On a non-OK HTTP status.
        :raises UnsupportedMIME: When the response MIME type is unsupported.
        :raises ScraperTimeout: On transport-level failures.
        """
        if not self.skip_cache:
            cache_text = self._get_from_cache(url)
            if cache_text is not None:
                return cache_text
        self.logger.debug(f"Crawling {url}...")
        try:
            resp = reqlib.get(url, headers=self.headers, timeout=(5, 10))
            if resp.ok:
                mime = resp.headers['content-type'].split(';')[0].strip()
                self.logger.debug(f"URL={url} with MIME={mime}...")
                if mime.lower() not in self.supported_mime_set:
                    raise UnsupportedMIME(mime)
                self._save2cache(url, resp.text)
                return resp.text
            else:
                raise ScraperNot200(resp.status_code)
        except (UnsupportedMIME, ScraperNot200):
            # BUG FIX: the blanket ``except Exception`` below used to catch
            # these deliberate raises and rewrap them as ScraperTimeout,
            # making callers' dedicated handlers unreachable.
            raise
        except Exception as e:
            raise ScraperTimeout(e)
class bcolors:
    """ANSI terminal escape codes for coloured/styled log output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset to default style
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class ThreadState(IntEnum):
    """Lifecycle states for ExplorerWorker's run loop."""
    STOPPED = 0
    RUNNING = 1
    STOPPING = 2
class CrawlAgent(object):
    """Combines ReqScraper fetching with TEAgent text extraction, applying
    a randomized politeness delay between live fetches."""

    def __init__(self, name, throttling_range=(1, 2)):
        """
        :param name: Agent name; also prefixes the page-cache directory.
        :param throttling_range: (min, max) seconds of random delay.
        """
        self.rs = ReqScraper(page_cache_path=f"{name}_cache")
        self.et = TEAgent(
            policy_path="policy",
            disable_policy=True,
            ext_title=True
        )
        self.logger = getLogger(os.path.basename(self.__class__.__name__))
        self.throttling_range = throttling_range

    def obsolete_cache(self, url):
        """Drop the cached copy of *url* so the next fetch is fresh."""
        self.rs._del_from_cache(url)

    def handle(self, url: str, skip_throttling: bool = False) -> Tuple[str, str, List[str]]:
        """Fetch *url* and return (title, text, links); (None, None, None)
        when fetching or extraction fails."""
        try:
            # BUG FIX: the delay used to run when skip_throttling was True,
            # i.e. exactly when the caller asked to skip it.
            if not skip_throttling:
                wait_in_sec = random.uniform(*self.throttling_range)
                self.logger.debug(f"throttling wait {wait_in_sec}s...")
                time.sleep(wait_in_sec)
            url_content_html = self.rs.get(url)
            is_succ, rst, handler = self.et.parse(
                "text/html",
                url,
                url_content_html,
                do_ext_link=True
            )
            # Both success and failure branches returned the same tuple;
            # collapsed into a single return.
            return (rst['title'], rst['text'], rst['all_links'])
        except ScraperNot200 as e:
            self.logger.warning(f"Fail to handle URL={url}: {str(e)}")
            return None, None, None
        except UnsupportedMIME as e:
            self.logger.warning(f"Fail to handle URL={url}: {str(e)}")
            return None, None, None
        except ScraperTimeout as e:
            time.sleep(2)
            self.logger.warning(f"Fail to handle URL={url}: {str(e)}")
            return None, None, None
class ExplorerWorker(threading.Thread):
    """Breadth-first site explorer thread.

    Starting from ``src_url``, fetches pages via a CrawlAgent, saves their
    extracted text under ``page_saved_dir``, and enqueues every collected
    link matching ``url_ptn``. Progress is persisted across runs in a
    pickled URL -> success dict.
    """
    def __init__(
            self,
            name:str,
            url_ptn:str,
            src_url:str,
            test_run:int=-1,
            page_saved_dir:Optional[str]=None):
        """
        :param name: Worker name; prefixes cache/output files.
        :param url_ptn: Regex a collected URL must match to be enqueued.
        :param src_url: Seed URL (re-crawled periodically, never cached as done).
        :param test_run: Stop after this many processed URLs (<= 0: unlimited).
        :param page_saved_dir: Output directory for dumped pages.
        """
        super(ExplorerWorker, self ).__init__(name = name)
        self.name = name
        self.url_ptn = url_ptn
        self.src_url = src_url
        self.test_run = test_run
        self.ca = CrawlAgent(name)
        self.pc_dict = self._get_pc_dict()
        ''' Processed result cache: Key as URL; value as bool (True means this URL is crawled successfully)'''
        self.state = ThreadState.STOPPED
        ''' Thread state: 0-> stopped; 1-> running; 2-> stopping'''
        self.logger = getLogger(os.path.basename(self.__class__.__name__))
        ''' Logger object '''
        self.page_saved_dir = page_saved_dir if page_saved_dir is not None else f"{self.name}_pages_output"
        ''' Path or directory to save dump page'''
        self.stop_signal = f"STOP_{self.name}"
        ''' Stop signal file '''
        if not os.path.isdir(self.page_saved_dir):
            os.makedirs(self.page_saved_dir)
    def _get_output_page_path(self, url):
        """Return the dump-file path for *url* (host + MD5 of the URL)."""
        url_host = urlparse(url).netloc
        url_md5 = hashlib.md5(url.encode('utf-8')).hexdigest()
        page_file_name = f"{url_host}_{url_md5}.txt"
        page_file_path = os.path.join(self.page_saved_dir, page_file_name)
        return page_file_path
    def _get_pc_serialized_file(self) -> str:
        """Return the pickle file name holding the processed-URL dict."""
        return f"{self.name}_pc_dict.pkl"
    def _get_pc_dict(self) -> Dict[str, bool]:
        """Load the processed-URL dict from disk, or start empty."""
        pkl_file = self._get_pc_serialized_file()
        if os.path.isfile(pkl_file):
            with open(pkl_file, 'rb') as fo:
                return pickle.load(fo)
        else:
            return {}
    def _serialized(self):
        """Persist the processed-URL dict to disk."""
        pkl_file = self._get_pc_serialized_file()
        with open(pkl_file, 'wb') as fo:
            pickle.dump(self.pc_dict, fo)
    def run(self):
        """Main crawl loop; runs until the queue drains, a stop is
        requested, or ``test_run`` is exceeded."""
        self.state = ThreadState.RUNNING
        url_queue = [self.src_url]
        # pc: processed, sc: succeeded, fc: failed, oc: already-handled.
        pc = sc = fc = oc = 0
        while self.state == ThreadState.RUNNING and url_queue:
            # A file named STOP_<name> requests a graceful shutdown.
            if os.path.isfile(self.stop_signal):
                os.remove(self.stop_signal)
                self.logger.warning("Receive STOP signal!")
                break
            url = url_queue.pop(0)
            pc += 1
            if url not in self.pc_dict:
                # New URL
                self.logger.debug(f"Handling URL={url}...")
                title, content, collected_urls = self.ca.handle(url)
                if content is None:
                    self.pc_dict[url] = False
                    fc += 1
                else:
                    # The seed URL is never marked done so it gets re-crawled.
                    if url != self.src_url:
                        self.pc_dict[url] = True
                    sc += 1
                    self.logger.info(bcolors.BOLD + f"Completed URL={url} ({len(url_queue):,d}/{pc:,d})" + bcolors.ENDC)
                    next_level_urls = list(filter(lambda u: re.match(self.url_ptn, u) is not None and "#" not in u, collected_urls))
                    if next_level_urls:
                        self.logger.debug(f"\tCollected {len(next_level_urls)} next level URL(s)")
                        url_queue.extend(list(set(next_level_urls) - set(url_queue)))
                    # Only dump pages for query-less URLs.
                    if content and "?" not in url:
                        page_output_path = self._get_output_page_path(url)
                        with open(page_output_path, 'w', encoding='utf8') as fw:
                            fw.write(f"{url}\n\n")
                            fw.write(f"{title}\n\n")
                            fw.write(f"{content}")
                        self.logger.debug(f"\tSaved page to {page_output_path}!")
            else:
                # Old URL
                if not self.pc_dict[url]:
                    self.logger.info(f"Skip broken URL={url} in the past...")
                    continue
                title, content, collected_urls = self.ca.handle(url, skip_throttling=True)
                if collected_urls:
                    next_level_urls = list(filter(lambda u: re.match(self.url_ptn, u) is not None, collected_urls))
                    url_queue.extend(list(set(next_level_urls) - set(url_queue)))
                oc += 1
                self.logger.info(f"URL={url} is already handled...({len(url_queue):,d}/{pc:,d})")
                continue
            if self.test_run > 0:
                if (sc + fc) > self.test_run:
                    self.logger.info(f"Exceed test_run={self.test_run} and therefore stop running...")
                    break
            # Periodic checkpoint: persist progress and re-seed the crawl.
            if pc % 1000 == 0:
                self.logger.info(bcolors.OKBLUE + bcolors.BOLD + f"{pc} URL completed: sc={sc:,d}; fc={fc:,d}; oc={oc:,d}\n" + bcolors.ENDC)
                self._serialized()
                self.ca.obsolete_cache(self.src_url)
                url_queue.append(self.src_url)
        self.logger.warning(f"Serialized explorer result (name={self.name})...")
        self._serialized()
        self.logger.warning(f"Explorer is stopped! (name={self.name})...")
        self.state = ThreadState.STOPPED
    def stop(self):
        """Request a stop and block until the run loop has finished."""
        self.logger.warning(f"Stopping explorer worker (name={self.name})...")
        if self.state == ThreadState.RUNNING:
            self.state = ThreadState.STOPPING
        while self.state != ThreadState.STOPPED:
            time.sleep(1)
| [
"random.uniform",
"pickle.dump",
"urllib.parse.urlparse",
"os.makedirs",
"purifier.TEAgent",
"os.path.join",
"pickle.load",
"requests.get",
"time.sleep",
"os.path.isfile",
"re.match",
"os.path.isdir",
"os.path.basename",
"os.remove"
] | [((1808, 1859), 'os.path.join', 'os.path.join', (['self.page_cache_path', 'cache_file_name'], {}), '(self.page_cache_path, cache_file_name)\n', (1820, 1859), False, 'import os\n'), ((1996, 2027), 'os.path.isfile', 'os.path.isfile', (['cache_file_path'], {}), '(cache_file_path)\n', (2010, 2027), False, 'import os\n'), ((2262, 2293), 'os.path.isfile', 'os.path.isfile', (['cache_file_path'], {}), '(cache_file_path)\n', (2276, 2293), False, 'import os\n'), ((4087, 4153), 'purifier.TEAgent', 'TEAgent', ([], {'policy_path': '"""policy"""', 'disable_policy': '(True)', 'ext_title': '(True)'}), "(policy_path='policy', disable_policy=True, ext_title=True)\n", (4094, 4153), False, 'from purifier import TEAgent\n'), ((7053, 7102), 'os.path.join', 'os.path.join', (['self.page_saved_dir', 'page_file_name'], {}), '(self.page_saved_dir, page_file_name)\n', (7065, 7102), False, 'import os\n'), ((7347, 7371), 'os.path.isfile', 'os.path.isfile', (['pkl_file'], {}), '(pkl_file)\n', (7361, 7371), False, 'import os\n'), ((1258, 1293), 'os.path.isdir', 'os.path.isdir', (['self.page_cache_path'], {}), '(self.page_cache_path)\n', (1271, 1293), False, 'import os\n'), ((1308, 1341), 'os.makedirs', 'os.makedirs', (['self.page_cache_path'], {}), '(self.page_cache_path)\n', (1319, 1341), False, 'import os\n'), ((1433, 1474), 'os.path.basename', 'os.path.basename', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (1449, 1474), False, 'import os\n'), ((1636, 1649), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (1644, 1649), False, 'from urllib.parse import urlparse\n'), ((2119, 2145), 'os.remove', 'os.remove', (['cache_file_path'], {}), '(cache_file_path)\n', (2128, 2145), False, 'import os\n'), ((3001, 3055), 'requests.get', 'reqlib.get', (['url'], {'headers': 'self.headers', 'timeout': '(5, 10)'}), '(url, headers=self.headers, timeout=(5, 10))\n', (3011, 3055), True, 'import requests as reqlib\n'), ((4238, 4279), 'os.path.basename', 'os.path.basename', 
(['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (4254, 4279), False, 'import os\n'), ((6390, 6431), 'os.path.basename', 'os.path.basename', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (6406, 6431), False, 'import os\n'), ((6733, 6767), 'os.path.isdir', 'os.path.isdir', (['self.page_saved_dir'], {}), '(self.page_saved_dir)\n', (6746, 6767), False, 'import os\n'), ((6782, 6814), 'os.makedirs', 'os.makedirs', (['self.page_saved_dir'], {}), '(self.page_saved_dir)\n', (6793, 6814), False, 'import os\n'), ((6888, 6901), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (6896, 6901), False, 'from urllib.parse import urlparse\n'), ((7637, 7666), 'pickle.dump', 'pickle.dump', (['self.pc_dict', 'fo'], {}), '(self.pc_dict, fo)\n', (7648, 7666), False, 'import pickle\n'), ((7890, 7922), 'os.path.isfile', 'os.path.isfile', (['self.stop_signal'], {}), '(self.stop_signal)\n', (7904, 7922), False, 'import os\n'), ((4585, 4623), 'random.uniform', 'random.uniform', (['*self.throttling_range'], {}), '(*self.throttling_range)\n', (4599, 4623), False, 'import random\n'), ((4714, 4737), 'time.sleep', 'time.sleep', (['wait_in_sec'], {}), '(wait_in_sec)\n', (4724, 4737), False, 'import time\n'), ((5521, 5534), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5531, 5534), False, 'import time\n'), ((7443, 7458), 'pickle.load', 'pickle.load', (['fo'], {}), '(fo)\n', (7454, 7458), False, 'import pickle\n'), ((7941, 7968), 'os.remove', 'os.remove', (['self.stop_signal'], {}), '(self.stop_signal)\n', (7950, 7968), False, 'import os\n'), ((11346, 11359), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (11356, 11359), False, 'import time\n'), ((9976, 10001), 're.match', 're.match', (['self.url_ptn', 'u'], {}), '(self.url_ptn, u)\n', (9984, 10001), False, 'import re\n'), ((8793, 8818), 're.match', 're.match', (['self.url_ptn', 'u'], {}), '(self.url_ptn, u)\n', (8801, 8818), False, 'import re\n')] |
import decimal
import hashlib
import json
import requests
import tempfile
import uuid
import os
from tqdm import tqdm
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
def sha256_for_file(f, buf_size=65536):
    """Return (size, sha256 hexdigest) of file object *f* from its current
    position, restoring that position afterwards."""
    start = f.tell()
    digest = hashlib.sha256()
    chunk = f.read(buf_size)
    while chunk:
        digest.update(chunk)
        chunk = f.read(buf_size)
    total = f.tell() - start
    f.seek(start)
    return total, digest.hexdigest()
# Kubernetes namespace the fission resources are created in.
namespace = "default"
# Base URL of the fission API; must be provided via the environment.
fission_url = os.environ["FISSION_URL"]
def post(rel_url, data):
    """POST *data* (JSON-encoded) to the fission API.

    Returns (status_code, parsed_body); the body is None for 404/409.
    Raises Exception with the response text on a 500.
    """
    target = "%s%s" % (fission_url, rel_url)
    payload = json.dumps(data)
    response = requests.post(
        target,
        data=payload,
        headers={"Content-Type": "application/json"})
    code = response.status_code
    if code in (404, 409):
        return code, None
    if code == 500:
        raise Exception(response.text)
    return code, response.json()
def get(rel_url, params=None):
    """GET from the fission API.

    Returns (status_code, parsed_body); the body is None for 404.
    Raises Exception with the response text on a 500.
    """
    response = requests.get("%s%s" % (fission_url, rel_url), params=params)
    code = response.status_code
    if code == 404:
        return code, None
    if code == 500:
        raise Exception(response.text)
    return code, response.json()
def format_bytes(count):
label_ix = 0
labels = ["B", "KiB", "MiB", "GiB"]
while label_ix < len(labels) and count / 1024. > 1:
count = count / 1024.
label_ix += 1
count = decimal.Decimal(count)
count = count.to_integral() if count == count.to_integral() else round(count.normalize(), 2)
return "%s %s" % (count, labels[label_ix])
def lazily_define_package(environment, file):
    """Upload *file* as a fission archive and register a package for it.

    Packages are keyed by the sha256 of the file contents, so an
    already-uploaded file is reused instead of re-uploaded.

    :param environment: Environment reference dict for the package spec.
    :param file: Open file object positioned at the content to upload.
    :return: Tuple of (archive_sha256, package-resource-json).
    """
    filesize, archive_sha256 = sha256_for_file(file)
    base_archive_url = "%s/proxy/storage/v1/archive" % fission_url
    status_code, response = get("/v2/packages/%s" % archive_sha256)
    if status_code == 200:
        # A package with this content hash already exists -- reuse it.
        print("Already uploaded", flush=True)
        return archive_sha256, response
    progress = tqdm(
        total=filesize,
        desc="Uploading",
        unit="B",
        unit_scale=True,
        unit_divisor=1024,
        leave=True)
    last_bytes_read = 0
    def update_progress(monitor):
        # Advance the bar by the bytes read since the previous callback.
        nonlocal last_bytes_read
        progress.update(monitor.bytes_read - last_bytes_read)
        last_bytes_read = monitor.bytes_read
    e = MultipartEncoder(fields={'uploadfile': ('uploaded', file, 'text/plain')})
    m = MultipartEncoderMonitor(e, update_progress)
    archive_response = requests.post(base_archive_url,
                                     data=m,
                                     headers={
                                         "X-File-Size": str(filesize),
                                         'Content-Type': m.content_type})
    archive_id = archive_response.json()['id']
    print(" done", flush=True)
    archive_url = "%s?id=%s" % (base_archive_url, archive_id)
    package = {
        "metadata": {
            "name": archive_sha256,
            "namespace": namespace,
        },
        "spec": {
            "environment": environment,
            "deployment": {
                "type": "url",
                "url": archive_url,
                "checksum": {
                    "type": "sha256",
                    "sum": archive_sha256,
                },
            },
        },
        "status": {
            "buildstatus": "succeeded",
        },
    }
    return archive_sha256, post("/v2/packages", package)[1]
def lazily_define_function(environment, f):
    """Ensure a fission function exists for the archive built from *f*.

    The function name is the first 8 hex characters of the archive sha256.

    :return: The function name.
    :raises Exception: With the API response body when registration fails.
    """
    archive_sha256, package_ref = lazily_define_package(environment, f)
    print("Registering ...", end='', flush=True)
    function_name = archive_sha256[:8]
    status_code, response = get("/v2/functions/%s" % function_name)
    if status_code == 200:
        # Already registered.
        return function_name
    status_code, r = post("/v2/functions", {
        "metadata": {
            "name": function_name,
            "namespace": namespace,
        },
        "spec": {
            "environment": environment,
            "package": {
                "functionName": function_name,
                "packageref": package_ref,
            },
        },
    })
    if status_code == 409 or status_code == 201:
        print(" done", flush=True)
        return function_name
    print(" error", flush=True)
    # BUG FIX: ``r`` is already-parsed JSON (dict or None) from post(), not
    # a Response object, so ``r.text`` raised AttributeError here.
    raise Exception(r)
def lazily_define_trigger2(function_name, http_method, host, relativeurl):
    """Ensure an HTTP trigger routing *http_method* *host* *relativeurl*
    to *function_name* exists.

    :raises Exception: With the API response body when creation fails.
    """
    # Derive a deterministic trigger name from host, path and method.
    trigger_name = "%s-%s-%s" % (
        host.replace('.', '-'),
        relativeurl.replace(':.*', '').replace('{', '').replace('}', '').replace('/', '-'),
        http_method.lower())
    status_code, response = get("/v2/triggers/http/%s" % trigger_name)
    if status_code == 200:
        # Trigger already exists.
        return
    status_code, r = post("/v2/triggers/http", {
        "metadata": {
            "name": trigger_name,
            "namespace": namespace,
        },
        "spec": {
            "host": host,
            "relativeurl": relativeurl,
            "method": http_method,
            "functionref": {
                "Type": "name",
                "Name": function_name,
            },
        },
    })
    if status_code == 409 or status_code == 201:
        return
    # BUG FIX: ``r`` is already-parsed JSON (dict or None) from post(), not
    # a Response object, so ``r.text`` raised AttributeError here.
    raise Exception(r)
def publish(environment_name, f):
    """Publish file *f* as a fission function in *environment_name*, wire up
    its HTTP triggers, and return the public base URL."""
    env_ref = {
        "namespace": namespace,
        "name": environment_name,
    }
    function_name = lazily_define_function(env_ref, f)
    host = "%s.tfi.gcp.tesserai.com" % function_name
    for verb, path in (("POST", "/{path-info:.*}"),
                       ("GET", "/{path-info:.*}"),
                       ("GET", "/")):
        lazily_define_trigger2(function_name, verb, host, path)
    return "http://%s" % host
| [
"hashlib.sha256",
"requests_toolbelt.MultipartEncoderMonitor",
"tqdm.tqdm",
"json.dumps",
"requests.get",
"requests_toolbelt.MultipartEncoder",
"decimal.Decimal"
] | [((263, 279), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (277, 279), False, 'import hashlib\n'), ((1049, 1109), 'requests.get', 'requests.get', (["('%s%s' % (fission_url, rel_url))"], {'params': 'params'}), "('%s%s' % (fission_url, rel_url), params=params)\n", (1061, 1109), False, 'import requests\n'), ((1540, 1562), 'decimal.Decimal', 'decimal.Decimal', (['count'], {}), '(count)\n', (1555, 1562), False, 'import decimal\n'), ((2072, 2172), 'tqdm.tqdm', 'tqdm', ([], {'total': 'filesize', 'desc': '"""Uploading"""', 'unit': '"""B"""', 'unit_scale': '(True)', 'unit_divisor': '(1024)', 'leave': '(True)'}), "(total=filesize, desc='Uploading', unit='B', unit_scale=True,\n unit_divisor=1024, leave=True)\n", (2076, 2172), False, 'from tqdm import tqdm\n'), ((2483, 2556), 'requests_toolbelt.MultipartEncoder', 'MultipartEncoder', ([], {'fields': "{'uploadfile': ('uploaded', file, 'text/plain')}"}), "(fields={'uploadfile': ('uploaded', file, 'text/plain')})\n", (2499, 2556), False, 'from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor\n'), ((2565, 2608), 'requests_toolbelt.MultipartEncoderMonitor', 'MultipartEncoderMonitor', (['e', 'update_progress'], {}), '(e, update_progress)\n', (2588, 2608), False, 'from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor\n'), ((651, 667), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (661, 667), False, 'import json\n')] |
from __future__ import annotations
import sys
from skeema.intermediate.compiler.parser import Parser
from skeema import ModelMeta
from skeema import util
def private(name):
    """Prefix *name* with an underscore to mark it as private."""
    return '_' + name
class ClassBuilder:
    """
    Builds skeema model classes dynamically via the ModelMeta metaclass and
    registers them on the target module.
    """
    @staticmethod
    def set_class_module(klass, module_name: str):
        """Attach *klass* to the module named *module_name* and record that
        module as the class's origin."""
        klass.__module__ = module_name
        module = sys.modules[module_name]
        module.__dict__[klass.__name__] = klass
    @staticmethod
    def create_class(class_name: str, base_classes: [str], parameters: [dict], data_members: [dict]):
        """Create and register a model class.

        :param class_name: Name of the new class.
        :param base_classes: Base-class names, resolved in the skeema module.
        :param parameters: Dicts with 'name', 'class' and 'array' keys.
        :param data_members: Dicts with 'name', 'class' and 'array' keys.
        :return: The newly created class.
        """
        module_name = 'skeema'
        # Populate a dictionary of property accessors
        cls_dict = dict()
        # Parsing for json
        def parse(cls, json_str: str):
            return Parser.parse(cls, json_str)
        cls_dict['parse'] = classmethod(parse)
        # Wrap array annotations in brackets, e.g. "Int" -> "[Int]".
        def decorate(annotation: str, array: bool) -> str:
            if array:
                return f'[{annotation}]'
            else:
                return annotation
        parameter_annotation_dict = {
            name: decorate(annotation, array) for name, annotation, array in
            ((parameter['name'], parameter['class'], parameter['array']) for parameter in parameters)
        }
        data_member_dict = {
            name: decorate(annotation, array) for name, annotation, array in
            ((data_member['name'], data_member['class'], data_member['array']) for data_member in data_members)
        }
        cls = ModelMeta(
            class_name,
            tuple(util.class_lookup(module_name, base_class) for base_class in base_classes),
            cls_dict,
            parameters=parameter_annotation_dict,
            data_members=data_member_dict
        )
        ClassBuilder.set_class_module(cls, module_name)
        return cls
| [
"skeema.util.class_lookup",
"skeema.intermediate.compiler.parser.Parser.parse"
] | [((772, 799), 'skeema.intermediate.compiler.parser.Parser.parse', 'Parser.parse', (['cls', 'json_str'], {}), '(cls, json_str)\n', (784, 799), False, 'from skeema.intermediate.compiler.parser import Parser\n'), ((1547, 1589), 'skeema.util.class_lookup', 'util.class_lookup', (['module_name', 'base_class'], {}), '(module_name, base_class)\n', (1564, 1589), False, 'from skeema import util\n')] |
# -*- coding: utf-8 -*-
""" Auto Encoder Example.
Using an auto encoder on MNIST handwritten digits.
References:
    <NAME>, <NAME>, <NAME>, and <NAME>. "Gradient-based
    learning applied to document recognition." Proceedings of the IEEE,
    86(11):2278-2324, November 1998.
Links:
    [MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import tflearn
# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
# X/testX are flattened images (784 = 28x28 per the reshape below); labels are unused.
X, Y, testX, testY = mnist.load_data(one_hot=True)
# Building the encoder
# 784 -> 256 -> 64: the 64-unit layer is the compressed representation.
encoder = tflearn.input_data(shape=[None, 784])
encoder = tflearn.fully_connected(encoder, 256)
encoder = tflearn.fully_connected(encoder, 64)
# Building the decoder
# 64 -> 256 -> 784, sigmoid output to reconstruct pixel intensities.
decoder = tflearn.fully_connected(encoder, 256)
decoder = tflearn.fully_connected(decoder, 784, activation='sigmoid')
# Regression, with mean square error
net = tflearn.regression(decoder, optimizer='adam', learning_rate=0.001,
                         loss='mean_square', metric=None)
# Training the auto encoder
# Note: inputs serve as their own targets (X -> X reconstruction).
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, X, n_epoch=20, validation_set=(testX, testX),
          run_id="auto_encoder", batch_size=256)
# Encoding X[0] for test
print("\nTest encoding of X[0]:")
# New model, re-using the same session, for weights sharing
encoding_model = tflearn.DNN(encoder, session=model.session)
print(encoding_model.predict([X[0]]))
# Testing the image reconstruction on new data (test set)
print("\nVisualizing results after being encoded and decoded:")
testX = tflearn.data_utils.shuffle(testX)[0]
# Applying encode and decode over test set
encode_decode = model.predict(testX)
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(10):
    # Replicate the grayscale channel into RGB for imshow.
    temp = [[ii, ii, ii] for ii in list(testX[i])]
    a[0][i].imshow(np.reshape(temp, (28, 28, 3)))
    temp = [[ii, ii, ii] for ii in list(encode_decode[i])]
    a[1][i].imshow(np.reshape(temp, (28, 28, 3)))
f.show()
plt.draw()
plt.waitforbuttonpress()
| [
"matplotlib.pyplot.draw",
"matplotlib.pyplot.waitforbuttonpress",
"numpy.reshape",
"tflearn.data_utils.shuffle",
"tflearn.datasets.mnist.load_data",
"tflearn.DNN",
"matplotlib.pyplot.subplots",
"tflearn.regression",
"tflearn.fully_connected",
"tflearn.input_data"
] | [((574, 603), 'tflearn.datasets.mnist.load_data', 'mnist.load_data', ([], {'one_hot': '(True)'}), '(one_hot=True)\n', (589, 603), True, 'import tflearn.datasets.mnist as mnist\n'), ((638, 675), 'tflearn.input_data', 'tflearn.input_data', ([], {'shape': '[None, 784]'}), '(shape=[None, 784])\n', (656, 675), False, 'import tflearn\n'), ((686, 723), 'tflearn.fully_connected', 'tflearn.fully_connected', (['encoder', '(256)'], {}), '(encoder, 256)\n', (709, 723), False, 'import tflearn\n'), ((734, 770), 'tflearn.fully_connected', 'tflearn.fully_connected', (['encoder', '(64)'], {}), '(encoder, 64)\n', (757, 770), False, 'import tflearn\n'), ((805, 842), 'tflearn.fully_connected', 'tflearn.fully_connected', (['encoder', '(256)'], {}), '(encoder, 256)\n', (828, 842), False, 'import tflearn\n'), ((853, 912), 'tflearn.fully_connected', 'tflearn.fully_connected', (['decoder', '(784)'], {'activation': '"""sigmoid"""'}), "(decoder, 784, activation='sigmoid')\n", (876, 912), False, 'import tflearn\n'), ((957, 1061), 'tflearn.regression', 'tflearn.regression', (['decoder'], {'optimizer': '"""adam"""', 'learning_rate': '(0.001)', 'loss': '"""mean_square"""', 'metric': 'None'}), "(decoder, optimizer='adam', learning_rate=0.001, loss=\n 'mean_square', metric=None)\n", (975, 1061), False, 'import tflearn\n'), ((1119, 1158), 'tflearn.DNN', 'tflearn.DNN', (['net'], {'tensorboard_verbose': '(0)'}), '(net, tensorboard_verbose=0)\n', (1130, 1158), False, 'import tflearn\n'), ((1404, 1447), 'tflearn.DNN', 'tflearn.DNN', (['encoder'], {'session': 'model.session'}), '(encoder, session=model.session)\n', (1415, 1447), False, 'import tflearn\n'), ((1794, 1830), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(10)'], {'figsize': '(10, 2)'}), '(2, 10, figsize=(10, 2))\n', (1806, 1830), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2080), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2078, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2081, 2105), 
'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (2103, 2105), True, 'import matplotlib.pyplot as plt\n'), ((1617, 1650), 'tflearn.data_utils.shuffle', 'tflearn.data_utils.shuffle', (['testX'], {}), '(testX)\n', (1643, 1650), False, 'import tflearn\n'), ((1921, 1950), 'numpy.reshape', 'np.reshape', (['temp', '(28, 28, 3)'], {}), '(temp, (28, 28, 3))\n', (1931, 1950), True, 'import numpy as np\n'), ((2030, 2059), 'numpy.reshape', 'np.reshape', (['temp', '(28, 28, 3)'], {}), '(temp, (28, 28, 3))\n', (2040, 2059), True, 'import numpy as np\n')] |
"""
## ## ## ## ##
## ## ##
## ## ##
## ## ## ## ## ##
## ##
## ##
##
AUTHOR = <NAME> <<EMAIL>>
"""
import sys
import boto3
import click
import threading
from botocore.exceptions import ClientError
from secureaws import checkaws
from secureaws import setupaws
from secureaws import rsautil
# Important Variables - DO NOT change the values
# Maps human-readable location names to AWS region identifiers; the values
# must match the AWS API region codes exactly.
REGION = {
    "N_VIRGINIA": "us-east-1",
    "OHIO": "us-east-2",
    "N_CALIFORNIA": "us-west-1",
    "OREGON": "us-west-2",
    "MUMBAI": "ap-south-1",
    "SEOUL": "ap-northeast-2",
    "SINGAPORE": "ap-southeast-1",
    "SYDNEY": "ap-southeast-2",
    "TOKYO": "ap-northeast-1",
    "CANADA": "ca-central-1",
    "FRANKFURT": "eu-central-1",
    "IRELAND": "eu-west-1",
    "LONDON": "eu-west-2",
    "PARIS": "eu-west-3",
    "SAO_PAULO": "sa-east-1",
    "BAHRAIN": "me-south-1",
    "STOCKHOLM": "eu-north-1",
    "HONG_KONG": "ap-east-1"
}
class secureaws:
    """Thin wrapper that owns a boto3 Session built from keys, a named
    profile, or the environment defaults."""
    region = ""
    session = None

    def __init__(self, access_key="", secret_key="", profile="", region=""):
        """Create the session; prints the error and exits on failure."""
        self.region = region
        try:
            if profile != "":
                # A named CLI profile takes precedence over explicit keys.
                self.session = boto3.Session(profile_name=profile, region_name=region)
            elif access_key != "" and secret_key != "":
                self.session = boto3.Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key, region_name=region)
            elif access_key == "" and secret_key == "":
                # No credentials supplied: fall back to the default chain.
                self.session = boto3.Session(region_name=region)
        except Exception as e:
            print("Error: {}".format(e))
            exit(1)

    def getSession(self):
        # Accessor for the underlying boto3 session.
        return self.session
# Managing CLI
@click.group()
def chk_group():
    # Click group hosting the `check` command; merged into the root CLI below.
    pass
@chk_group.command()
@click.option('--access-key', help='AWS IAM User Access Key')
@click.option('--secret-key', help='AWS IAM User Access Key')
@click.option('--profile', help='AWS CLI profile')
@click.option('--region', default='us-east-1', help='AWS region identifier. Default: us-east-1')
def check(access_key, secret_key, profile, region):
    '''
    This command will scan your AWS account to identify whether basic security services are enabled or not.
    \b
    IAM Policy:
    {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": [
                    "cloudtrail:DescribeTrails",
                    "config:DescribeConfigurationRecorderStatus",
                    "ec2:DescribeFlowLogs",
                    "iam:GetAccountSummary",
                    "iam:GetAccountPasswordPolicy",
                    "macie:ListMemberAccounts",
                    "guardduty:ListDetectors",
                    "s3:ListAllMyBuckets",
                    "s3:GetEncryptionConfiguration",
                    "ec2:DescribeVolumes"
                ],
                "Resource": "*"
            }
        ]
    }
    \b
    Usage:
    - Scan AWS account using profile:
        secureaws check --profile xxx --region xxx
    - Scan AWS account using keys:
        secureaws check --access-key xxx --secret-key xxx --region xxx
    '''
    # Build a session from whichever credential source the user supplied,
    # then run the read-only audit against it.
    secureaws_obj = secureaws(access_key, secret_key, profile, region)
    checkaws.check_account(secureaws_obj.getSession())
@click.group()
def setup_group():
    # Click group hosting the `setup` command; merged into the root CLI below.
    pass
@setup_group.command()
@click.option('--menu', is_flag=True, help='Display interactive menu to setup security services')
@click.option('--access-key', help='AWS IAM User Access Key')
@click.option('--secret-key', help='AWS IAM User Access Key')
@click.option('--profile', help='AWS CLI profile')
@click.option('--region', default='us-east-1', help='AWS region identifier. Default: us-east-1')
@click.option('--yes', '-y', 'non_interactive', is_flag=True, help='Non-interactive mode')
@click.option('--service', '-s', 'svc', multiple=True, help='Specific service name to setup')
@click.option('--bucket-name', multiple=True, help='Bucket name to encrypt. Only applicable for s3-sse')
@click.option('--instance-id', multiple=True, help='Instance ID (Required only for ebs-sse)')
@click.option('--volume-id', multiple=True, help='Volume ID (Required only for ebs-sse)')
@click.option('--kms-id', help='Supports both KMS Key ID or Alias. Only supported for s3-sse and ebs-sse')
def setup(menu, access_key, secret_key, profile, region, non_interactive, svc, bucket_name, instance_id, volume_id, kms_id):
    '''
    \b
    This command supports securing following services on your AWS account:
    - CloudTrail
    - Config
    - Flow Logs
    - MFA (Default User: root)
    - S3 SSE (Default: AES256)
    - EBS SSE (Default: aws/ebs)
    - Password Policy
    \b
    It is recommended to further restrict down the policy as per your need.
    IAM Policy:
    {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": [
                    "s3:CreateBucket",
                    "s3:PutEncryptionConfiguration",
                    "s3:ListAllMyBuckets",
                    "s3:PutBucketPolicy",
                    "s3:HeadBucket",
                    "cloudtrail:StartLogging",
                    "cloudtrail:CreateTrail",
                    "iam:CreateRole",
                    "iam:PassRole",
                    "iam:AttachRolePolicy",
                    "iam:CreatePolicy",
                    "iam:UpdateAccountPasswordPolicy",
                    "iam:CreateVirtualMFADevice",
                    "iam:EnableMFADevice",
                    "iam:GetUser",
                    "iam:ListMFADevices",
                    "config:StartConfigurationRecorder",
                    "config:PutDeliveryChannel",
                    "config:PutConfigurationRecorder",
                    "logs:CreateLogGroup",
                    "logs:DescribeLogGroups",
                    "ec2:CreateFlowLogs",
                    "ec2:DescribeVpcs",
                    "ec2:StopInstances",
                    "ec2:StartInstances",
                    "ec2:CreateSnapshot",
                    "ec2:CopySnapshot",
                    "ec2:CreateVolume",
                    "ec2:AttachVolume",
                    "ec2:DeleteVolume",
                    "ec2:DeleteSnapshot"
                ],
                "Resource": "*"
            }
        ]
    }
    \b
    Service Names:
    - cloudtrail
    - config
    - flowlogs
    - mfa
    - s3-sse
    - ebs-sse
    - password-policy
    \b
    Usage:
    - Setup all services using AWS profile:
        secureaws setup --profile xxx --region xxx
    - Setup all services using AWS keys in non-interactive mode (except ebs-sse):
        secureaws setup --access-key xxx --secret-key xxx --region xxx -y
    - Setup specific service(s):
        secureaws setup --profile xxx --service cloudtrail -s flowlogs -s mfa --region xxx
    - Setup MFA for an Root user:
        secureaws setup --profile xxx -s mfa
    - Setup MFA for an IAM user:
        secureaws setup --profile xxx -s mfa=username
    - Encrypt all S3 buckets using KMS Key ID:
        secureaws setup --profile xxx --region xxx -s s3-sse --kms-id xxx
    - Encrypt specific S3 buckets using default encryption:
        secureaws setup --profile xxx --region xxx -s s3-sse --bucket-name xxx --bucket-name xxx
    - Encrypt EBS Volumes using Instance ID(s):
        secureaws setup --profile xxx -s ebs-sse --instance-id xxx --region xxx
    - Encrypt EBS Volumes using Volume ID(s) and KMS Alias:
        secureaws setup --profile xxx -s ebs-sse --volume-id xxx --volume-id xxx --kms-id alias/xxx --region xxx
    '''
    # Build the session, then either show the interactive menu or run the
    # (possibly service-filtered) hardening pass directly.
    secureaws_obj = secureaws(access_key, secret_key, profile, region)
    if menu:
        setupaws.secure_account_menu(secureaws_obj.getSession())
    else:
        setupaws.secure_account(secureaws_obj.getSession(), svc, buckets=bucket_name, instance_id=instance_id, volume_id=volume_id, kms_id=kms_id, non_interactive=non_interactive)
@click.group()
def rsa_group():
    # Click group hosting the `genrsa` command; merged into the root CLI below.
    pass
@rsa_group.command()
@click.option('--file-name', help='File name for private and public key')
@click.option('--key-size', default=4096, help='Key size (Default: 4096)')
def genrsa(file_name, key_size):
    '''
    This will generate RSA key pair
    '''
    # Delegates key generation/writing to the rsautil helper.
    rsautil.create_rsa_key_pair(file_name, key_size)
# Map all click groups
# Root CLI entry point: exposes check/setup/genrsa as sub-commands.
sa = click.CommandCollection(sources=[chk_group,setup_group,rsa_group])
def main():
    # Console-script entry point (same behavior as running the module directly).
    sa()
if __name__ == '__main__':
    sa()
| [
"click.group",
"click.option",
"boto3.Session",
"click.CommandCollection",
"secureaws.rsautil.create_rsa_key_pair"
] | [((1743, 1756), 'click.group', 'click.group', ([], {}), '()\n', (1754, 1756), False, 'import click\n'), ((1806, 1866), 'click.option', 'click.option', (['"""--access-key"""'], {'help': '"""AWS IAM User Access Key"""'}), "('--access-key', help='AWS IAM User Access Key')\n", (1818, 1866), False, 'import click\n'), ((1868, 1928), 'click.option', 'click.option', (['"""--secret-key"""'], {'help': '"""AWS IAM User Access Key"""'}), "('--secret-key', help='AWS IAM User Access Key')\n", (1880, 1928), False, 'import click\n'), ((1930, 1979), 'click.option', 'click.option', (['"""--profile"""'], {'help': '"""AWS CLI profile"""'}), "('--profile', help='AWS CLI profile')\n", (1942, 1979), False, 'import click\n'), ((1981, 2081), 'click.option', 'click.option', (['"""--region"""'], {'default': '"""us-east-1"""', 'help': '"""AWS region identifier. Default: us-east-1"""'}), "('--region', default='us-east-1', help=\n 'AWS region identifier. Default: us-east-1')\n", (1993, 2081), False, 'import click\n'), ((3337, 3350), 'click.group', 'click.group', ([], {}), '()\n', (3348, 3350), False, 'import click\n'), ((3404, 3505), 'click.option', 'click.option', (['"""--menu"""'], {'is_flag': '(True)', 'help': '"""Display interactive menu to setup security services"""'}), "('--menu', is_flag=True, help=\n 'Display interactive menu to setup security services')\n", (3416, 3505), False, 'import click\n'), ((3502, 3562), 'click.option', 'click.option', (['"""--access-key"""'], {'help': '"""AWS IAM User Access Key"""'}), "('--access-key', help='AWS IAM User Access Key')\n", (3514, 3562), False, 'import click\n'), ((3564, 3624), 'click.option', 'click.option', (['"""--secret-key"""'], {'help': '"""AWS IAM User Access Key"""'}), "('--secret-key', help='AWS IAM User Access Key')\n", (3576, 3624), False, 'import click\n'), ((3626, 3675), 'click.option', 'click.option', (['"""--profile"""'], {'help': '"""AWS CLI profile"""'}), "('--profile', help='AWS CLI profile')\n", (3638, 3675), False, 'import 
click\n'), ((3677, 3777), 'click.option', 'click.option', (['"""--region"""'], {'default': '"""us-east-1"""', 'help': '"""AWS region identifier. Default: us-east-1"""'}), "('--region', default='us-east-1', help=\n 'AWS region identifier. Default: us-east-1')\n", (3689, 3777), False, 'import click\n'), ((3774, 3868), 'click.option', 'click.option', (['"""--yes"""', '"""-y"""', '"""non_interactive"""'], {'is_flag': '(True)', 'help': '"""Non-interactive mode"""'}), "('--yes', '-y', 'non_interactive', is_flag=True, help=\n 'Non-interactive mode')\n", (3786, 3868), False, 'import click\n'), ((3865, 3962), 'click.option', 'click.option', (['"""--service"""', '"""-s"""', '"""svc"""'], {'multiple': '(True)', 'help': '"""Specific service name to setup"""'}), "('--service', '-s', 'svc', multiple=True, help=\n 'Specific service name to setup')\n", (3877, 3962), False, 'import click\n'), ((3959, 4067), 'click.option', 'click.option', (['"""--bucket-name"""'], {'multiple': '(True)', 'help': '"""Bucket name to encrypt. Only applicable for s3-sse"""'}), "('--bucket-name', multiple=True, help=\n 'Bucket name to encrypt. Only applicable for s3-sse')\n", (3971, 4067), False, 'import click\n'), ((4064, 4161), 'click.option', 'click.option', (['"""--instance-id"""'], {'multiple': '(True)', 'help': '"""Instance ID (Required only for ebs-sse)"""'}), "('--instance-id', multiple=True, help=\n 'Instance ID (Required only for ebs-sse)')\n", (4076, 4161), False, 'import click\n'), ((4158, 4251), 'click.option', 'click.option', (['"""--volume-id"""'], {'multiple': '(True)', 'help': '"""Volume ID (Required only for ebs-sse)"""'}), "('--volume-id', multiple=True, help=\n 'Volume ID (Required only for ebs-sse)')\n", (4170, 4251), False, 'import click\n'), ((4248, 4358), 'click.option', 'click.option', (['"""--kms-id"""'], {'help': '"""Supports both KMS Key ID or Alias. Only supported for s3-sse and ebs-sse"""'}), "('--kms-id', help=\n 'Supports both KMS Key ID or Alias. 
Only supported for s3-sse and ebs-sse')\n", (4260, 4358), False, 'import click\n'), ((8024, 8037), 'click.group', 'click.group', ([], {}), '()\n', (8035, 8037), False, 'import click\n'), ((8087, 8159), 'click.option', 'click.option', (['"""--file-name"""'], {'help': '"""File name for private and public key"""'}), "('--file-name', help='File name for private and public key')\n", (8099, 8159), False, 'import click\n'), ((8161, 8234), 'click.option', 'click.option', (['"""--key-size"""'], {'default': '(4096)', 'help': '"""Key size (Default: 4096)"""'}), "('--key-size', default=4096, help='Key size (Default: 4096)')\n", (8173, 8234), False, 'import click\n'), ((8402, 8470), 'click.CommandCollection', 'click.CommandCollection', ([], {'sources': '[chk_group, setup_group, rsa_group]'}), '(sources=[chk_group, setup_group, rsa_group])\n', (8425, 8470), False, 'import click\n'), ((8324, 8372), 'secureaws.rsautil.create_rsa_key_pair', 'rsautil.create_rsa_key_pair', (['file_name', 'key_size'], {}), '(file_name, key_size)\n', (8351, 8372), False, 'from secureaws import rsautil\n'), ((1241, 1274), 'boto3.Session', 'boto3.Session', ([], {'region_name': 'region'}), '(region_name=region)\n', (1254, 1274), False, 'import boto3\n'), ((1338, 1393), 'boto3.Session', 'boto3.Session', ([], {'profile_name': 'profile', 'region_name': 'region'}), '(profile_name=profile, region_name=region)\n', (1351, 1393), False, 'import boto3\n'), ((1481, 1583), 'boto3.Session', 'boto3.Session', ([], {'aws_access_key_id': 'access_key', 'aws_secret_access_key': 'secret_key', 'region_name': 'region'}), '(aws_access_key_id=access_key, aws_secret_access_key=\n secret_key, region_name=region)\n', (1494, 1583), False, 'import boto3\n')] |
# Generated by Django 3.0.4 on 2020-05-04 18:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make MovieEntry.date_watched optional (nullable in the DB, blank in forms)."""
    dependencies = [
        ('main', '0002_auto_20200323_0327'),
    ]
    operations = [
        migrations.AlterField(
            model_name='movieentry',
            name='date_watched',
            # blank=True: allowed empty in forms; null=True: stored as NULL.
            field=models.DateField(blank=True, null=True),
        ),
    ]
| [
"django.db.models.DateField"
] | [((343, 382), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (359, 382), False, 'from django.db import migrations, models\n')] |
import logging
import time
from collections import deque
from threading import Thread

import couchdb
from couchdb import ResourceConflict
from pylons import config

from lr.lib import SpecValidationException, helpers as h
from lr.lib.couch_change_monitor import BaseChangeHandler
from lr.lib.replacement_helper import ResourceDataReplacement
from lr.lib.schema_helper import ResourceDataModelValidator
from lr.model import ResourceDataModel
log = logging.getLogger(__name__)
# this doesn't need to be done... should be handled by pylons.config
# scriptPath = os.path.dirname(os.path.abspath(__file__))
# _PYLONS_CONFIG = os.path.join(scriptPath, '..', '..', '..', 'development.ini')
# _config = ConfigParser.ConfigParser()
# _config.read(_PYLONS_CONFIG)
# doc_type values this handler copies.
_RESOURCE_DISTRIBUTABLE_TYPE = "resource_data_distributable"
_RESOURCE_TYPE = "resource_data"
# Keys used when inspecting a CouchDB change record.
_DOC_TYPE = "doc_type"
_DOC = "doc"
_ID = "id"
# Number of queued documents that triggers a batched copy.
_DOCUMENT_UPDATE_THRESHOLD = 100
class IncomingCopyHandler(BaseChangeHandler):
    """Copies incoming resource_data documents into the main resource-data
    database, processing them in batches on short-lived worker threads."""

    def __init__(self):
        self._serverUrl = config["couchdb.url.dbadmin"]
        self._targetName = config["couchdb.db.resourcedata"]
        # Queue of raw change documents awaiting a batched copy.
        self.documents = deque()
        s = couchdb.Server(self._serverUrl)
        self._db = s[self._targetName]
        self.repl_helper = ResourceDataReplacement()
        # Live worker threads keyed by thread name; bounded by max_threads.
        self.threads = {}
        self.max_threads = 50

    def _canHandle(self, change, database):
        """Return True for full documents whose doc_type is a resource-data type."""
        return (_DOC in change and
                change[_DOC].get(_DOC_TYPE) in
                (_RESOURCE_DISTRIBUTABLE_TYPE, _RESOURCE_TYPE))

    def _handle(self, change, database):
        """Queue the changed document; once the batch threshold (or the whole
        source database) is reached, copy each document on its own thread."""
        def threadName(doc):
            return "T-" + doc["_id"]

        def handleDocument(newDoc):
            # Whether the source copy should be removed after processing.
            should_delete = True
            try:
                ResourceDataModelValidator.set_timestamps(newDoc)
                del newDoc["_rev"]
                self.repl_helper.handle(newDoc)
            except SpecValidationException as e:
                log.error("SpecValidationException: %s, %s", newDoc['_id'], str(e))
            except couchdb.ResourceConflict:
                log.error("Document conflicts", exc_info=1)
            except Exception as ex:
                # Unexpected failure: keep the source document so it can be retried.
                should_delete = False
                log.error("Unable to save %s", newDoc['_id'], exc_info=ex)
            if should_delete:
                try:
                    del database[newDoc['_id']]
                except Exception as ex:
                    log.error("Error when deleting", exc_info=ex)
            try:
                del self.threads[threadName(newDoc)]
            except KeyError:
                # Bookkeeping entry already removed; nothing to do.
                pass

        self.documents.append(change[_DOC])
        if len(self.documents) >= _DOCUMENT_UPDATE_THRESHOLD or len(self.documents) >= database.info()['doc_count']:
            while len(self.documents) > 0:
                doc = self.documents.popleft()
                tname = threadName(doc)
                t = Thread(target=handleDocument, name=tname, args=(doc,))
                self.threads[tname] = t
                t.start()
                while len(self.threads) > self.max_threads:
                    # Throttle thread spawn rate. Bug fix: `time` was used here
                    # without ever being imported (NameError when triggered).
                    time.sleep(.1)

    def isRunning(self):
        # True while any worker thread is still registered.
        return len(self.threads) > 0

    def threadCount(self):
        return len(self.threads)
| [
"logging.getLogger",
"collections.deque",
"lr.lib.replacement_helper.ResourceDataReplacement",
"lr.lib.schema_helper.ResourceDataModelValidator.set_timestamps",
"threading.Thread",
"couchdb.Server"
] | [((435, 462), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (452, 462), False, 'import logging\n'), ((1134, 1141), 'collections.deque', 'deque', ([], {}), '()\n', (1139, 1141), False, 'from collections import deque\n'), ((1154, 1185), 'couchdb.Server', 'couchdb.Server', (['self._serverUrl'], {}), '(self._serverUrl)\n', (1168, 1185), False, 'import couchdb\n'), ((1252, 1277), 'lr.lib.replacement_helper.ResourceDataReplacement', 'ResourceDataReplacement', ([], {}), '()\n', (1275, 1277), False, 'from lr.lib.replacement_helper import ResourceDataReplacement\n'), ((1876, 1925), 'lr.lib.schema_helper.ResourceDataModelValidator.set_timestamps', 'ResourceDataModelValidator.set_timestamps', (['newDoc'], {}), '(newDoc)\n', (1917, 1925), False, 'from lr.lib.schema_helper import ResourceDataModelValidator\n'), ((3190, 3244), 'threading.Thread', 'Thread', ([], {'target': 'handleDocument', 'name': 'tname', 'args': '(doc,)'}), '(target=handleDocument, name=tname, args=(doc,))\n', (3196, 3244), False, 'from threading import Thread\n')] |
import os
from argparse import ArgumentParser
from pathlib import Path
from general_utils import split_hparams_string, split_int_set_str
# from tacotron.app.eval_checkpoints import eval_checkpoints
from tacotron.app import (DEFAULT_MAX_DECODER_STEPS, continue_train, infer,
plot_embeddings, train, validate)
from tacotron.app.defaults import (DEFAULT_MCD_NO_OF_COEFFS_PER_FRAME,
DEFAULT_REPETITIONS,
DEFAULT_SAVE_MEL_INFO_COPY_PATH,
DEFAULT_SEED)
BASE_DIR_VAR = "base_dir"
def init_plot_emb_parser(parser) -> None:
  """Register the plot-embeddings CLI options and return the command handler."""
  parser.add_argument('--train_name', type=str, required=True)
  parser.add_argument('--custom_checkpoint', type=int)
  return plot_embeddings
# def init_eval_checkpoints_parser(parser):
# parser.add_argument('--train_name', type=str, required=True)
# parser.add_argument('--custom_hparams', type=str)
# parser.add_argument('--select', type=int)
# parser.add_argument('--min_it', type=int)
# parser.add_argument('--max_it', type=int)
# return eval_checkpoints_main_cli
# def evaeckpoints_main_cli(**args):
# argsl_ch["custom_hparams"] = split_hparams_string(args["custom_hparams"])
# eval_checkpoints(**args)
# def init_restore_parser(parser: ArgumentParser) -> None:
# parser.add_argument('--train_name', type=str, required=True)
# parser.add_argument('--checkpoint_dir', type=Path, required=True)
# return restore_model
def init_train_parser(parser: ArgumentParser) -> None:
  """Register the training CLI options and return the command handler."""
  parser.add_argument('--ttsp_dir', type=Path, required=True)
  parser.add_argument('--train_name', type=str, required=True)
  parser.add_argument('--merge_name', type=str, required=True)
  parser.add_argument('--prep_name', type=str, required=True)
  # Warm start: initialize weights from a previous training run.
  parser.add_argument('--warm_start_train_name', type=str)
  parser.add_argument('--warm_start_checkpoint', type=int)
  parser.add_argument('--custom_hparams', type=str)
  parser.add_argument('--weights_train_name', type=str)
  parser.add_argument('--weights_checkpoint', type=int)
  parser.add_argument('--map_from_speaker', type=str)
  parser.add_argument('--map_symbol_weights', action='store_true')
  parser.add_argument('--use_weights_map', action='store_true')
  return train_cli
def train_cli(**args) -> None:
  """CLI adapter for `train`: parse the raw hparams string, then delegate."""
  raw_hparams = args["custom_hparams"]
  args["custom_hparams"] = split_hparams_string(raw_hparams)
  train(**args)
def init_continue_train_parser(parser: ArgumentParser) -> None:
  """Register the continue-train CLI options and return the command handler."""
  parser.add_argument('--train_name', type=str, required=True)
  parser.add_argument('--custom_hparams', type=str)
  return continue_train_cli
def continue_train_cli(**args) -> None:
  """CLI adapter for `continue_train`: parse the raw hparams string first."""
  hparams_arg = args["custom_hparams"]
  args["custom_hparams"] = split_hparams_string(hparams_arg)
  continue_train(**args)
def init_validate_parser(parser: ArgumentParser) -> None:
  """Register the validation CLI options and return the command handler."""
  parser.add_argument('--train_name', type=str, required=True)
  parser.add_argument('--entry_ids', type=str, help="Utterance ids or nothing if random")
  parser.add_argument('--speaker', type=str, help="ds_name,speaker_name")
  parser.add_argument('--ds', type=str, help="Choose if validation- or testset should be taken.",
                      choices=["val", "test"], default="val")
  parser.add_argument('--custom_checkpoints', type=str)
  parser.add_argument('--full_run', action='store_true')
  parser.add_argument('--max_decoder_steps', type=int, default=DEFAULT_MAX_DECODER_STEPS)
  parser.add_argument('--copy_mel_info_to', type=str, default=DEFAULT_SAVE_MEL_INFO_COPY_PATH)
  parser.add_argument('--custom_hparams', type=str)
  parser.add_argument('--select_best_from', type=str)
  parser.add_argument('--mcd_no_of_coeffs_per_frame', type=int,
                      default=DEFAULT_MCD_NO_OF_COEFFS_PER_FRAME)
  parser.add_argument('--fast', action='store_true')
  parser.add_argument('--repetitions', type=int, default=DEFAULT_REPETITIONS)
  parser.add_argument('--seed', type=int, default=DEFAULT_SEED)
  return validate_cli
def validate_cli(**args) -> None:
  """CLI adapter for `validate`: normalize option strings, then delegate."""
  args["custom_hparams"] = split_hparams_string(args["custom_hparams"])
  # Both id-set options share the same string-to-int-set parsing.
  for option in ("entry_ids", "custom_checkpoints"):
    args[option] = split_int_set_str(args[option])
  validate(**args)
def init_inference_parser(parser: ArgumentParser) -> None:
  """Register the inference CLI options and return the command handler."""
  parser.add_argument('--train_name', type=str, required=True)
  parser.add_argument('--text_name', type=str, required=True)
  parser.add_argument('--speaker', type=str, required=True, help="ds_name,speaker_name")
  parser.add_argument('--utterance_ids', type=str)
  parser.add_argument('--custom_checkpoint', type=int)
  parser.add_argument('--custom_hparams', type=str)
  parser.add_argument('--full_run', action='store_true')
  parser.add_argument('--max_decoder_steps', type=int, default=DEFAULT_MAX_DECODER_STEPS)
  parser.add_argument('--seed', type=int, default=DEFAULT_SEED)
  parser.add_argument('--copy_mel_info_to', type=str, default=DEFAULT_SAVE_MEL_INFO_COPY_PATH)
  return infer_cli
def infer_cli(**args) -> None:
  """CLI adapter for `infer`: normalize option strings, then delegate."""
  args.update(
    custom_hparams=split_hparams_string(args["custom_hparams"]),
    utterance_ids=split_int_set_str(args["utterance_ids"]),
  )
  infer(**args)
def add_base_dir(parser: ArgumentParser) -> None:
  """Default the parser's `base_dir` argument from the environment.

  Raises:
    AssertionError: if the BASE_DIR_VAR environment variable is unset.
  """
  # `in os.environ` instead of the redundant `.keys()`; keep the assert so
  # callers still see an AssertionError, but give it a useful message.
  assert BASE_DIR_VAR in os.environ, f"environment variable {BASE_DIR_VAR!r} must be set"
  base_dir = Path(os.environ[BASE_DIR_VAR])
  parser.set_defaults(base_dir=base_dir)
def _add_parser_to(subparsers, name: str, init_method) -> None:
  """Register sub-command *name*, wire its handler, and return its parser."""
  sub_parser = subparsers.add_parser(name, help=f"{name} help")
  handler = init_method(sub_parser)
  sub_parser.set_defaults(invoke_handler=handler)
  add_base_dir(sub_parser)
  return sub_parser
def _init_parser():
  """Build the root argument parser with all registered sub-commands."""
  result = ArgumentParser()
  subparsers = result.add_subparsers(help='sub-command help')
  _add_parser_to(subparsers, "train", init_train_parser)
  _add_parser_to(subparsers, "continue-train", init_continue_train_parser)
  _add_parser_to(subparsers, "validate", init_validate_parser)
  _add_parser_to(subparsers, "infer", init_inference_parser)
  # _add_parser_to(subparsers, "eval-checkpoints", init_taco_eval_checkpoints_parser)
  _add_parser_to(subparsers, "plot-embeddings", init_plot_emb_parser)
  #_add_parser_to(subparsers, "restore", init_restore_parser)
  return result
def _process_args(args) -> None:
  """Pop the registered handler from the parsed args and invoke it."""
  kwargs = vars(args)
  handler = kwargs.pop("invoke_handler")
  handler(**kwargs)
if __name__ == "__main__":
  # Build the CLI parser, parse argv, and dispatch to the chosen sub-command.
  main_parser = _init_parser()
  received_args = main_parser.parse_args()
  _process_args(received_args)
| [
"os.environ.keys",
"tacotron.app.validate",
"pathlib.Path",
"general_utils.split_int_set_str",
"argparse.ArgumentParser",
"tacotron.app.infer",
"general_utils.split_hparams_string",
"tacotron.app.continue_train",
"tacotron.app.train"
] | [((2354, 2398), 'general_utils.split_hparams_string', 'split_hparams_string', (["args['custom_hparams']"], {}), "(args['custom_hparams'])\n", (2374, 2398), False, 'from general_utils import split_hparams_string, split_int_set_str\n'), ((2401, 2414), 'tacotron.app.train', 'train', ([], {}), '(**args)\n', (2406, 2414), False, 'from tacotron.app import DEFAULT_MAX_DECODER_STEPS, continue_train, infer, plot_embeddings, train, validate\n'), ((2693, 2737), 'general_utils.split_hparams_string', 'split_hparams_string', (["args['custom_hparams']"], {}), "(args['custom_hparams'])\n", (2713, 2737), False, 'from general_utils import split_hparams_string, split_int_set_str\n'), ((2740, 2762), 'tacotron.app.continue_train', 'continue_train', ([], {}), '(**args)\n', (2754, 2762), False, 'from tacotron.app import DEFAULT_MAX_DECODER_STEPS, continue_train, infer, plot_embeddings, train, validate\n'), ((4025, 4069), 'general_utils.split_hparams_string', 'split_hparams_string', (["args['custom_hparams']"], {}), "(args['custom_hparams'])\n", (4045, 4069), False, 'from general_utils import split_hparams_string, split_int_set_str\n'), ((4092, 4128), 'general_utils.split_int_set_str', 'split_int_set_str', (["args['entry_ids']"], {}), "(args['entry_ids'])\n", (4109, 4128), False, 'from general_utils import split_hparams_string, split_int_set_str\n'), ((4160, 4205), 'general_utils.split_int_set_str', 'split_int_set_str', (["args['custom_checkpoints']"], {}), "(args['custom_checkpoints'])\n", (4177, 4205), False, 'from general_utils import split_hparams_string, split_int_set_str\n'), ((4208, 4224), 'tacotron.app.validate', 'validate', ([], {}), '(**args)\n', (4216, 4224), False, 'from tacotron.app import DEFAULT_MAX_DECODER_STEPS, continue_train, infer, plot_embeddings, train, validate\n'), ((5044, 5088), 'general_utils.split_hparams_string', 'split_hparams_string', (["args['custom_hparams']"], {}), "(args['custom_hparams'])\n", (5064, 5088), False, 'from general_utils import 
split_hparams_string, split_int_set_str\n'), ((5115, 5155), 'general_utils.split_int_set_str', 'split_int_set_str', (["args['utterance_ids']"], {}), "(args['utterance_ids'])\n", (5132, 5155), False, 'from general_utils import split_hparams_string, split_int_set_str\n'), ((5158, 5171), 'tacotron.app.infer', 'infer', ([], {}), '(**args)\n', (5163, 5171), False, 'from tacotron.app import DEFAULT_MAX_DECODER_STEPS, continue_train, infer, plot_embeddings, train, validate\n'), ((5280, 5310), 'pathlib.Path', 'Path', (['os.environ[BASE_DIR_VAR]'], {}), '(os.environ[BASE_DIR_VAR])\n', (5284, 5310), False, 'from pathlib import Path\n'), ((5640, 5656), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (5654, 5656), False, 'from argparse import ArgumentParser\n'), ((5249, 5266), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (5264, 5266), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
@author: Jim
@project: tornado_learning
@time: 2019/8/20 14:48
@desc:
上传文件
"""
from __future__ import annotations
from tornado_learning.handler import BaseHandler
import os
import uuid
import aiofiles
class UploadHandler(BaseHandler):
    """Handle file uploads posted under the ``front_image`` form field."""

    async def post(self):
        """Save each uploaded file under MEDIA_ROOT with a unique name.

        Responds with HTTP 400 and an error message when no file was
        posted; otherwise finishes with the stored file path (the last
        one when multiple files were uploaded).
        """
        ret_data = {}
        files_meta = self.request.files.get("front_image", None)
        if not files_meta:
            self.set_status(400)
            ret_data["front_image"] = "请上传图片"
        else:
            for meta in files_meta:
                filename = meta["filename"]
                # Bug fix: the format string never used the `filename` kwarg it
                # was given, so the original name (and extension) was dropped.
                # Prefix with a uuid1 to keep stored names unique.
                new_filename = "{uuid}_{filename}".format(uuid=uuid.uuid1(), filename=filename)
                file_path = os.path.join(self.settings["MEDIA_ROOT"], new_filename)
                async with aiofiles.open(file_path, "wb") as f:
                    await f.write(meta["body"])
                ret_data['file_path'] = file_path
        return self.finish(ret_data)
| [
"uuid.uuid1",
"os.path.join",
"aiofiles.open"
] | [((711, 766), 'os.path.join', 'os.path.join', (["self.settings['MEDIA_ROOT']", 'new_filename'], {}), "(self.settings['MEDIA_ROOT'], new_filename)\n", (723, 766), False, 'import os\n'), ((795, 825), 'aiofiles.open', 'aiofiles.open', (['file_path', '"""wb"""'], {}), "(file_path, 'wb')\n", (808, 825), False, 'import aiofiles\n'), ((650, 662), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (660, 662), False, 'import uuid\n')] |
"""Main module
"""
# Standard library imports
import string
# Third party imports
import numpy as np
import justpy as jp
import pandas as pd
START_INDEX: int = 1
END_INDEX: int = 20
GRID_OPTIONS = """
{
class: 'ag-theme-alpine',
defaultColDef: {
filter: true,
sortable: false,
resizable: true,
headerClass: 'font-bold',
editable: true
},
rowSelection: 'single',
}
"""
def on_input_key(self, msg):
    """Push the typed value from the input field into the grid.

    Writes ``msg.value`` into the most recently clicked cell; does
    nothing when no cell has been clicked yet.

    Args:
        msg (object): Event data object carrying the new ``value``.
    """
    cell = self.last_cell
    if cell is None:
        return
    rows = self.grid.options['rowData']
    rows[cell['row']][cell['col']] = msg.value
def on_cell_clicked(self, msg):
    """Record the clicked cell and mirror its value in the input field.

    Updates the coordinate label (e.g. ``B3``), copies the cell's
    current value into the editing input, and remembers the cell's
    row/column so later keystrokes know where to write.

    Args:
        msg (object): Event data object.
    """
    col, row = msg.colId, msg.rowIndex
    self.cell_label.value = col + str(row)
    self.input_field.value = msg.data[col]
    self.input_field.last_cell = {"row": row, "col": col}
    self.last_row = msg.row
def on_cell_value_changed(self, msg):
    """Keep the input field in sync after a cell was edited in place.

    Args:
        msg (object): Event data object.
    """
    new_value = msg.data[msg.colId]
    self.input_field.value = new_value
def grid_test():
    """Build the spreadsheet-style demo page.

    Returns:
        jp.WebPage: Page with a coordinate label, an edit input and an
        editable ag-Grid wired together through event handlers.
    """
    # Empty frame with columns A..Z and rows START_INDEX..END_INDEX-1.
    columns = list(string.ascii_uppercase)
    frame = pd.DataFrame(
        index=np.arange(START_INDEX, END_INDEX), columns=columns)
    frame = frame.fillna('')

    page = jp.WebPage()
    outer = jp.Div(classes='q-pa-md', a=page)
    toolbar = jp.Div(classes='q-gutter-md', a=outer)

    # Read-only label showing the clicked cell's coordinates (e.g. "B3").
    coord_label = jp.Input(
        a=toolbar, style='width: 32px; margin-left: 16px', disabled=True)
    # Edit box mirroring the selected cell's content.
    edit_box = jp.Input(classes=jp.Styles.input_classes,
                        a=toolbar, width='32px')
    edit_box.on("input", on_input_key)
    edit_box.last_cell = None

    sheet = jp.AgGrid(a=page, options=GRID_OPTIONS)
    sheet.load_pandas_frame(frame)
    sheet.options.pagination = True
    sheet.options.paginationAutoPageSize = True
    # Cross-wire the widgets so the event handlers can reach each other.
    sheet.cell_label = coord_label
    sheet.input_field = edit_box
    sheet.on('cellClicked', on_cell_clicked)
    sheet.on('cellValueChanged', on_cell_value_changed)
    edit_box.grid = sheet
    return page
def main():
    """Entry point: serve the grid demo with the justpy web server."""
    jp.justpy(grid_test)


if __name__ == "__main__":
    main()
| [
"justpy.WebPage",
"justpy.justpy",
"justpy.Input",
"justpy.Div",
"pandas.DataFrame",
"justpy.AgGrid",
"numpy.arange"
] | [((1576, 1609), 'numpy.arange', 'np.arange', (['START_INDEX', 'END_INDEX'], {}), '(START_INDEX, END_INDEX)\n', (1585, 1609), True, 'import numpy as np\n'), ((1628, 1671), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index', 'columns': 'headings'}), '(index=index, columns=headings)\n', (1640, 1671), True, 'import pandas as pd\n'), ((1985, 1997), 'justpy.WebPage', 'jp.WebPage', ([], {}), '()\n', (1995, 1997), True, 'import justpy as jp\n'), ((2014, 2051), 'justpy.Div', 'jp.Div', ([], {'classes': '"""q-pa-md"""', 'a': 'web_page'}), "(classes='q-pa-md', a=web_page)\n", (2020, 2051), True, 'import justpy as jp\n'), ((2070, 2111), 'justpy.Div', 'jp.Div', ([], {'classes': '"""q-gutter-md"""', 'a': 'root_div'}), "(classes='q-gutter-md', a=root_div)\n", (2076, 2111), True, 'import justpy as jp\n'), ((2129, 2207), 'justpy.Input', 'jp.Input', ([], {'a': 'in_root_div', 'style': '"""width: 32px; margin-left: 16px"""', 'disabled': '(True)'}), "(a=in_root_div, style='width: 32px; margin-left: 16px', disabled=True)\n", (2137, 2207), True, 'import justpy as jp\n'), ((2235, 2305), 'justpy.Input', 'jp.Input', ([], {'classes': 'jp.Styles.input_classes', 'a': 'in_root_div', 'width': '"""32px"""'}), "(classes=jp.Styles.input_classes, a=in_root_div, width='32px')\n", (2243, 2305), True, 'import justpy as jp\n'), ((2420, 2463), 'justpy.AgGrid', 'jp.AgGrid', ([], {'a': 'web_page', 'options': 'GRID_OPTIONS'}), '(a=web_page, options=GRID_OPTIONS)\n', (2429, 2463), True, 'import justpy as jp\n'), ((2845, 2865), 'justpy.justpy', 'jp.justpy', (['grid_test'], {}), '(grid_test)\n', (2854, 2865), True, 'import justpy as jp\n')] |
""" Unit tests for linked_queue.LinkedQueue """
from dloud_ads import linked_queue
def test_dummy():
    """Exercise FIFO behaviour of LinkedQueue: emptiness, length, order."""
    queue = linked_queue.LinkedQueue()
    # A fresh queue is empty and falsy.
    assert queue.is_empty()
    assert not queue
    # Single enqueue/dequeue round-trip.
    queue.enqueue(2)
    assert not queue.is_empty()
    assert len(queue) == 1
    assert queue.dequeue() == 2
    # Elements come back in insertion order.
    for value in range(4):
        queue.enqueue(value)
    assert len(queue) == 4
    assert [queue.dequeue() for _ in range(4)] == [0, 1, 2, 3]
    assert not queue
    # Interleaved bulk enqueues keep FIFO order across batches.
    for value in range(9):
        queue.enqueue(value)
    assert len(queue) == 9
    for value in range(2):
        queue.enqueue(value)
    assert len(queue) == 11
    expected = [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1]
    assert [queue.dequeue() for _ in range(11)] == expected
| [
"dloud_ads.linked_queue.LinkedQueue"
] | [((147, 173), 'dloud_ads.linked_queue.LinkedQueue', 'linked_queue.LinkedQueue', ([], {}), '()\n', (171, 173), False, 'from dloud_ads import linked_queue\n')] |
import os
import re
# To use a consistent encoding
from codecs import open as copen
from os import path
from setuptools import find_packages, setup
# Directory containing this setup.py; all file reads below are relative to it.
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with copen(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
def read(*parts):
    """Return the text content of the file at ``here``/``parts``."""
    target = os.path.join(here, *parts)
    with copen(target, 'r') as fp:
        return fp.read()
def find_version(*file_paths):
    """Extract the ``__version__`` string assigned in the given source file.

    Raises:
        RuntimeError: If no version assignment is found.
    """
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Package version is read from grape/__version__.py so it lives in one place.
__version__ = find_version("grape", "__version__.py")

# Extra dependencies needed only by the test suite.
test_deps = []

# TODO: Authors add your emails!!!
# NOTE(review): the placeholder keys below are identical, so the dict
# collapses to a single entry — fill in the real names/emails.
authors = {
    "<NAME>": "<EMAIL>",
    "<NAME>": "<EMAIL>",
    "<NAME>": "<EMAIL>",
    "<NAME>": "<EMAIL>",
}

setup(
    name='grape',
    version=__version__,
    description="Rust/Python for high performance Graph Processing and Embedding.",
    long_description=long_description,
    url="https://github.com/AnacletoLAB/grape",
    # setuptools expects comma-separated strings, not lists, for these two.
    author=", ".join(list(authors.keys())),
    author_email=", ".join(list(authors.values())),
    # Choose your license
    license='MIT',
    include_package_data=True,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3'
    ],
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    tests_require=test_deps,
    install_requires=[
        "ensmallen==0.7.0.dev6",
        "embiggen==0.10.0.dev2",
    ]
)
| [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join",
"re.search"
] | [((171, 193), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (183, 193), False, 'from os import path\n'), ((534, 607), 're.search', 're.search', (['"""^__version__ = [\'\\\\"]([^\'\\\\"]*)[\'\\\\"]"""', 'version_file', 're.M'], {}), '(\'^__version__ = [\\\'\\\\"]([^\\\'\\\\"]*)[\\\'\\\\"]\', version_file, re.M)\n', (543, 607), False, 'import re\n'), ((257, 286), 'os.path.join', 'path.join', (['here', '"""README.rst"""'], {}), "(here, 'README.rst')\n", (266, 286), False, 'from os import path\n'), ((1544, 1596), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['contrib', 'docs', 'tests*']"}), "(exclude=['contrib', 'docs', 'tests*'])\n", (1557, 1596), False, 'from setuptools import find_packages, setup\n'), ((379, 405), 'os.path.join', 'os.path.join', (['here', '*parts'], {}), '(here, *parts)\n', (391, 405), False, 'import os\n')] |