index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
7,760
|
joanmadsen/newproject2
|
refs/heads/master
|
/hellow_world_app/admin.py
|
from django.contrib import admin
#import model
from hellow_world_app.models import Rock
#set up automated slug creation
class RockAdmin(admin.ModelAdmin):
    """Admin configuration for the Rock model.

    Shows name and description in the changelist and auto-fills the slug
    from the name while typing in the admin add/change form.
    """
    model = Rock
    list_display = ('name', 'description')
    # Auto-populate the slug field from the name in the admin form.
    prepopulated_fields = {'slug': ('name',)}

# Register your models here.
admin.site.register(Rock, RockAdmin)
|
{"/newproject2/urls.py": ["/hellow_world_app/views.py"], "/hellow_world_app/views.py": ["/hellow_world_app/forms.py"]}
|
7,761
|
joanmadsen/newproject2
|
refs/heads/master
|
/hellow_world_app/forms.py
|
from django.forms import ModelForm
from hellow_world_app.models import Rock
class CreateRockForm(ModelForm):
    """ModelForm for creating Rock instances, limited to name and description."""

    class Meta:
        model = Rock
        # BUG FIX: this attribute was previously misspelled ``field``, which
        # Django silently ignores — combined with ``exclude = []`` the form
        # exposed every model field instead of just these two.
        fields = ['name', 'description']

form = CreateRockForm()
|
{"/newproject2/urls.py": ["/hellow_world_app/views.py"], "/hellow_world_app/views.py": ["/hellow_world_app/forms.py"]}
|
7,762
|
loys-caucheteux/Hackatown2021-citysupplier
|
refs/heads/master
|
/map/admin.py
|
from django.contrib import admin
from .models import magBase, userBase
# Register your models here.
# Expose the store (magBase) and user (userBase) tables in the Django admin
# using the default ModelAdmin options.
admin.site.register(magBase)
admin.site.register(userBase)
|
{"/map/admin.py": ["/map/models.py"], "/map/views.py": ["/map/models.py", "/map/forms.py"]}
|
7,763
|
loys-caucheteux/Hackatown2021-citysupplier
|
refs/heads/master
|
/map/forms.py
|
from django import forms
from django.forms.utils import ErrorList
class ParagraphErrorList(ErrorList):
    """ErrorList variant rendering each error as a small <p> inside one div."""

    def __str__(self):
        return self.as_divs()

    def as_divs(self):
        # An empty error list renders as an empty string.
        if not self:
            return ''
        paragraphs = ''.join('<p class="small error">%s</p>' % e for e in self)
        return '<div class="errorlist">%s</div>' % paragraphs
class RegisterForm(forms.Form):
    """Sign-up form: identity fields, a free-text address and a password.

    The address is geocoded downstream by funcs.getlat_fadress /
    funcs.getlon_fadress when the view processes the submission.
    """
    name = forms.CharField(
        label='Name',
        max_length=100,
        required=True,
        widget=forms.TextInput(attrs={'class': 'form-control'})
    )
    surname = forms.CharField(
        label='Surname',
        max_length=100,
        required=True,
        widget=forms.TextInput(attrs={'class': 'form-control'})
    )
    # French label describing the expected "<number> <street> <postcode>" format.
    adress = forms.CharField(
        label='Adresse sous la forme <Numéro> <Rue> <Code Postal>',
        max_length=100,
        required=True,
        widget=forms.TextInput(attrs={'class': 'form-control'})
    )
    mail = forms.EmailField(
        label='Email',
        max_length=100,
        required=True,
        widget=forms.EmailInput(attrs={'class': 'form-control'})
    )
    pwd = forms.CharField(
        label='Password',
        max_length=100,
        required=True,
        widget=forms.PasswordInput(attrs={'class': 'form-control'})
    )
class LoginForm(forms.Form):
    """Login form: e-mail address plus password."""
    mail = forms.EmailField(
        label='Email',
        max_length=100,
        required=True,
        widget=forms.EmailInput(attrs={'class': 'form-control'})
    )
    pwd = forms.CharField(
        label='Password',
        max_length=100,
        required=True,
        widget=forms.PasswordInput(attrs={'class': 'form-control'})
    )
class RegisterMagForm(forms.Form):
    """Store-registration form: store name and address (geocoded by the view)."""
    name = forms.CharField(
        max_length=50,
        required=True,
        widget=forms.TextInput(attrs={'class': 'form-control'})
    )
    adress = forms.CharField(
        max_length=200,
        required=True,
        widget=forms.TextInput(attrs={'class': 'form-control'})
    )
|
{"/map/admin.py": ["/map/models.py"], "/map/views.py": ["/map/models.py", "/map/forms.py"]}
|
7,764
|
loys-caucheteux/Hackatown2021-citysupplier
|
refs/heads/master
|
/map/migrations/0001_initial.py
|
# Generated by Django 3.1.6 on 2021-02-07 15:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates userBase and magBase tables.

    Do not hand-edit field definitions here; change the models and generate
    a new migration instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='userBase',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('surname', models.CharField(max_length=100)),
                ('mail', models.EmailField(max_length=254, unique=True)),
                ('pwd', models.CharField(max_length=200)),
                ('adress', models.CharField(max_length=200)),
                ('created_on', models.DateField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='magBase',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('lat', models.DecimalField(decimal_places=20, default=0.0, max_digits=25)),
                ('lon', models.DecimalField(decimal_places=20, default=0.0, max_digits=25)),
                # SET_NULL keeps a store row alive when its owning user is deleted.
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='map.userbase')),
            ],
        ),
    ]
|
{"/map/admin.py": ["/map/models.py"], "/map/views.py": ["/map/models.py", "/map/forms.py"]}
|
7,765
|
loys-caucheteux/Hackatown2021-citysupplier
|
refs/heads/master
|
/map/models.py
|
from django.db import models
from django.contrib.auth.hashers import make_password
class userBase(models.Model):
    """Registered end user with a geocoded home position (lat/lon).

    NOTE(review): ``pwd`` is a plain CharField and views compare it directly;
    ``make_password`` is imported at module level but not used here — confirm
    whether password hashing was intended.
    """
    name = models.CharField(max_length=100, null=False)
    surname = models.CharField(max_length=100)
    # Unique e-mail doubles as the login identifier (see views.login).
    mail = models.EmailField(unique=True, null=False)
    pwd = models.CharField(max_length=200, null=False)
    adress = models.CharField(max_length=200)
    created_on = models.DateField(auto_now_add=True)
    # Coordinates resolved from ``adress`` at registration time.
    lat = models.DecimalField(null=False, max_digits=25, decimal_places=20, default=0.0)
    lon = models.DecimalField(null=False, max_digits=25, decimal_places=20, default=0.0)

    def __str__(self):
        return '%s %s' % (self.name, self.surname)

    class Meta:
        verbose_name = "utilisateur"
class magBase(models.Model):
    """A store ("magasin") owned by a user, located by address and lat/lon."""
    name = models.CharField(max_length=200)
    # SET_NULL: deleting the owner keeps the store but orphans it.
    user = models.ForeignKey(userBase, unique=False, null=True, on_delete=models.SET_NULL)
    lat = models.DecimalField(null=False, max_digits=25, decimal_places=20, default=0.0)
    lon = models.DecimalField(null=False, max_digits=25, decimal_places=20, default=0.0)
    adress = models.CharField(max_length=200, null=False, default="48 5th Avenue NYC")

    class Meta:
        verbose_name = "magasin"
|
{"/map/admin.py": ["/map/models.py"], "/map/views.py": ["/map/models.py", "/map/forms.py"]}
|
7,766
|
loys-caucheteux/Hackatown2021-citysupplier
|
refs/heads/master
|
/VidaLocal/views.py
|
#from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Return the VidaLocal landing page as a plain-text HTTP response."""
    return HttpResponse("Bienvenue sur VidaLocal")
|
{"/map/admin.py": ["/map/models.py"], "/map/views.py": ["/map/models.py", "/map/forms.py"]}
|
7,767
|
loys-caucheteux/Hackatown2021-citysupplier
|
refs/heads/master
|
/map/urls.py
|
from django.urls import include, path
from . import views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
# URL namespace used for {% url 'map:...' %} reversals.
app_name = 'map'

# Route table for the map app; static() appends STATIC_URL serving rules.
urlpatterns = [
    path('', views.index),
    path('register/', views.register, name='register'),
    path('login/', views.login, name='login'),
    path('new_store/', views.registerMag, name='registerMag'),
    path('map/', views.show_map, name='map')
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

# NOTE(review): under DEBUG the static() entries are appended a second time,
# duplicating the rules added above — confirm this is intentional.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
{"/map/admin.py": ["/map/models.py"], "/map/views.py": ["/map/models.py", "/map/forms.py"]}
|
7,768
|
loys-caucheteux/Hackatown2021-citysupplier
|
refs/heads/master
|
/map/migrations/0003_magbase_adress.py
|
# Generated by Django 3.1.6 on 2021-02-07 22:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds magBase.adress with a default address."""

    dependencies = [
        ('map', '0002_auto_20210207_1702'),
    ]

    operations = [
        migrations.AddField(
            model_name='magbase',
            name='adress',
            field=models.CharField(default='48 5th Avenue NYC', max_length=200),
        ),
    ]
|
{"/map/admin.py": ["/map/models.py"], "/map/views.py": ["/map/models.py", "/map/forms.py"]}
|
7,769
|
loys-caucheteux/Hackatown2021-citysupplier
|
refs/heads/master
|
/map/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from .models import userBase, magBase
from .forms import RegisterForm, ParagraphErrorList, LoginForm, RegisterMagForm
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.sessions.models import Session
from . import funcs
import folium
from folium.plugins import BeautifyIcon
from django.contrib.auth import logout
from django.views.generic import TemplateView
import os
from django.conf import settings
from django.conf.urls.static import static
from django.db import IntegrityError, transaction
from branca.element import Figure
# Create your views here.
def index(request):
    """Landing page: ensure the 'logged' session flag exists, handle ?logout."""
    # First visit: initialise the session flag.
    if 'logged' not in request.session:
        request.session['logged'] = False
    # A truthy ?logout=... query parameter ends the session.
    if request.GET.get('logout'):
        logout(request)
        request.session['logged'] = False
    template = loader.get_template('map/index.html')
    return HttpResponse(template.render(request=request))
def register(request):
    """Create a userBase account from a RegisterForm submission.

    Geocodes the submitted address (funcs returns the sentinel 1084 when the
    address cannot be resolved) and rejects duplicate e-mail addresses.
    Logged-in users are redirected to the access page.
    """
    if 'logged' not in request.session:
        request.session['logged'] = False
    if request.session['logged']:
        # Already authenticated users may not register again.
        return render(request, 'map/access.html')
    if request.method == 'POST':
        form = RegisterForm(request.POST, error_class=ParagraphErrorList)
        if form.is_valid():
            email = form.cleaned_data['mail']
            pwd = form.cleaned_data['pwd']
            name = form.cleaned_data['name']
            surname = form.cleaned_data['surname']
            adress = form.cleaned_data['adress']
            latitude = funcs.getlat_fadress(adress)
            longitude = funcs.getlon_fadress(adress)
            if latitude == 1084 and longitude == 1084:
                # Sentinel pair: the address could not be geocoded.
                context = {'form': RegisterForm(), 'adress': True}
                return render(request, 'map/register.html', context)
            user = userBase.objects.filter(mail=email)
            if not user.exists():
                # Reuse the coordinates computed above (BUG FIX: the original
                # geocoded the same address twice more here, issuing two extra
                # network lookups).
                user = userBase.objects.create(
                    name=name,
                    surname=surname,
                    mail=email,
                    pwd=pwd,
                    adress=adress,
                    lat=latitude,
                    lon=longitude,
                )
                return render(request, 'map/index.html')
            # E-mail already taken.
            context = {'form': RegisterForm(), 'used': True}
            return render(request, 'map/register.html', context)
        # BUG FIX: the original did ``context['errors'] = ...`` before
        # ``context`` existed (NameError) and the errors were then discarded;
        # build the context explicitly and surface the validation errors.
        context = {'form': form, 'errors': form.errors.items()}
        return render(request, 'map/register.html', context)
    context = {'form': RegisterForm()}
    return render(request, 'map/register.html', context)
def login(request):
    """Authenticate a user by (mail, pwd) and mark the session as logged in.

    On success, caches name/surname/mail in the session. Logged-in users
    are redirected to the access page.
    """
    if 'logged' not in request.session:
        request.session['logged'] = False
    if request.session['logged']:
        return render(request, 'map/access.html')
    if request.method == 'POST':
        form = LoginForm(request.POST, error_class=ParagraphErrorList)
        if form.is_valid():
            email = form.cleaned_data['mail']
            pwd = form.cleaned_data['pwd']
            # BUG FIX: ``user`` could be referenced while unbound when the
            # query failed; a None sentinel replaces the original ``f`` flag.
            user = None
            try:
                with transaction.atomic():
                    user = userBase.objects.filter(mail=email)
            except IntegrityError:
                form.errors['internal'] = "Une erreur interne est apparue. Merci de recommencer votre requête."
            if user is not None and user.exists():
                try:
                    # NOTE(review): passwords are compared in plain text here —
                    # confirm hashing was not intended (make_password is
                    # imported in models.py but unused).
                    getpwd = user.get(pwd=pwd)
                except ObjectDoesNotExist:
                    context = {'form': LoginForm(), 'fail': True}
                    return render(request, 'map/login.html', context)
                request.session['logged'] = True
                request.session['name'] = getpwd.name
                request.session['surname'] = getpwd.surname
                request.session['mail'] = getpwd.mail
                return render(request, 'map/index.html')
            context = {'form': LoginForm(), 'fail': True}
            return render(request, 'map/login.html', context)
        # BUG FIX: the original assigned ``context['errors']`` before
        # ``context`` existed (NameError); build it and show the errors.
        context = {'form': form, 'errors': form.errors.items()}
        return render(request, 'map/login.html', context)
    context = {'form': LoginForm(), 'fail': False}
    return render(request, 'map/login.html', context)
def registerMag(request):
    """Register a store (magBase) at a geocoded address, owned by the session user.

    Requires an authenticated session; duplicate coordinates are rejected.
    """
    if 'logged' not in request.session:
        request.session['logged'] = False
    if not request.session['logged']:
        return render(request, 'map/access.html')
    if request.method == 'POST':
        form = RegisterMagForm(request.POST, error_class=ParagraphErrorList)
        if form.is_valid():
            name = form.cleaned_data['name']
            adress = form.cleaned_data['adress']
            # Simplification: the original filtered then .get() on the same
            # mail; a single get() is equivalent since mail is unique.
            getuser = userBase.objects.get(mail=request.session['mail'])
            lat = funcs.getlat_fadress(adress)
            lon = funcs.getlon_fadress(adress)
            mag = magBase.objects.filter(lat=lat, lon=lon)
            if not mag.exists():
                mag = magBase.objects.create(
                    name=name,
                    user=getuser,
                    lat=lat,
                    lon=lon,
                    adress=adress,
                )
                return render(request, 'map/index.html')
            # A store already exists at these coordinates.
            context = {'form': RegisterMagForm(), 'used': True}
            return render(request, 'map/registerMag.html', context)
        # BUG FIX: the original assigned ``context['errors']`` before
        # ``context`` existed (NameError); build it and show the errors.
        context = {'form': form, 'errors': form.errors.items()}
        return render(request, 'map/registerMag.html', context)
    context = {'form': RegisterMagForm()}
    return render(request, 'map/registerMag.html', context)
def show_map(request):
    """Render a folium map centred on the logged-in user, with store markers.

    The user gets a blue marker at their stored lat/lon; every store address
    is re-geocoded (one network call per store) and drawn as a green marker.
    Anonymous visitors are redirected to the access page.
    """
    # creation of map comes here + business logic
    try:
        a = request.session['logged']
    except KeyError:
        request.session['logged'] = False
    if request.session['logged']:
        user = userBase.objects.filter(mail=request.session['mail']).get()
        # Flat list of all store addresses.
        store = magBase.objects.values_list('adress', flat=True)
        store = list(store)
        m = folium.Map(
            location=[user.lat, user.lon],
            zoom_start=15,
            tiles='Stamen Terrain',
            min_zoom=8,
            width=1200, height=600
        )
        folium.TileLayer('openstreetmap').add_to(m)
        # Blue marker ("#22F") for the user's own position.
        number_icon = BeautifyIcon(icon='angle-double-down', icon_shape='marker', text_color="#000", border_color="transparent",
                                   background_color="#22F", inner_icon_style="font-size:12px;padding-top:-5px;")
        folium.Marker(
            location=[user.lat, user.lon],
            popup=folium.Popup(str(user.adress + '\nChez vous'), width=200),
            icon=number_icon).add_to(m)
        for item in store:
            # NOTE(review): each address is geocoded twice below (lat + lon),
            # i.e. two network calls per store — consider caching upstream.
            mag = magBase.objects.filter(adress=item).first()
            # Green marker ("#2D2") for stores.
            number_icon = BeautifyIcon(icon='angle-double-down', icon_shape='marker', text_color="#000", border_color="transparent",
                                       background_color="#2D2", inner_icon_style="font-size:12px;padding-top:-5px;")
            folium.Marker(
                location=[funcs.getlat_fadress(item), funcs.getlon_fadress(item)],
                popup=folium.Popup(str(item + '\n' + mag.name), width=200),
                icon=number_icon).add_to(m)
        m = m._repr_html_()  # updated: embed the map HTML directly in the template
        context = {'my_map': m}
        return render(request, 'map/map.html', context)
    else:
        return render(request, 'map/access.html')
|
{"/map/admin.py": ["/map/models.py"], "/map/views.py": ["/map/models.py", "/map/forms.py"]}
|
7,770
|
loys-caucheteux/Hackatown2021-citysupplier
|
refs/heads/master
|
/map/funcs.py
|
from geopy.geocoders import Nominatim
def getlat_fadress(adress):
    """Geocode *adress* with Nominatim and return its latitude.

    Returns the sentinel 1084 when the address cannot be resolved
    (geocode() yields None, raising AttributeError on attribute access).
    """
    location = Nominatim(user_agent="map").geocode(adress)
    try:
        return location.latitude
    except AttributeError:
        return 1084
def getlon_fadress(adress):
    """Geocode *adress* with Nominatim and return its longitude.

    Returns the sentinel 1084 when the address cannot be resolved
    (geocode() yields None, raising AttributeError on attribute access).
    """
    location = Nominatim(user_agent="map").geocode(adress)
    try:
        return location.longitude
    except AttributeError:
        return 1084
|
{"/map/admin.py": ["/map/models.py"], "/map/views.py": ["/map/models.py", "/map/forms.py"]}
|
7,771
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/utils.py
|
import os
import codecs
import logging as log
from collections import defaultdict
import jieba
import torch
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
def sent_tokenize(sent, se=False):
    """Tokenize a sentence into characters, or validate an existing token list.

    A string is split into its individual characters (character-level
    tokenization; the jieba word-level alternative was deliberately disabled
    upstream). A list input must already contain strings. When ``se`` is
    true the result is wrapped with <SOS>/<EOS> markers.
    """
    SOS_TOK, EOS_TOK = '<SOS>', '<EOS>'
    if isinstance(sent, str):
        # sent = jieba.lcut(sent)
        sent = list(sent)
    elif isinstance(sent, list):
        assert isinstance(sent[0], str), "Invalid sentence found!"
    if se:
        sent = [SOS_TOK] + sent + [EOS_TOK]
    return sent
def calc_tfidf_matrix(corpus, max_features, stop_words='english'):
    """Build a TF-IDF matrix over *corpus*.

    Parameters
    ----------
    corpus : list of token lists; each sentence is space-joined before
        vectorization.
    max_features : vocabulary-size cap forwarded to CountVectorizer.
    stop_words : forwarded to CountVectorizer. BUG FIX: this parameter was
        previously accepted but silently ignored (the vectorizer never saw
        it); the only visible caller passes stop_words=None, which keeps
        that caller's behavior unchanged.

    Returns
    -------
    (tfidf, word_list) : the sparse TF-IDF matrix and the fitted vocabulary.
    """
    corpus = [' '.join(sent) for sent in corpus]
    vectorizer = CountVectorizer(
        # max_df= .999,
        # min_df = .001,
        max_features=max_features,
        stop_words=stop_words,
    )
    transformer = TfidfTransformer()
    tfidf = transformer.fit_transform(
        vectorizer.fit_transform(corpus)
    )
    word_list = vectorizer.get_feature_names()
    log.info("Finished building a word list of size %i" % len(word_list))
    return tfidf, word_list
def load_word_vector(data_dir, d_feature):
    """Load pre-trained word vectors from ``<data_dir>/sgns.weibo.word``.

    The file's first line is "<vocab_size> <dim>"; each following line is
    "<word> <v1> ... <vdim>". Asserts that dim matches *d_feature*.

    Returns
    -------
    (word2idx, vectors) : dict mapping word -> row index, and a list of
        1-D float tensors in the same order.
    """
    word2idx = {}
    vectors = []
    path = os.path.join(data_dir, 'sgns.weibo.word')
    with open(path, 'r', encoding='utf-8') as f:
        header = f.readline().strip().split(' ')
        vocab_size, dim = int(header[0]), int(header[1])
        assert dim == d_feature
        for idx in range(vocab_size):
            parts = f.readline().strip().split(' ')
            word2idx[parts[0]] = idx
            vectors.append(torch.tensor([float(v) for v in parts[1:]]))
    return word2idx, vectors
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,772
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/train.py
|
import os
import itertools
import logging as log
import torch
import torch.nn.functional as F
from torchtext.data import Iterator, BucketIterator
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from metrics import Metrics
from evaluate import evaluate
def train(args, model, task):
    """Train *model* on task.train_set for args.n_epochs epochs.

    Logs the running loss every args.log_every passes, evaluates every
    args.eval_every passes, and saves the final parameters when training
    completes. Seq2Seq models return their own loss; all other models are
    trained with cross-entropy against batch.targ.
    """
    metrics = Metrics()
    writer = None  # SummaryWriter(os.path.join(args.run_dir, 'tensorboard'))
    # Only optimize parameters that require gradients (e.g. frozen embeddings stay fixed).
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
    log.info('Start to train')
    n_passes = 0
    epoch = 0
    for epoch in range(args.n_epochs):
        # while True:
        data_iter = Iterator(
            # data_loader = DataLoader(
            task.train_set,
            args.batch_size,
            # collate_fn=task.collate_fn,
            device=args.device,
            shuffle=True,
        )
        for batch in data_iter:
            # for batch in data_loader:
            texts, targs = batch.text, batch.targ
            # MLP consumes precomputed TF-IDF features, not token sequences.
            inputs = texts if args.model != 'MLP' else None
            # texts, targs = batch
            # inputs, targs = batch
            # targs = targs.to(args.device)
            model.train()
            optimizer.zero_grad()
            if args.model == 'Seq2Seq':
                # Seq2Seq computes its own (reconstruction) loss.
                _, _, loss = model(inputs)
            else:
                preds = model(inputs)
                loss = F.cross_entropy(preds, targs)
                metrics.count(preds, targs)
            loss.backward()
            optimizer.step()
            n_passes += 1
            # log train
            if n_passes % args.log_every == 0:
                # report = metrics.report(reset=True)
                # report += [('loss', loss.item())]
                # writer.add_scalars('train', dict(report), n_passes)
                # log.info('Pass #%i train: %s' % (n_passes, str(report)))
                log.info('Epoch #%i Pass #%i train: %f' % (epoch, n_passes, loss.item()))
            # save model
            # if n_passes % args.save_every == 0:
            #     torch.save(model.state_dict(), os.path.join(args.run_dir, 'params_%i.model' % n_passes))
            # evaluate
            if n_passes % args.eval_every == 0:
                evaluate(args, model, task, tensorboard_writer=writer, n_passes=n_passes)
    # NOTE(review): the flattened source makes the placement of this
    # increment ambiguous; after the epoch loop it only bumps the checkpoint
    # file name — confirm against the original repository.
    epoch += 1
    torch.save(model.state_dict(), os.path.join(args.run_dir, 'params_%i.model' % epoch))
    log.info('Finished training')
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,773
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/seq2vec.py
|
import os
import pickle
import itertools
import logging as log
import torch
import torch.nn.functional as F
from torchtext.data import Iterator
from torch.utils.data import DataLoader
def seq2vec(args, model, task):
    """Extract per-example feature vectors for both splits and pickle them.

    For Seq2Seq models the last element of the returned hidden states is
    used; any other model must expose a ``seq2vec`` method. Features are
    written to ``<data_dir>/feats_train`` and ``<data_dir>/feats_test`` as
    pickled numpy arrays.
    """
    log.info('Starting to computing features')
    model.eval()
    for split in ['train', 'test']:
        dataset = task.train_set if split == 'train' else task.test_set
        data_iter = Iterator(
            dataset,
            args.batch_size,
            device=args.device,
            shuffle=False
        )
        feats_list = []
        for batch in data_iter:
            inputs, targs = batch.text, batch.targ
            if args.model == 'Seq2Seq':
                feats, _, _ = model(inputs)
                # Keep only the final hidden state as the sequence embedding.
                feats = feats[-1]
            else:
                feats = model.seq2vec(inputs)
            feats_list.append(feats)
        # Concatenate all batches into one (n_examples x d) array.
        feats = torch.cat(feats_list, dim=0)
        feats = feats.detach().cpu().numpy()
        print('feats shape', feats.shape)
        with open(os.path.join(args.data_dir, 'feats_%s' % split), 'wb') as f:
            pickle.dump(feats, f)
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,774
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/metrics.py
|
import torch
class CategoricalAccuracy():
    """
    https://github.com/allenai/allennlp/blob/master/allennlp/training/metrics/categorical_accuracy.py#L11-L103
    Categorical Top-K accuracy. Assumes integer labels, with
    each item to be classified having a single correct class.
    Tie break enables equal distribution of scores among the
    classes with the same maximum predicted scores.
    """
    def __init__(self, top_k: int = 1, tie_break: bool = False) -> None:
        if top_k > 1 and tie_break:
            raise ValueError("Tie break in Categorical Accuracy "
                             "can be done only for maximum (top_k = 1)")
        if top_k <= 0:
            raise ValueError("top_k passed to Categorical Accuracy must be > 0")
        self._top_k = top_k
        self._tie_break = tie_break
        # Running counts accumulated across __call__ invocations.
        self.correct_count = 0.
        self.total_count = 0.

    def __call__(self,
                 predsictions: torch.Tensor,
                 gold_labels: torch.Tensor,
                 # mask: Optional[torch.Tensor] = None):
                 mask=None):
        """
        Accumulate accuracy statistics for one batch.

        Parameters
        ----------
        predsictions : ``torch.Tensor``, required.
            A tensor of predictions of shape (batch_size, ..., num_classes).
        gold_labels : ``torch.Tensor``, required.
            A tensor of integer class label of shape (batch_size, ...). It must be the same
            shape as the ``predsictions`` tensor without the ``num_classes`` dimension.
        mask: ``torch.Tensor``, optional (default = None).
            A masking tensor the same size as ``gold_labels``.
        """
        # predsictions, gold_labels, mask = self.unwrap_to_tensors(predsictions, gold_labels, mask)
        # Some sanity checks.
        num_classes = predsictions.size(-1)
        if gold_labels.dim() != predsictions.dim() - 1:
            raise ValueError("gold_labels must have dimension == predsictions.size() - 1 but "
                             "found tensor of shape: {}".format(predsictions.size()))
        if (gold_labels >= num_classes).any():
            raise ValueError("A gold label passed to Categorical Accuracy contains an id >= {}, "
                             "the number of classes.".format(num_classes))
        # Flatten any extra dimensions so rows line up with labels.
        predsictions = predsictions.view((-1, num_classes))
        gold_labels = gold_labels.view(-1).long()
        if not self._tie_break:
            # Top K indexes of the predictions (or fewer, if there aren't K of them).
            # Special case topk == 1, because it's common and .max() is much faster than .topk().
            if self._top_k == 1:
                top_k = predsictions.max(-1)[1].unsqueeze(-1)
            else:
                top_k = predsictions.topk(min(self._top_k, predsictions.shape[-1]), -1)[1]
            # This is of shape (batch_size, ..., top_k).
            correct = top_k.eq(gold_labels.unsqueeze(-1)).float()
        else:
            # Prediction is correct if the gold label falls on any of the max scores; distribute score by tie_counts.
            max_predsictions = predsictions.max(-1)[0]
            max_predsictions_mask = predsictions.eq(max_predsictions.unsqueeze(-1))
            # max_predsictions_mask is (rows X num_classes) and gold_labels is (batch_size)
            # ith entry in gold_labels points to index (0-num_classes) for ith row in max_predsictions
            # For each row check whether the index pointed to by gold_label was 1 (among max scored classes).
            correct = max_predsictions_mask[torch.arange(gold_labels.numel()).long(), gold_labels].float()
            tie_counts = max_predsictions_mask.sum(-1)
            correct /= tie_counts.float()
            correct.unsqueeze_(-1)
        if mask is not None:
            # Masked-out positions contribute neither correct nor total counts.
            correct *= mask.view(-1, 1).float()
            self.total_count += mask.sum()
        else:
            self.total_count += gold_labels.numel()
        self.correct_count += correct.sum()

    def get_metric(self, reset: bool = False):
        """
        Returns
        -------
        The accumulated accuracy.
        """
        if self.total_count > 1e-12:
            accuracy = float(self.correct_count) / float(self.total_count)
        else:
            accuracy = 0.0
        if reset:
            self.reset()
        return accuracy

    def reset(self):
        # Clear the running counts for a fresh accumulation window.
        self.correct_count = 0.0
        self.total_count = 0.0
class F1Measure():
    """
    https://github.com/allenai/allennlp/blob/master/allennlp/training/metrics/f1_measure.py#L10-L94
    Computes Precision, Recall and F1 with respect to a given ``positive_label``.
    For example, for a BIO tagging scheme, you would pass the classification index of
    the tag you are interested in, resulting in the Precision, Recall and F1 score being
    calculated for this tag only.
    """
    def __init__(self, positive_label: int = 1) -> None:
        self._positive_label = positive_label
        # Running confusion-matrix counts accumulated across __call__ invocations.
        self._true_positives = 0.0
        self._true_negatives = 0.0
        self._false_positives = 0.0
        self._false_negatives = 0.0

    def __call__(self,
                 predsictions: torch.Tensor,
                 gold_labels: torch.Tensor,
                 # mask: Optional[torch.Tensor] = None):
                 mask=None):
        """
        Accumulate confusion-matrix statistics for one batch.

        Parameters
        ----------
        predsictions : ``torch.Tensor``, required.
            A tensor of predictions of shape (batch_size, ..., num_classes).
        gold_labels : ``torch.Tensor``, required.
            A tensor of integer class label of shape (batch_size, ...). It must be the same
            shape as the ``predsictions`` tensor without the ``num_classes`` dimension.
        mask: ``torch.Tensor``, optional (default = None).
            A masking tensor the same size as ``gold_labels``.
        """
        # predsictions, gold_labels, mask = self.unwrap_to_tensors(predsictions, gold_labels, mask)
        num_classes = predsictions.size(-1)
        if (gold_labels >= num_classes).any():
            raise ValueError("A gold label passed to F1Measure contains an id >= {}, "
                             "the number of classes.".format(num_classes))
        if mask is None:
            mask = torch.ones_like(gold_labels)
        mask = mask.float()
        gold_labels = gold_labels.float()
        # 1.0 where the gold label is the positive class, else 0.0.
        positive_label_mask = gold_labels.eq(self._positive_label).float()
        negative_label_mask = 1.0 - positive_label_mask
        argmax_predsictions = predsictions.max(-1)[1].float().squeeze(-1)
        # True Negatives: correct non-positive predictions.
        correct_null_predsictions = (argmax_predsictions !=
                                     self._positive_label).float() * negative_label_mask
        self._true_negatives += (correct_null_predsictions.float() * mask).sum()
        # True Positives: correct positively labeled predictions.
        correct_non_null_predsictions = (argmax_predsictions ==
                                         self._positive_label).float() * positive_label_mask
        self._true_positives += (correct_non_null_predsictions * mask).sum()
        # False Negatives: incorrect negatively labeled predictions.
        incorrect_null_predsictions = (argmax_predsictions !=
                                       self._positive_label).float() * positive_label_mask
        self._false_negatives += (incorrect_null_predsictions * mask).sum()
        # False Positives: incorrect positively labeled predictions.
        incorrect_non_null_predsictions = (argmax_predsictions ==
                                           self._positive_label).float() * negative_label_mask
        self._false_positives += (incorrect_non_null_predsictions * mask).sum()

    def get_metric(self, reset: bool = False):
        """
        Returns
        -------
        A tuple of the following metrics based on the accumulated count statistics:
        precision : float
        recall : float
        f1-measure : float
        """
        # The 1e-13 terms guard against division by zero when a count is 0.
        precision = float(self._true_positives) / float(self._true_positives + self._false_positives + 1e-13)
        recall = float(self._true_positives) / float(self._true_positives + self._false_negatives + 1e-13)
        f1_measure = 2. * ((precision * recall) / (precision + recall + 1e-13))
        if reset:
            self.reset()
        return precision, recall, f1_measure

    def reset(self):
        # Clear the confusion-matrix counts for a fresh accumulation window.
        self._true_positives = 0.0
        self._true_negatives = 0.0
        self._false_positives = 0.0
        self._false_negatives = 0.0
class Metrics():
    """Aggregates CategoricalAccuracy and F1Measure over batches of predictions."""

    def __init__(self):
        self.metrics = [CategoricalAccuracy(), F1Measure()]

    def count(self, preds, targs):
        """Feed one batch of (predictions, targets) to every metric."""
        for metric in self.metrics:
            metric(preds, targs)

    def report(self, reset=False):
        """Return [(name, value), ...] for all metrics.

        BUG FIXES vs. the original:
        - ``reset`` is now forwarded to get_metric (it was hard-coded to
          False, so ``report(reset=True)`` silently never reset anything);
        - F1Measure.get_metric returns (precision, recall, f1), but the
          labels were swapped — precision was reported as 'rec' and recall
          as 'pre'.
        """
        report = []
        for metric in self.metrics:
            value = metric.get_metric(reset=reset)
            if isinstance(metric, CategoricalAccuracy):
                report.append(('acc', value))
            elif isinstance(metric, F1Measure):
                report += [('pre', value[0]), ('rec', value[1]), ('f1', value[2])]
            else:
                raise NotImplementedError
        return report

    def reset(self):
        """Reset every underlying metric's accumulated counts."""
        for metric in self.metrics:
            metric.reset()
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,775
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/evaluate.py
|
import logging as log
import torch
from torchtext.data import Iterator, BucketIterator
from torch.utils.data import DataLoader
from metrics import Metrics
def evaluate(args, model, task, tensorboard_writer=None, n_passes=-1):
    """Run *model* over task.test_set and collect predictions and metrics.

    Seq2Seq models report their own (reconstruction) loss and sample
    predictions; classification models accumulate accuracy/F1 via Metrics.
    Returns the list of per-batch prediction tensors (empty for Seq2Seq).
    """
    model.eval()
    metrics = Metrics()
    data_iter = Iterator(
        task.test_set,
        args.batch_size,
        device=args.device,
        shuffle=False
    )
    preds_list = []
    loss = None
    for batch in data_iter:
        inputs, targs = batch.text, batch.targ
        if args.model == 'Seq2Seq':
            hidden, pred, loss = model(inputs)
            # BUG FIX: this print referenced ``pred``, which only exists on
            # the Seq2Seq path — it now lives inside that branch.
            print(inputs[0:10], '\n', pred[0:10])
        else:
            preds = model(inputs)
            preds_list.append(preds)
            metrics.count(preds, targs)
    # BUG FIX: ``loss`` was logged unconditionally before, raising NameError
    # for every non-Seq2Seq model; classification models now log the metric
    # report instead of a loss they never computed.
    if loss is not None:
        log.info('Pass #%i evaluate: %f' % (n_passes, loss.item()))
    else:
        report = metrics.report(reset=True)
        log.info('Pass #%i evaluate: %s' % (n_passes, str(report)))
        if tensorboard_writer is not None:
            tensorboard_writer.add_scalars('evaluate', dict(report), n_passes)
    return preds_list
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,776
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/models/trans.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import logging as log
from .modules import TransformerEncoder
class Transformer(nn.Module):
    """Transformer-encoder text classifier.

    Embeds token ids, runs a multi-layer TransformerEncoder, concatenates
    the final-position hidden state of every layer output, and projects the
    concatenation to class scores.
    """
    def __init__(self, args, task):
        super().__init__()
        self.vocab = task.vocab
        self.embedding = nn.Embedding(
            len(self.vocab),
            args.d_feature,
            # _weight=self.vocab.vectors
        )
        self.trans = TransformerEncoder(
            dimension=args.d_feature,
            n_heads=4,
            hidden=args.d_hidden,
            num_layers=args.n_layers,
            dropout=args.dropout
        )
        # NOTE(review): the (n_layers + 1) factor assumes self.trans returns
        # n_layers + 1 tensors (presumably the input plus one per layer) —
        # confirm against models/modules.py.
        self.output = nn.Linear(
            args.d_hidden * (args.n_layers + 1),
            task.n_classes
        )
        self.device = args.device
        self.to(self.device)

    def forward(self, x):
        '''
        inputs:
            x: batch_size x seq_len
        NOTE(review): this returns softmax probabilities, yet train.py feeds
        the result to F.cross_entropy, which applies log-softmax internally
        (double softmax) — confirm whether raw logits were intended.
        '''
        x = x.to(self.device)
        batch_size = x.shape[0]  # unused; kept for parity with seq2vec
        x = self.embedding(x)
        x = self.trans(x)
        # Take the last time-step slice from every per-layer output.
        x = [x[i][:, -1, :] for i in range(len(x))]
        x = torch.cat(x, dim=1)
        x = self.output(x)  # batch_size x n_classes
        return torch.softmax(x, dim=1)  # batch_size x n_classes

    def seq2vec(self, x):
        # Same pipeline as forward() but stops before the classification
        # head, yielding the concatenated per-layer sentence embedding.
        x = x.to(self.device)
        batch_size = x.shape[0]
        x = self.embedding(x)
        x = self.trans(x)
        x = [x[i][:, -1, :] for i in range(len(x))]
        x = torch.cat(x, dim=1)
        return x
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,777
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/dataset.py
|
import os
import itertools
import logging as log
import torch
import torch.utils.data as data
from torchtext import data, datasets
from sklearn.feature_extraction.text import TfidfVectorizer
from utils import sent_tokenize, calc_tfidf_matrix, load_word_vector
class SeqTask():
    """Binary text-classification task backed by torchtext TabularDatasets.

    Loads tab-separated (target, text) files, builds the vocabulary from the
    training split, and attaches pre-trained word vectors to it.
    """
    def __init__(self, args):
        self.n_classes = 2
        # Character-level tokenization via utils.sent_tokenize, wrapped in
        # <SOS>/<EOS> by torchtext's init/eos tokens.
        text_field = data.Field(
            sequential=True,
            init_token='<SOS>',
            eos_token='<EOS>',
            lower=True,
            tokenize=sent_tokenize,
            pad_first=False,
            batch_first=True
        )
        # Integer class label; no vocabulary needed.
        targ_field = data.Field(
            sequential=False,
            use_vocab=False,
            is_target=True
        )
        self.train_set = data.TabularDataset(
            path=os.path.join(args.data_dir, 'train_shuffle.txt'),
            format='tsv',
            fields=[('targ', targ_field), ('text', text_field)])
        self.test_set = data.TabularDataset(
            path=os.path.join(args.data_dir, 'test_shuffle.txt'),
            format='tsv',
            fields=[('targ', targ_field), ('text', text_field)])
        # Vocabulary is built from the training split only.
        text_field.build_vocab(self.train_set)
        self.vocab = text_field.vocab
        log.info('Finished building a vocab of size %i' % len(self.vocab))
        word2idx, vectors = load_word_vector(args.data_dir, args.d_feature)
        self.vocab.set_vectors(word2idx, vectors, dim=args.d_feature)
class NonSeqTask():
    """Binary classification task over TF-IDF bag-of-words features.

    Loads tab-separated (target, text) files for both splits, builds one
    TF-IDF matrix over the combined corpus, then slices it back into
    train/test datasets.
    """
    def __init__(self, args):
        self.n_classes = 2
        # load data
        corpus = []
        # IMPROVEMENT: the original smuggled per-split variables through
        # exec()/eval() string construction — replaced with plain dicts,
        # which is equivalent, safer, and analyzable.
        split_texts = {}
        split_targs = {}
        for split in ['train', 'test']:
            file_name = split + '_shuffle.txt'
            with open(os.path.join(args.data_dir, file_name), 'r') as f:
                lines = f.readlines()  # targ \t text
            lines = [line.strip().split('\t') for line in lines]  # [[targ, text], ...]
            print('Finished loading file %s' % file_name)
            texts = [sent_tokenize(text) for targ, text in lines]  # [[word0, word1], ...]
            targs = [int(targ) for targ, text in lines]  # [targs]
            split_texts[split] = texts
            split_targs[split] = targs
            corpus += texts

        class Dataset(data.Dataset):
            # Thin (inputs, targs) container indexed row-by-row.
            # NOTE(review): ``data`` is torchtext.data here (the torchtext
            # import shadows ``torch.utils.data as data`` at module level) —
            # confirm which Dataset base class was intended.
            def __init__(self, inputs, targs):
                self.inputs = inputs
                self.targs = targs

            def __len__(self):
                return self.inputs.shape[0]

            def __getitem__(self, index):
                if self.targs is None:
                    return self.inputs[index], None
                return self.inputs[index], self.targs[index]

        # form dataset: one TF-IDF fit over train+test, sliced back per split.
        tfidf, word_list = calc_tfidf_matrix(corpus, args.d_feature, stop_words=None)
        args.d_feature = len(word_list)
        for split in ['train', 'test']:
            texts = split_texts[split]
            targs = split_targs[split]
            split_size = len(texts)
            inputs = tfidf[:split_size]
            tfidf = tfidf[split_size:]
            setattr(self, split + '_set', Dataset(inputs, targs))
        log.info('Finished building datasets')
        print(len(self.train_set), len(self.test_set))

    def collate_fn(self, batch):
        """Stack sparse rows into a dense float tensor plus a long target tensor."""
        inputs, targs = [], []
        for x, y in batch:
            x = torch.tensor(x.toarray(), dtype=torch.float)
            if y is not None:
                y = torch.tensor(y, dtype=torch.long)
            inputs.append(x)
            targs.append(y)
        inputs = torch.cat(inputs)
        if targs[0] is not None:
            targs = torch.stack(targs)
        else:
            targs = None
        return inputs, targs
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,778
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/models/seq2seq.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import logging as log
class Seq2Seq(nn.Module):
    """GRU encoder-decoder autoencoder over token-id sequences.

    forward() encodes a sentence, decodes it from all-zero decoder inputs,
    and returns the encoder's final hidden state (usable as a sentence
    embedding), the predicted token ids, and the reconstruction loss.
    """

    def __init__(self, args, task):
        super().__init__()
        self.vocab = task.vocab
        self.vocab_size = len(self.vocab)
        self.d_feature = args.d_feature
        self.embedding = nn.Embedding(
            self.vocab_size,
            args.d_feature,
            _weight=self.vocab.vectors
        )
        self.encoder = nn.GRU(
            args.d_feature,
            args.d_hidden,
            args.n_layers,
            batch_first=True,
            bidirectional=False
        )
        self.decoder = nn.GRU(
            args.d_feature,
            args.d_hidden,
            args.n_layers,
            batch_first=True,
            bidirectional=False
        )
        self.output2word = nn.Linear(
            args.d_hidden,
            self.vocab_size
        )
        self.device = args.device
        self.to(self.device)

    def forward(self, sents):
        '''
        Parameters
        ------------------
        sents: batch_size x seq_len (token ids; position 0 is the init token)

        Returns
        ------------------
        hidden: n_layers x batch_size x d_hidden  (encoder final state)
        pred:   batch_size x (seq_len - 1)        (predicted token ids)
        loss:   scalar cross-entropy against sents[:, 1:] (index 1 ignored)
        '''
        sents = sents.to(self.device)
        batch_size = sents.shape[0]
        seq_len = sents.shape[1]
        # encoding
        embeded = self.embedding(sents) # batch_size x seq_len x d_feature
        _, hidden = self.encoder(embeded) # n_directions * n_layers x batch_size x d_hidden
        # decoding from zero inputs (no teacher forcing)
        embeded = torch.zeros(batch_size, seq_len - 1, self.d_feature).to(self.device)
        output, _ = self.decoder(embeded, hidden) # batch_size x seq_len-1 x d_hidden
        logits = self.output2word(output).view(-1, self.vocab_size)
        # BUG FIX: F.cross_entropy expects raw logits; the original applied
        # softmax first, effectively double-applying log-softmax and
        # flattening gradients. argmax is unchanged (softmax is monotonic).
        loss = F.cross_entropy(logits, sents[:, 1:].contiguous().view(-1), ignore_index=1)
        pred = logits.argmax(dim=-1).view(batch_size, -1)
        return hidden, pred, loss
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,779
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/models/modules.py
|
import os
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class Linear(nn.Linear):
    """nn.Linear that accepts inputs with any number of leading dimensions.

    The input is flattened to 2-D, pushed through the affine layer, and the
    leading dimensions are restored on the result.
    """

    def forward(self, x):
        *lead, d_in = x.size()
        flat = x.contiguous().view(-1, d_in)
        out = super().forward(flat)
        return out.view(*lead, -1)
class Feedforward(nn.Module):
    """Dropout -> Linear -> optional activation.

    `activation` names a function on the torch module (e.g. 'relu', 'tanh');
    None means identity.
    """

    def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2):
        super().__init__()
        self.activation = (getattr(torch, activation)
                           if activation is not None
                           else (lambda t: t))
        self.linear = Linear(d_in, d_out, bias=bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        dropped = self.dropout(x)
        return self.activation(self.linear(dropped))
def matmul(x, y):
    """Matrix product that also handles a batched matrix times a batched
    vector in either order, e.g. (4, 3, 2) @ (4, 2) -> (4, 3), which plain
    torch.matmul rejects.
    """
    if x.dim() == y.dim():
        return x @ y
    if x.dim() == y.dim() - 1:
        # x is a batch of row vectors: (..., k) @ (..., k, n) -> (..., n)
        return (x.unsqueeze(-2) @ y).squeeze(-2)
    # y is a batch of column vectors: (..., m, k) @ (..., k) -> (..., m)
    # BUG FIX: the original unsqueezed y at -2, producing (..., 1, k), which
    # is not conformable with (..., m, k); the vector must become (..., k, 1).
    return (x @ y.unsqueeze(-1)).squeeze(-1)
class Attention(nn.Module):
    """Scaled dot-product attention with optional causal masking and key
    padding mask."""

    # Large finite masking constant. float('inf') would produce NaNs when
    # multiplied by the zeros of the triangular mask.
    INF = 1e10

    def __init__(self, d_key, dropout_ratio, causal):
        super().__init__()
        self.scale = math.sqrt(d_key)
        self.dropout = nn.Dropout(dropout_ratio)
        self.causal = causal

    def forward(self, query, key, value, padding=None):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            # BUG FIX: `INF` was an undefined name in this module, so the
            # causal / padding paths raised NameError; it is now a class
            # attribute.
            tri = key.new_ones((key.size(1), key.size(1))).triu(1) * self.INF
            dot_products.sub_(tri.unsqueeze(0))
        if padding is not None:
            dot_products.masked_fill_(
                padding.unsqueeze(1).expand_as(dot_products), -self.INF)
        return matmul(self.dropout(F.softmax(dot_products / self.scale, dim=-1)), value)
class MultiHead(nn.Module):
    """Multi-head wrapper around Attention: project q/k/v, split each into
    n_heads chunks along the feature axis, attend per head, and concatenate
    the per-head outputs."""

    def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False):
        super().__init__()
        self.attention = Attention(d_key, dropout_ratio, causal=causal)
        self.wq = Linear(d_key, d_key, bias=False)
        self.wk = Linear(d_key, d_key, bias=False)
        self.wv = Linear(d_value, d_value, bias=False)
        self.n_heads = n_heads

    def forward(self, query, key, value, padding=None):
        heads_q = self.wq(query).chunk(self.n_heads, -1)
        heads_k = self.wk(key).chunk(self.n_heads, -1)
        heads_v = self.wv(value).chunk(self.n_heads, -1)
        outputs = [self.attention(q, k, v, padding=padding)
                   for q, k, v in zip(heads_q, heads_k, heads_v)]
        return torch.cat(outputs, -1)
class LinearReLU(nn.Module):
    """Position-wise feed-forward sub-layer: Linear+ReLU expansion followed
    by a Linear projection back to d_model.

    `padding` is accepted for interface parity with the attention sub-layer
    but ignored.
    """

    def __init__(self, d_model, d_hidden):
        super().__init__()
        self.feedforward = Feedforward(d_model, d_hidden, activation='relu')
        self.linear = Linear(d_hidden, d_model)

    def forward(self, x, padding=None):
        hidden = self.feedforward(x)
        return self.linear(hidden)
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learned scale/shift.

    Note: eps is added to the standard deviation (not the variance), matching
    the original implementation.
    """

    def __init__(self, d_model, eps=1e-6):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(d_model))
        self.beta = nn.Parameter(torch.zeros(d_model))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.gamma * normalized + self.beta
class ResidualBlock(nn.Module):
    """Wrap a sub-layer with dropout, a residual connection, and layer norm:
    LayerNorm(x[0] + Dropout(layer(*x, padding=padding))).

    The first positional argument is treated as the residual input.
    """

    def __init__(self, layer, d_model, dropout_ratio):
        super().__init__()
        self.layer = layer
        self.dropout = nn.Dropout(dropout_ratio)
        self.layernorm = LayerNorm(d_model)

    def forward(self, *x, padding=None):
        residual = x[0]
        out = self.layer(*x, padding=padding)
        return self.layernorm(residual + self.dropout(out))
class TransformerEncoderLayer(nn.Module):
    """One encoder layer: residual multi-head self-attention followed by a
    residual position-wise feed-forward block."""

    def __init__(self, dimension, n_heads, hidden, dropout):
        super().__init__()
        attention = MultiHead(dimension, dimension, n_heads, dropout)
        self.selfattn = ResidualBlock(attention, dimension, dropout)
        self.feedforward = ResidualBlock(LinearReLU(dimension, hidden),
                                         dimension, dropout)

    def forward(self, x, padding=None):
        attended = self.selfattn(x, x, x, padding=padding)
        return self.feedforward(attended)
class TransformerEncoder(nn.Module):
    """Stack of encoder layers.

    Returns a list: the (dropped-out) input followed by each layer's output.
    """

    def __init__(self, dimension, n_heads, hidden, num_layers, dropout):
        super().__init__()
        self.layers = nn.ModuleList(
            [TransformerEncoderLayer(dimension, n_heads, hidden, dropout)
             for _ in range(num_layers)])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, padding=None):
        x = self.dropout(x)
        encoding = [x]
        for layer in self.layers:
            x = layer(x, padding=padding)
            encoding.append(x)
        return encoding
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,780
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/models/mlp.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP(nn.Module):
    """Multi-layer perceptron over fixed-size (TF-IDF) feature vectors."""

    def __init__(self, args, task):
        super().__init__()
        # Input projection followed by args.n_layers hidden blocks, each
        # Linear -> ReLU -> Dropout.
        self.linears = nn.Sequential(
            nn.Linear(args.d_feature, args.d_hidden), nn.ReLU(), nn.Dropout(p=args.dropout), # input
            *[nn.Sequential(
                nn.Linear(args.d_hidden, args.d_hidden), nn.ReLU(), nn.Dropout(p=args.dropout) # hiddens
            ) for _ in range(args.n_layers)]
        )
        self.output = nn.Linear(args.d_hidden, task.n_classes)
        self.device = args.device
        self.to(self.device)

    def forward(self, x):
        """Return per-class scores in [0, 1], shape batch_size x n_classes."""
        x = x.to(self.device)
        x = self.linears(x) # batch_size x d_hidden
        x = self.output(x)  # batch_size x n_classes
        # FIX: torch.sigmoid replaces the deprecated F.sigmoid (same values).
        # NOTE(review): sibling models return softmax over classes; the
        # per-class sigmoids here are independent — confirm intended.
        return torch.sigmoid(x)
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,781
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/models/__init__.py
|
from .mlp import MLP
from .bilstm import BiLSTM
from .seq2seq import Seq2Seq
from .trans import Transformer
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,782
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/models/bilstm.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import logging as log
class BiLSTM(nn.Module):
    """Bidirectional LSTM classifier over token-id sequences."""

    def __init__(self, args, task):
        super().__init__()
        self.vocab = task.vocab
        self.embedding = nn.Embedding(
            len(self.vocab),
            args.d_feature,
            _weight=self.vocab.vectors
        )
        self.bilstm = nn.LSTM(
            args.d_feature,
            args.d_hidden,
            args.n_layers,
            batch_first=True,
            bidirectional=True
        )
        # Final hidden states of all layers and both directions are
        # concatenated before classification.
        self.output = nn.Linear(
            args.d_hidden * args.n_layers * 2,
            task.n_classes
        )
        self.device = args.device
        self.to(self.device)

    def forward(self, x):
        """Classify a batch of token-id sequences.

        x: batch_size x seq_len (token ids)
        returns: batch_size x n_classes softmax probabilities
        """
        # FIX: reuse seq2vec() instead of duplicating the encoding logic
        # (the original repeated embedding/LSTM/reshape in both methods).
        h_n = self.seq2vec(x)       # batch_size x d_hidden * n_layers * 2
        logits = self.output(h_n)   # batch_size x n_classes
        return torch.softmax(logits, dim=1)

    def seq2vec(self, x):
        """Encode sequences to fixed vectors: concatenated final LSTM states.

        x: batch_size x seq_len -> batch_size x d_hidden * n_layers * 2
        """
        x = x.to(self.device)
        batch_size = x.shape[0]
        x = self.embedding(x)
        _, (h_n, _) = self.bilstm(x)  # h_n: n_layers * 2 x batch_size x d_hidden
        return h_n.transpose(0, 1).contiguous().view(batch_size, -1)
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,783
|
yutxie/text-classification-CS420-baselines
|
refs/heads/master
|
/main.py
|
import os
import time
import argparse
import logging as log
import torch
import models
import dataset
from train import train
from evaluate import evaluate
from seq2vec import seq2vec
# Command-line interface; defaults assume a CUDA machine (device id 3).
parser = argparse.ArgumentParser(description='Text Classification')
# environment
parser.add_argument('--device', type=int, default=3, help='gpu device id, -1 if cpu')
# log
parser.add_argument('--log_every', type=int, default=200, help='log train how many every passes')
parser.add_argument('--eval_every', type=int, default=2000, help='evaluate how many every passes')
# parser.add_argument('--save_every', type=int, default=1000, help='save model how many every passes')
# train
parser.add_argument('--n_epochs', type=int, default=1000, help='how many epochs')
parser.add_argument('--batch_size', type=int, default=32, help='how many instances in a batch')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
# data
parser.add_argument('--data_dir', type=str, default='data/')
parser.add_argument('--run_dir', type=str, default='run/')
# model: class name looked up on the models package (MLP, BiLSTM, Seq2Seq, Transformer)
parser.add_argument('--model', type=str, default='Seq2Seq')
parser.add_argument('--d_feature', type=int, default=300)
parser.add_argument('--d_hidden', type=int, default=300)
parser.add_argument('--n_layers', type=int, default=1)
parser.add_argument('--dropout', type=float, default=.5)
args = parser.parse_args()
# make dirs
os.makedirs(args.run_dir, exist_ok=True)
# set log: console plus run_dir/log.txt (truncated on every run)
log.basicConfig(
    format='%(asctime)s: %(message)s',
    datefmt='%m/%d %I:%M:%S %p', level=log.INFO)
log.getLogger().addHandler(log.FileHandler(os.path.join(args.run_dir, 'log.txt'), mode='w'))
log.info(str(vars(args)))
# parse device: int id -> torch.device ('cpu' for negative ids)
args.device = 'cpu' if args.device < 0 else 'cuda:%i' % args.device
args.device = torch.device(args.device)
#########################################
if __name__ == "__main__":
    # dataset: MLP consumes TF-IDF vectors, the other models token sequences
    Task = dataset.NonSeqTask \
        if args.model == 'MLP' \
        else dataset.SeqTask
    task = Task(args)
    # model
    Model = getattr(models, args.model)
    model = Model(args, task)
    # NOTE(review): unconditionally loads pretrained weights; this raises
    # FileNotFoundError on a fresh run_dir — confirm intended.
    model.load_state_dict(torch.load(os.path.join(args.run_dir, 'params_lang.model')))
    # train
    train(args, model, task)
    # preds = evaluate(args, model, task)
    # preds = torch.cat(preds)
    # preds = preds[:,1]
    seq2vec(args, model, task)
    # with open(os.path.join(args.run_dir, 'submission.csv'), 'w') as f:
    #     f.write('id,pred\n')
    #     for idx in range(preds.shape[0]):
    #         f.write('%i,%f\n' % (idx, preds[idx]))
|
{"/train.py": ["/metrics.py", "/evaluate.py"], "/evaluate.py": ["/metrics.py"], "/models/trans.py": ["/models/modules.py"], "/dataset.py": ["/utils.py"], "/models/__init__.py": ["/models/mlp.py", "/models/bilstm.py", "/models/seq2seq.py", "/models/trans.py"], "/main.py": ["/models/__init__.py", "/dataset.py", "/train.py", "/evaluate.py", "/seq2vec.py"]}
|
7,784
|
Yoshinori-Koide-PRO/indy-plenum
|
refs/heads/master
|
/plenum/server/models.py
|
"""
Some model objects used in Plenum protocol.
"""
import time
from typing import NamedTuple, Set, Optional, Any, Dict, Callable
from plenum.common.messages.node_messages import Prepare, Commit
from stp_core.common.log import getlogger
logger = getlogger()
# Votes for a single 3PC message: the set of voter names plus the first
# message received for the key.
ThreePhaseVotes = NamedTuple("ThreePhaseVotes", [
    ("voters", Set[str]),
    ("msg", Optional[Any])])
class TrackedMsgs(dict):
    """Dict of vote records keyed by a message-derived key.

    Subclasses supply _get_key(); each value is a record with a `voters`
    collection and the first message seen for that key.
    """

    def _get_key(self, msg):
        raise NotImplementedError

    def _new_vote_msg(self, msg):
        return ThreePhaseVotes(voters=set(), msg=msg)

    def _add_msg(self, msg, voter: str):
        """Record `voter` for this message, creating the entry on first use."""
        key = self._get_key(msg)
        if key not in self:
            self[key] = self._new_vote_msg(msg)
        self[key].voters.add(voter)

    def _has_msg(self, msg) -> bool:
        return self._get_key(msg) in self

    def _has_vote(self, msg, voter: str) -> bool:
        record = self.get(self._get_key(msg))
        return record is not None and voter in record.voters

    def _votes_count(self, msg) -> int:
        record = self.get(self._get_key(msg))
        return 0 if record is None else len(record.voters)

    def _has_enough_votes(self, msg, count) -> bool:
        return self._votes_count(msg) >= count
class Prepares(TrackedMsgs):
    """
    Dictionary of received Prepare requests. Key of dictionary is a 2
    element tuple with elements viewNo, seqNo and value is a 2 element
    tuple containing request digest and set of sender node names(sender
    replica names in case of multiple protocol instances)
    (viewNo, seqNo) -> (digest, {senders})
    """

    def _get_key(self, prepare):
        return prepare.viewNo, prepare.ppSeqNo

    def addVote(self, prepare: Prepare, voter: str) -> None:
        """Record that `voter` sent this PREPARE."""
        self._add_msg(prepare, voter)

    def hasPrepare(self, prepare: Prepare) -> bool:
        """True if any PREPARE with this (viewNo, ppSeqNo) was received."""
        return self._has_msg(prepare)

    def hasPrepareFrom(self, prepare: Prepare, voter: str) -> bool:
        """True if `voter` has already sent this PREPARE."""
        return self._has_vote(prepare, voter)

    def hasQuorum(self, prepare: Prepare, quorum: int) -> bool:
        """True if at least `quorum` distinct voters sent this PREPARE."""
        return self._has_enough_votes(prepare, quorum)
class Commits(TrackedMsgs):
    """
    Dictionary of received commit requests. Key of dictionary is a 2
    element tuple with elements viewNo, seqNo and value is a 2 element
    tuple containing request digest and set of sender node names(sender
    replica names in case of multiple protocol instances)
    """

    def _get_key(self, commit):
        return commit.viewNo, commit.ppSeqNo

    def addVote(self, commit: Commit, voter: str) -> None:
        """Record that `voter` sent this COMMIT."""
        self._add_msg(commit, voter)

    def hasCommit(self, commit: Commit) -> bool:
        """True if any COMMIT with this (viewNo, ppSeqNo) was received."""
        return self._has_msg(commit)

    def hasCommitFrom(self, commit: Commit, voter: str) -> bool:
        """True if `voter` has already sent this COMMIT."""
        return self._has_vote(commit, voter)

    def hasQuorum(self, commit: Commit, quorum: int) -> bool:
        """True if at least `quorum` distinct voters sent this COMMIT."""
        return self._has_enough_votes(commit, quorum)
# Like ThreePhaseVotes, but voters map to the time the vote was received so
# outdated votes can be expired.
InstanceChangesVotes = NamedTuple("InstanceChangesVotes", [
    ("voters", Dict[str, int]),
    ("msg", Optional[Any])])
class InstanceChanges(TrackedMsgs):
    """
    Stores senders of received instance change requests. Key is the view
    no and value is a dict of senders mapped to receive time.
    Does not differentiate between reason for view change. Maybe it should,
    but the current assumption is that since a malicious node can raise
    different suspicions on different nodes, its ok to consider all suspicions
    that can trigger a view change as equal
    """

    def __init__(self, config, time_provider: Callable = time.perf_counter) -> None:
        self._outdated_ic_interval = \
            config.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL
        self._time_provider = time_provider
        super().__init__()

    def _new_vote_msg(self, msg):
        return InstanceChangesVotes(voters=dict(), msg=msg)

    def _get_key(self, msg):
        # Accepts either a bare view number or an InstanceChange-like message.
        return msg if isinstance(msg, int) else msg.viewNo

    def add_vote(self, msg, voter: str):
        # This method can't use _add_msg() because
        # the voters collection is a dict (voter -> receive time).
        key = self._get_key(msg)
        if key not in self:
            self[key] = self._new_vote_msg(msg)
        self[key].voters[voter] = self._time_provider()

    def has_view(self, view_no: int) -> bool:
        self._update_votes(view_no)
        return super()._has_msg(view_no)

    def has_inst_chng_from(self, view_no: int, voter: str) -> bool:
        self._update_votes(view_no)
        return super()._has_vote(view_no, voter)

    def has_quorum(self, view_no: int, quorum: int) -> bool:
        self._update_votes(view_no)
        return self._has_enough_votes(view_no, quorum)

    def _update_votes(self, view_no: int):
        """Drop votes older than the configured interval; remove the view
        entry entirely once no voters remain."""
        if self._outdated_ic_interval <= 0 or view_no not in self:
            return
        # FIX: read the clock once so every vote is judged against the same
        # "now" (the original re-read the time provider for each voter).
        now = self._time_provider()
        for voter, vote_time in dict(self[view_no].voters).items():
            if vote_time < now - self._outdated_ic_interval:
                logger.info("Discard InstanceChange from {} for ViewNo {} "
                            "because it is out of date (was received {}sec "
                            "ago)".format(voter, view_no, int(now - vote_time)))
                del self[view_no].voters[voter]
        if not self[view_no].voters:
            del self[view_no]
|
{"/plenum/test/server/test_models.py": ["/plenum/server/models.py"]}
|
7,785
|
Yoshinori-Koide-PRO/indy-plenum
|
refs/heads/master
|
/plenum/test/replica/conftest.py
|
import types
import pytest
from plenum.common.startable import Mode
from plenum.common.constants import POOL_LEDGER_ID
from plenum.common.util import get_utc_epoch
from plenum.server.node import Node
from plenum.server.quorums import Quorums
from plenum.server.replica import Replica
from plenum.test.conftest import getValueFromModule
from plenum.test.helper import MockTimestamp
from plenum.test.testing_utils import FakeSomething
class ReplicaFakeNode(FakeSomething):
    """Minimal stand-in for a Node, exposing only the attributes a Replica reads."""

    def __init__(self, viewNo, quorums, ledger_ids):
        # Fake network stack with a fixed four-node connection set.
        node_stack = FakeSomething(
            name="fake stack",
            connecteds={"Alpha", "Beta", "Gamma", "Delta"}
        )
        super().__init__(
            name="fake node",
            ledger_ids=ledger_ids,
            viewNo=viewNo,
            quorums=quorums,
            nodestack=node_stack,
            utc_epoch=lambda *args: get_utc_epoch(),
            mode=Mode.participating,
            view_change_in_progress=False
        )

    @property
    def is_synced(self) -> bool:
        # Synced once catch-up is complete for the current mode.
        return Mode.is_done_syncing(self.mode)

    @property
    def isParticipating(self) -> bool:
        return self.mode == Mode.participating
@pytest.fixture(scope='function', params=[0, 10])
def viewNo(tconf, request):
    # Parametrized view number: tests run once per value.
    return request.param
@pytest.fixture(scope='function')
def mock_timestamp():
    # NOTE(review): this fixture is redefined later in this module (returning
    # get_utc_epoch); that later definition wins, so this version is dead.
    return MockTimestamp()
@pytest.fixture(scope='function')
def ledger_ids():
    # Single pool ledger by default.
    return [POOL_LEDGER_ID]
@pytest.fixture(scope='function', params=[1])
def inst_id(request):
    # Protocol instance id; 1 means a non-master replica (master is 0).
    return request.param
@pytest.fixture(scope="function")
def mock_timestamp():
return get_utc_epoch
@pytest.fixture(scope='function')
def replica(tconf, viewNo, inst_id, ledger_ids, mock_timestamp, request):
    """Build a Replica wired to a fake node and a no-op BLS replica."""
    # Quorums sized from the test module's nodeCount (default 4 nodes).
    node = ReplicaFakeNode(viewNo=viewNo,
                           quorums=Quorums(getValueFromModule(request, 'nodeCount', default=4)),
                           ledger_ids=ledger_ids)
    bls_bft_replica = FakeSomething(
        gc=lambda *args: None,
        update_pre_prepare=lambda params, l_id: params
    )
    replica = Replica(
        node, instId=inst_id, isMaster=inst_id == 0,
        config=tconf, bls_bft_replica=bls_bft_replica,
        get_current_time=mock_timestamp,
        get_time_for_3pc_batch=mock_timestamp
    )
    # NOTE(review): this sets an attribute on the ReplicaFakeNode *class*, not
    # on the instance, so it persists across tests — confirm intended.
    ReplicaFakeNode.master_last_ordered_3PC = replica.last_ordered_3pc
    return replica
|
{"/plenum/test/server/test_models.py": ["/plenum/server/models.py"]}
|
7,786
|
Yoshinori-Koide-PRO/indy-plenum
|
refs/heads/master
|
/plenum/test/server/test_models.py
|
import pytest
from plenum.common.messages.node_messages import InstanceChange
from plenum.server.models import InstanceChanges
from plenum.server.suspicion_codes import Suspicions
from plenum.test.helper import MockTimestamp
@pytest.fixture(scope="function")
def instance_changes(tconf):
return InstanceChanges(tconf)
def test_instance_changes_are_empty_when_created(instance_changes):
    """A fresh tracker has no views, messages, or voters."""
    frm = "Node1"
    view_no = 1
    assert not instance_changes
    assert view_no not in instance_changes
    assert not instance_changes.has_view(view_no)
    assert not instance_changes.has_inst_chng_from(view_no, frm)
def test_add_first_vote(instance_changes):
    """The first vote creates the view entry, stores the message, and records the voter."""
    frm = "Node1"
    view_no = 1
    msg = InstanceChange(view_no, Suspicions.PRIMARY_DEGRADED.code)
    assert not instance_changes
    instance_changes.add_vote(msg, frm)
    assert instance_changes[view_no].msg == msg
    assert instance_changes[view_no].voters[frm]
    assert instance_changes.has_view(view_no)
    assert instance_changes.has_inst_chng_from(view_no, frm)
def test_equal_votes_dont_accumulate_when_added(instance_changes, tconf):
    """A repeated vote from the same node only refreshes its timestamp."""
    # NOTE(review): the injected `instance_changes` fixture is immediately
    # rebound below; the fixture parameter could be dropped.
    frm = "Node1"
    view_no = 1
    time_provider = MockTimestamp(0)
    second_vote_time = 1
    instance_changes = InstanceChanges(tconf, time_provider)
    msg = InstanceChange(view_no, Suspicions.PRIMARY_DEGRADED.code)
    instance_changes.add_vote(msg, frm)
    time_provider.value = second_vote_time
    instance_changes.add_vote(msg, frm)
    assert instance_changes[view_no].voters[frm] == second_vote_time
    assert len(instance_changes[view_no].voters) == 1
    assert len(instance_changes) == 1
def test_too_old_messages_dont_count_towards_quorum(instance_changes, tconf):
    """Votes older than the check interval are expired and excluded from quorum."""
    # NOTE(review): the injected `instance_changes` fixture is immediately
    # rebound below; the fixture parameter could be dropped.
    frm1 = "Node1"
    frm2 = "Node2"
    view_no = 1
    quorum = 2
    time_provider = MockTimestamp(0)
    instance_changes = InstanceChanges(tconf, time_provider)
    msg = InstanceChange(view_no, Suspicions.PRIMARY_DEGRADED.code)
    instance_changes.add_vote(msg, frm1)
    time_provider.value += (tconf.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL/2)
    instance_changes.add_vote(msg, frm2)
    # frm1's vote is now past the interval; frm2's is still within it.
    time_provider.value += (tconf.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL/2) + 1
    assert not instance_changes.has_quorum(view_no, quorum)
    assert instance_changes.has_view(view_no)
    assert instance_changes[view_no].msg == msg
    assert not instance_changes.has_inst_chng_from(view_no, frm1)
    assert instance_changes.has_inst_chng_from(view_no, frm2)
def test_instance_changes_has_quorum_when_enough_distinct_votes_are_added(instance_changes):
    """Quorum is reached once `quorum` distinct nodes vote for the same view."""
    quorum = 2
    view_no = 1
    assert not instance_changes.has_quorum(view_no, quorum)
    for i in range(quorum):
        instance_changes.add_vote(InstanceChange(view_no, Suspicions.PRIMARY_DEGRADED.code),
                                  "Node{}".format(i))
    assert instance_changes.has_quorum(view_no, quorum)
def test_old_ic_discard(instance_changes, tconf):
    """Each accessor (has_view / has_inst_chng_from / has_quorum) expires outdated votes."""
    # NOTE(review): the injected `instance_changes` fixture is immediately
    # rebound below; the fixture parameter could be dropped.
    frm = "Node1"
    view_no = 1
    quorum = 2
    time_provider = MockTimestamp(0)
    instance_changes = InstanceChanges(tconf, time_provider)
    msg = InstanceChange(view_no, Suspicions.PRIMARY_DEGRADED.code)
    time_provider.value = 0
    instance_changes.add_vote(msg, frm)
    time_provider.value += tconf.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL + 1
    assert not instance_changes.has_view(view_no)
    instance_changes.add_vote(msg, frm)
    time_provider.value += tconf.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL + 1
    assert not instance_changes.has_inst_chng_from(view_no, frm)
    instance_changes.add_vote(msg, frm)
    time_provider.value += tconf.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL + 1
    assert not instance_changes.has_quorum(view_no, quorum)
|
{"/plenum/test/server/test_models.py": ["/plenum/server/models.py"]}
|
7,787
|
Yoshinori-Koide-PRO/indy-plenum
|
refs/heads/master
|
/plenum/test/pool_transactions/test_txn_pool_manager.py
|
import pytest
from plenum.common.config_helper import PNodeConfigHelper
from plenum.test.test_node import TestNode
from stp_core.loop.eventually import eventually
from plenum.common.metrics_collector import MetricsName
from plenum.test.helper import sdk_send_random_and_check
from plenum.common.txn_util import get_type, get_payload_data
from plenum.common.constants import TARGET_NYM, NODE, \
CLIENT_STACK_SUFFIX, DATA, ALIAS, SERVICES, VALIDATOR, TXN_PAYLOAD
from plenum.test.pool_transactions.helper import demote_node
# Pool configuration consumed by fixtures in this test module.
nodeCount = 7
nodes_wth_bls = 0
def test_twice_demoted_node_dont_write_txns(txnPoolNodeSet,
                                            looper, sdk_wallet_stewards, sdk_pool_handle):
    """A node demoted twice in a row stays demoted and stops writing txns."""
    request_count = 5
    demoted_node = txnPoolNodeSet[2]
    alive_pool = list(txnPoolNodeSet)
    alive_pool.remove(demoted_node)

    def get_node_prods_count(node):
        # Number of prod (event-loop) runs recorded by the node's metrics.
        return node.metrics._accumulators[MetricsName.NODE_PROD_TIME].count

    def is_prods_run(node, old, diff):
        # Succeeds once the node has run at least `diff` more prods.
        new = get_node_prods_count(node)
        assert old + diff <= new

    # Demote the same node twice — the second demotion must be harmless.
    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, demoted_node)
    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, demoted_node)
    demoted_nym = None
    for _, txn in txnPoolNodeSet[0].poolManager.ledger.getAllTxn():
        txn_data = get_payload_data(txn)
        if txn_data[DATA][ALIAS] == demoted_node.name:
            demoted_nym = txn_data[TARGET_NYM]
            break
    assert demoted_nym
    # Every node demote `demoted_node`
    assert all(node.poolManager.reqHandler.getNodeData(demoted_nym)[SERVICES] == []
               for node in alive_pool)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], request_count)
    old = get_node_prods_count(txnPoolNodeSet[0])
    # Let primary node make 2 prod runs so we make sure that
    # node did not appear in network reconnection
    looper.run(eventually(is_prods_run, txnPoolNodeSet[0], old, 2))
    assert txnPoolNodeSet[0].domainLedger.size - request_count == \
        demoted_node.domainLedger.size
def test_get_nym_by_name(txnPoolNodeSet, pool_node_txns):
    # Baseline: name -> nym resolution works on an untouched pool.
    check_get_nym_by_name(txnPoolNodeSet, pool_node_txns)
def test_get_nym_by_name_not_in_registry(txnPoolNodeSet, pool_node_txns):
    """Resolution still works after removing nodes from every node registry."""
    nodes_to_remove = [txnPoolNodeSet[4].name, txnPoolNodeSet[5].name]
    for node in txnPoolNodeSet:
        for node_to_remove in nodes_to_remove:
            del node.nodeReg[node_to_remove]
            del node.cliNodeReg[node_to_remove + CLIENT_STACK_SUFFIX]
    check_get_nym_by_name(txnPoolNodeSet, pool_node_txns)
def test_get_nym_by_name_demoted(txnPoolNodeSet, pool_node_txns,
                                 looper, sdk_wallet_stewards, sdk_pool_handle):
    """Resolution still works for a demoted node."""
    demote_node(looper, sdk_wallet_stewards[0], sdk_pool_handle,
                txnPoolNodeSet[0])
    check_get_nym_by_name(txnPoolNodeSet, pool_node_txns)
def check_get_nym_by_name(txnPoolNodeSet, pool_node_txns):
    """Assert each node's pool manager resolves its own name to the nym
    recorded in the corresponding pool transaction."""
    for i in range(nodeCount):
        node = txnPoolNodeSet[i]
        resolved_nym = node.poolManager.get_nym_by_name(node.name)
        expected_nym = get_payload_data(pool_node_txns[i])[TARGET_NYM]
        assert resolved_nym
        assert resolved_nym == expected_nym
|
{"/plenum/test/server/test_models.py": ["/plenum/server/models.py"]}
|
7,788
|
Yoshinori-Koide-PRO/indy-plenum
|
refs/heads/master
|
/plenum/server/batch_handlers/config_batch_handler.py
|
from plenum.common.constants import CONFIG_LEDGER_ID
from plenum.server.batch_handlers.batch_request_handler import BatchRequestHandler
from plenum.server.database_manager import DatabaseManager
class ConfigBatchHandler(BatchRequestHandler):
    """Batch handler bound to the config ledger; batch hooks are no-ops."""

    def __init__(self, database_manager: DatabaseManager):
        super().__init__(database_manager, CONFIG_LEDGER_ID)

    def commit_batch(self, txn_count, state_root, txn_root, pp_time, prev_result):
        # NOTE(review): pure delegation — possibly satisfying an abstract
        # method on BatchRequestHandler; confirm before removing.
        return super().commit_batch(txn_count, state_root, txn_root, pp_time, prev_result)

    def post_batch_applied(self, state_root):
        # No config-specific work after applying a batch.
        pass

    def post_batch_rejected(self):
        # No config-specific work after rejecting a batch.
        pass
|
{"/plenum/test/server/test_models.py": ["/plenum/server/models.py"]}
|
7,789
|
Yoshinori-Koide-PRO/indy-plenum
|
refs/heads/master
|
/plenum/server/batch_handlers/pool_batch_handler.py
|
from plenum.common.constants import POOL_LEDGER_ID
from plenum.server.batch_handlers.batch_request_handler import BatchRequestHandler
from plenum.server.database_manager import DatabaseManager
class PoolBatchHandler(BatchRequestHandler):
    """Batch handler bound to the pool ledger; batch hooks are no-ops."""

    def __init__(self, database_manager: DatabaseManager):
        super().__init__(database_manager, POOL_LEDGER_ID)

    def commit_batch(self, txn_count, state_root, txn_root, pp_time, prev_result):
        # NOTE(review): pure delegation — possibly satisfying an abstract
        # method on BatchRequestHandler; confirm before removing.
        return super().commit_batch(txn_count, state_root, txn_root, pp_time, prev_result)

    def post_batch_applied(self, state_root):
        # No pool-specific work after applying a batch.
        pass

    def post_batch_rejected(self):
        # No pool-specific work after rejecting a batch.
        pass
|
{"/plenum/test/server/test_models.py": ["/plenum/server/models.py"]}
|
7,790
|
Yoshinori-Koide-PRO/indy-plenum
|
refs/heads/master
|
/plenum/test/tracker/test_ledger_uncommitted_tracker.py
|
import pytest
import random
from plenum.common.util import randomString
from plenum.common.ledger_uncommitted_tracker import LedgerUncommittedTracker
from common.exceptions import PlenumValueError, LogicError
@pytest.fixture()
def state_root():
    # Random 32-char byte string standing in for a state root hash.
    return randomString(32).encode()
@pytest.fixture()
def init_committed_root():
    # Root hash the tracker starts from.
    return "initial_committed_root_hash"
@pytest.fixture()
def init_ledger_size():
    # Ledger size the tracker starts from.
    return 10
@pytest.fixture()
def make_tracker(init_committed_root, init_ledger_size):
    # Fresh tracker seeded with the initial committed root and size.
    return LedgerUncommittedTracker(init_committed_root, init_ledger_size)
def test_reject_batch_errors_when_no_uncommitted(make_tracker):
    # Rejecting with no tracked uncommitted batch is a programming error.
    with pytest.raises(LogicError):
        make_tracker.reject_batch()
def test_last_committed_is_equal_when_one_item_uncommitted(make_tracker,
                                                           init_committed_root,
                                                           state_root,
                                                           init_ledger_size):
    """Rejecting a single uncommitted batch returns the committed root and
    the number of txns rolled back."""
    uncommitted_txns_count = 5
    make_tracker.apply_batch(state_root, init_ledger_size + uncommitted_txns_count)
    test_tuple = (init_committed_root, uncommitted_txns_count)
    assert test_tuple == make_tracker.reject_batch()
def test_last_committed_is_equal_when_multiple_items_uncommitted(make_tracker,
                                                                 init_committed_root):
    """Rejections unwind uncommitted batches LIFO (sizes 10 -> 12 -> 16)."""
    make_tracker.apply_batch("uncommitted_state_root_1", 12)
    make_tracker.apply_batch("uncommitted_state_root_2", 16)
    assert ("uncommitted_state_root_1", 4) == make_tracker.reject_batch()
    assert (init_committed_root, 2) == make_tracker.reject_batch()
def test_error_with_no_state_root_track_uncommitted(make_tracker):
    """An empty state root is rejected with PlenumValueError."""
    with pytest.raises(PlenumValueError):
        make_tracker.apply_batch("", 12)
def test_error_with_invalid_ledger_size(make_tracker, state_root):
    """A negative ledger size is rejected with PlenumValueError.

    Fixed: the range used to be ``random.randint(-99, 0)``, which includes 0,
    but test_apply_batch_with_zero_ledger_size below shows that a size of 0
    is accepted — so this test failed intermittently whenever randint
    happened to produce 0.
    """
    with pytest.raises(PlenumValueError):
        make_tracker.apply_batch(state_root, random.randint(-99, -1))
def test_error_with_revert_empty_tracker(make_tracker):
    """reject_batch() on an empty tracker is a LogicError.
    NOTE(review): duplicates test_reject_batch_errors_when_no_uncommitted."""
    with pytest.raises(LogicError):
        make_tracker.reject_batch()
def test_apply_batch_success(make_tracker, state_root):
    """apply_batch() accepts any positive ledger size without raising."""
    make_tracker.apply_batch(state_root, random.randint(1, 100))
def test_commit_batch_success(make_tracker,
                              state_root,
                              init_ledger_size):
    """commit_batch() promotes the newest uncommitted (root, size) pair to
    last_committed.
    NOTE(review): the state_root and init_ledger_size fixtures are unused."""
    test_ledger_size = 1000
    test_root = "test_root"
    make_tracker.apply_batch(test_root, test_ledger_size)
    make_tracker.commit_batch()
    assert make_tracker.last_committed == (test_root, test_ledger_size)
def test_raise_error_if_commit_without_un_committed(make_tracker):
    """commit_batch() with nothing uncommitted raises with a descriptive
    message."""
    with pytest.raises(PlenumValueError, match="commit_batch was called, but there is no tracked uncommitted states"):
        make_tracker.commit_batch()
def test_not_remove_last_committed_after_reject_last_batch(make_tracker):
    """Rejecting the last uncommitted batch must keep last_committed intact."""
    test_ledger_size = 1000
    test_root = "test_root"
    make_tracker.apply_batch(test_root, test_ledger_size)
    make_tracker.reject_batch()
    assert make_tracker.last_committed is not None
def test_apply_batch_with_zero_ledger_size(make_tracker):
    """A ledger size of exactly zero is accepted without raising."""
    test_ledger_size = 0
    test_root = "test_root"
    make_tracker.apply_batch(test_root, test_ledger_size)
|
{"/plenum/test/server/test_models.py": ["/plenum/server/models.py"]}
|
7,791
|
Yoshinori-Koide-PRO/indy-plenum
|
refs/heads/master
|
/plenum/test/replica/test_api.py
|
import types
from copy import copy
import pytest
from common.exceptions import LogicError, PlenumValueError
from plenum.test.primary_selection.test_primary_selector import FakeNode
from plenum.test.testing_utils import FakeSomething
from plenum.common.constants import POOL_LEDGER_ID, CURRENT_PROTOCOL_VERSION, DOMAIN_LEDGER_ID
from plenum.common.messages.node_messages import PrePrepare
from plenum.common.types import f
from plenum.server.suspicion_codes import Suspicions
from plenum.test.bls.conftest import fake_state_root_hash, fake_multi_sig, fake_multi_sig_value
from plenum.test.helper import sdk_random_request_objects, create_pre_prepare_params, create_pre_prepare_no_bls, \
create_prepare, generate_state_root
from stp_zmq.zstack import ZStack
nodeCount = 4
@pytest.fixture()
def fake_node(tdir, tconf):
    """FakeNode whose replica 0 acts as primary "Alpha:0", with state-root
    lookup and pre-prepare application stubbed so processPrePrepare can run
    without a real ledger."""
    node = FakeNode(tdir, config=tconf)
    node.isParticipating = True
    replica = node.replicas[0]
    state_root = "EuDgqga9DNr4bjH57Rdq6BRtvCN1PV9UX5Mpnm9gbMAZ"
    replica.node.isParticipating = True
    # Report the same state root for every ledger.
    replica.stateRootHash = lambda ledger, to_str=False: state_root
    replica._apply_pre_prepare = lambda a, b: None
    # NOTE(review): primaryNames is filled *before* primaryName is set to
    # "Alpha:0" below, so it records the pre-existing name — confirm intended.
    replica.primaryNames[replica.viewNo] = replica.primaryName
    replica._gc = lambda args: None
    replica.primaryName = "Alpha:0"
    return node
@pytest.fixture(scope="function")
def fake_replica(replica):
    """The shared `replica` fixture with request finalisation, BLS processing
    and pre-prepare application stubbed out, acting as its own primary."""
    replica.nonFinalisedReqs = lambda a: []
    replica._bls_bft_replica.validate_pre_prepare = lambda a, b: None
    replica._bls_bft_replica.update_prepare = lambda a, b: a
    replica._bls_bft_replica.process_prepare = lambda a, b: None
    replica._apply_pre_prepare = lambda a, b: None
    replica.primaryName = "Alpha:{}".format(replica.instId)
    replica.primaryNames[replica.viewNo] = replica.primaryName
    return replica
@pytest.fixture(scope="function",
                params=[generate_state_root(), None],
                ids=lambda x: 'None' if x is None else 'not_None')
def pool_state_root(request):
    """Parametrised pool state root: either a fresh random root or None."""
    return request.param
@pytest.fixture(scope="function", params=[True, False])
def pre_prepare(replica, pool_state_root, fake_state_root_hash, fake_multi_sig, request):
    """PrePrepare message for the replica's current view; when the fixture
    param is True, the BLS multi-signature field is attached to the message.

    Fixed: the multi-sig used to be set on the fixture *function* object
    (``setattr(pre_prepare, ...)``) instead of the message ``pp``, so the
    True/False parametrisation had no effect on the message under test.
    """
    params = create_pre_prepare_params(state_root=fake_state_root_hash,
                                       view_no=replica.viewNo,
                                       pool_state_root=pool_state_root)
    pp = PrePrepare(*params)
    if request.param:
        setattr(pp, f.BLS_MULTI_SIG.nm, fake_multi_sig)
    return pp
def test_is_next_pre_prepare(replica):
    """__is_next_pre_prepare with a view_no different from the replica's
    raises LogicError with a descriptive message."""
    pp_view_no = 2
    pp_seq_no = 1
    replica._last_ordered_3pc = (1, 2)
    assert replica.viewNo != pp_view_no
    with pytest.raises(LogicError) as excinfo:
        replica._Replica__is_next_pre_prepare(pp_view_no, pp_seq_no)
    assert (("{} is not equal to current view_no {}"
             .format(pp_view_no, replica.viewNo)) in str(excinfo.value))
def test_last_prepared_certificate_in_view(replica):
    """Calling last_prepared_certificate_in_view on a non-master replica is a
    LogicError."""
    with pytest.raises(LogicError) as excinfo:
        replica.last_prepared_certificate_in_view()
    assert "is not a master" in str(excinfo.value)
def test_order_3pc_key(replica):
    """order_3pc_key for an unknown 3PC key raises ValueError."""
    with pytest.raises(ValueError) as excinfo:
        replica.order_3pc_key((1, 1))
    assert ("no PrePrepare with a 'key' {} found"
            .format((1, 1))) in str(excinfo.value)
def test_can_pp_seq_no_be_in_view(replica):
    """A view_no greater than the replica's current view is rejected with
    PlenumValueError."""
    view_no = replica.viewNo + 1
    assert replica.viewNo < view_no
    with pytest.raises(PlenumValueError) as excinfo:
        replica.can_pp_seq_no_be_in_view(view_no, 1)
    assert ("expected: <= current view_no {}"
            .format(replica.viewNo)) in str(excinfo.value)
def test_is_msg_from_primary_doesnt_crash_on_msg_with_view_greater_than_current(replica):
    """isMsgFromPrimary must tolerate a message claiming a future view."""
    class FakeMsg:
        def __init__(self, viewNo):
            self.viewNo = viewNo
    invalid_view_no = 1 if replica.viewNo is None else replica.viewNo + 1
    # This shouldn't crash
    replica.isMsgFromPrimary(FakeMsg(invalid_view_no), "some_sender")
def test_remove_stashed_checkpoints_doesnt_crash_when_current_view_no_is_greater_than_last_stashed_checkpoint(replica):
    """_remove_stashed_checkpoints must tolerate the node's view being ahead
    of the last stashed checkpoint's view."""
    till_3pc_key = (1, 1)
    replica.stashedRecvdCheckpoints[1] = {till_3pc_key: {}}
    setattr(replica.node, 'viewNo', 2)
    # This shouldn't crash
    replica._remove_stashed_checkpoints(till_3pc_key)
def test_last_prepared_none_if_no_prepares(replica):
    """
    There are no prepares for this replica.  In that case we expect
    last_prepared_certificate_in_view() to return None.
    """
    replica.isMaster = True
    assert len(replica.prepares) == 0
    assert replica.last_prepared_certificate_in_view() is None
def test_last_prepared_sertificate_return_max_3PC_key(replica):
    """
    All the prepares have a quorum.  Expected result: the certificate is the
    max 3PC key over all of the prepares' keys, i.e. (0, 2).
    """
    replica.isMaster = True
    replica.prepares.clear()
    prepare1 = create_prepare(req_key=(0, 1),
                              state_root='8J7o1k3mDX2jtBvgVfFbijdy6NKbfeJ7SfY3K1nHLzQB')
    prepare1.voters = ('Alpha:0', 'Beta:0', 'Gamma:0', 'Delta:0')
    replica.prepares[(0, 1)] = prepare1
    # NOTE(review): prepare2 is created with req_key=(0, 1) but stored under
    # key (0, 2) — looks like a copy-paste slip; confirm req_key should be (0, 2).
    prepare2 = create_prepare(req_key=(0, 1),
                              state_root='EuDgqga9DNr4bjH57Rdq6BRtvCN1PV9UX5Mpnm9gbMAZ')
    prepare2.voters = ('Alpha:0', 'Beta:0', 'Gamma:0', 'Delta:0')
    replica.prepares[(0, 2)] = prepare2
    assert replica.last_prepared_certificate_in_view() == (0, 2)
def test_lst_sertificate_return_max_3PC_key_of_quorumed_prepare(replica):
    """
    The prepare with key (0, 2) has no quorum, so the expected max 3PC key is
    (0, 1), whose prepare does have a quorum.
    """
    replica.isMaster = True
    replica.prepares.clear()
    prepare1 = create_prepare(req_key=(0, 1),
                              state_root='8J7o1k3mDX2jtBvgVfFbijdy6NKbfeJ7SfY3K1nHLzQB')
    prepare1.voters = ('Alpha:0', 'Beta:0', 'Gamma:0', 'Delta:0')
    replica.prepares[(0, 1)] = prepare1
    # NOTE(review): prepare2 uses req_key=(0, 1) but is stored under (0, 2);
    # confirm whether req_key should be (0, 2).
    prepare2 = create_prepare(req_key=(0, 1),
                              state_root='EuDgqga9DNr4bjH57Rdq6BRtvCN1PV9UX5Mpnm9gbMAZ')
    prepare2.voters = ('Delta:0',)
    replica.prepares[(0, 2)] = prepare2
    assert replica.last_prepared_certificate_in_view() == (0, 1)
def test_request_prepare_doesnt_crash_when_primary_is_not_connected(replica):
    """_request_prepare must tolerate an unknown / disconnected primary."""
    replica.primaryName = 'Omega:0'
    replica.node.request_msg = lambda t, d, r: None
    # This shouldn't crash
    replica._request_prepare((0, 1))
def test_create_3pc_batch_with_empty_requests(replica):
    """create_3pc_batch with no queued requests still yields a PrePrepare
    whose reqIdr list is empty."""
    def patched_stateRootHash(self, ledger_id, to_str=None):
        return "EuDgqga9DNr4bjH57Rdq6BRtvCN1PV9UX5Mpnm9gbMAZ"
    replica.stateRootHash = types.MethodType(patched_stateRootHash, replica)
    pp = replica.create_3pc_batch(0)
    assert pp is not None
    assert pp.reqIdr == []
def test_create_3pc_batch(replica):
root_hash = ["EuDgqga9DNr4bjH57Rdq6BRtvCN1PV9UX5Mpnm9gbMAZ",
"QuDgqga9DNr4bjH57Rdq6BRtvCN1PV9UX5Mpnm9gbMAZ"]
requests = sdk_random_request_objects(2, identifier="did",
protocol_version=CURRENT_PROTOCOL_VERSION)
ledger_id = POOL_LEDGER_ID
replica.consume_req_queue_for_pre_prepare = \
lambda ledger, tm, view_no, pp_seq_no: (requests, [], [])
replica.stateRootHash = lambda ledger, to_str=False: root_hash[ledger]
pre_prepare_msg = replica.create_3pc_batch(ledger_id)
assert pre_prepare_msg.poolStateRootHash == root_hash[POOL_LEDGER_ID]
assert pre_prepare_msg.stateRootHash == root_hash[ledger_id]
assert pre_prepare_msg.ppSeqNo == 1
assert pre_prepare_msg.ledgerId == ledger_id
assert pre_prepare_msg.viewNo == replica.viewNo
assert pre_prepare_msg.instId == replica.instId
assert pre_prepare_msg.reqIdr == [req.digest for req in requests]
assert f.BLS_MULTI_SIG.nm not in pre_prepare_msg
def test_process_pre_prepare_validation(fake_replica,
pre_prepare,
pool_state_root,
fake_state_root_hash):
state_roots = [pool_state_root, fake_state_root_hash]
fake_replica.stateRootHash = lambda ledger, to_str=False: state_roots[ledger]
def reportSuspiciousNodeEx(ex):
assert False, ex
fake_replica.node.reportSuspiciousNodeEx = reportSuspiciousNodeEx
fake_replica.processPrePrepare(pre_prepare, fake_replica.primaryName)
def test_process_pre_prepare_validation_old_schema(fake_replica,
pre_prepare,
pool_state_root,
fake_state_root_hash):
serialized_pp = ZStack.serializeMsg(pre_prepare)
deserialized_pp = ZStack.deserializeMsg(serialized_pp)
new_schema = copy(PrePrepare.schema)
PrePrepare.schema = tuple(y for y in PrePrepare.schema if y[0] != f.POOL_STATE_ROOT_HASH.nm)
assert f.POOL_STATE_ROOT_HASH.nm not in PrePrepare.schema
pp = PrePrepare(**deserialized_pp)
state_roots = [pool_state_root, fake_state_root_hash]
fake_replica.stateRootHash = lambda ledger, to_str=False: state_roots[ledger]
def reportSuspiciousNodeEx(ex):
assert False, ex
fake_replica.node.reportSuspiciousNodeEx = reportSuspiciousNodeEx
fake_replica.processPrePrepare(pp, fake_replica.primaryName)
PrePrepare.schema = new_schema
def test_process_pre_prepare_with_incorrect_pool_state_root(fake_replica):
state_roots = ["EuDgqga9DNr4bjH57Rdq6BRtvCN1PV9UX5Mpnm9gbMAZ",
"C95JmfG5DYAE8ZcdTTFMiwcZaDN6CRVdSdkhBXnkYPio"]
fake_replica.stateRootHash = lambda ledger, to_str=False: state_roots[ledger]
def reportSuspiciousNodeEx(ex):
assert Suspicions.PPR_POOL_STATE_ROOT_HASH_WRONG.code == ex.code
fake_replica.node.reportSuspiciousNodeEx = reportSuspiciousNodeEx
pp = create_pre_prepare_no_bls(state_roots[DOMAIN_LEDGER_ID],
fake_replica.viewNo,
"HSai3sMHKeAva4gWMabDrm1yNhezvPHfXnGyHf2ex1L4")
fake_replica.processPrePrepare(pp, fake_replica.primaryName)
def test_process_pre_prepare_with_not_final_request(fake_node):
fake_node.seqNoDB = FakeSomething(get=lambda req: (None, None))
replica = fake_node.replicas[0]
pp = create_pre_prepare_no_bls(replica.stateRootHash(DOMAIN_LEDGER_ID))
replica.nonFinalisedReqs = lambda a: set(pp.reqIdr)
def reportSuspiciousNodeEx(ex):
assert False, ex
replica.node.reportSuspiciousNodeEx = reportSuspiciousNodeEx
def request_propagates(reqs):
assert reqs == set(pp.reqIdr)
replica.node.request_propagates = request_propagates
replica.processPrePrepare(pp, replica.primaryName)
assert (pp, replica.primaryName, set(pp.reqIdr)) in replica.prePreparesPendingFinReqs
def test_process_pre_prepare_with_ordered_request(fake_node):
fake_node.seqNoDB = FakeSomething(get=lambda req: (1, 1))
replica = fake_node.replicas[0]
pp = create_pre_prepare_no_bls(replica.stateRootHash(DOMAIN_LEDGER_ID))
replica.nonFinalisedReqs = lambda a: pp.reqIdr
def reportSuspiciousNodeEx(ex):
assert ex.code == Suspicions.PPR_WITH_ORDERED_REQUEST.code
replica.node.reportSuspiciousNodeEx = reportSuspiciousNodeEx
def request_propagates(reqs):
assert False, "Requested propagates for: {}".format(reqs)
replica.node.request_propagates = request_propagates
replica.processPrePrepare(pp, replica.primaryName)
assert (pp, replica.primaryName, set(pp.reqIdr)) not in replica.prePreparesPendingFinReqs
|
{"/plenum/test/server/test_models.py": ["/plenum/server/models.py"]}
|
7,800
|
wangsp90/form01
|
refs/heads/master
|
/form_test/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import View
from .forms import message
# Create your views here.
class index(View):
    """Message-form view: GET renders an empty form, POST validates the
    submission and echoes the cleaned fields to stdout."""
    def get(self, request):
        # Unbound form for initial display.
        return render(request, 'index.html', context={"form": message()})
    def post(self, request):
        bound = message(request.POST)
        if not bound.is_valid():
            print(bound.errors)
            return HttpResponse("Error!")
        cleaned = bound.cleaned_data
        print("=" * 20)
        # Same fields, same order as the original explicit prints.
        for field in ("title", "content", "email", "reply"):
            print(cleaned.get(field))
        print("=" * 20)
        return HttpResponse("success!")
|
{"/form_test/views.py": ["/form_test/forms.py"]}
|
7,801
|
wangsp90/form01
|
refs/heads/master
|
/form_test/forms.py
|
#encoding utf-8
from django import forms
class message(forms.Form):
    """Simple feedback form (labels are user-facing Chinese strings:
    title / content / email / reply)."""
    # Title: 4-255 characters, required by default.
    title=forms.CharField(max_length=255,min_length=4,label='标题')
    # Body text, rendered as a <textarea>.
    content=forms.CharField(widget=forms.Textarea,label='内容')
    # Sender email address, required.
    email=forms.EmailField(required=True,label='邮箱')
    # Optional checkbox: whether the sender wants a reply.
    reply=forms.BooleanField(required=False,label='回复')
|
{"/form_test/views.py": ["/form_test/forms.py"]}
|
7,802
|
wangsp90/form01
|
refs/heads/master
|
/form_test/models.py
|
from django.db import models
# The grade table is fixed; the employee table stores employee information, and there is also an employee contact-details table.
'''
class article(models.Model):
NAME=models.CharField(null=False,max_length=255)
CONTENT=models.TextField()
class Meta:
db_table='article'
'''
|
{"/form_test/views.py": ["/form_test/forms.py"]}
|
7,806
|
jellyDL/Lightweight-Segmentation
|
refs/heads/master
|
/light/data/segdata.py
|
"""SegmentationData Dataloader"""
import os
import random
import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image, ImageOps, ImageFilter
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import torch.utils.data as data
class SegmentationData(Dataset):
    """Custom semantic-segmentation dataset.

    Pairs each RGB image in ``images_dir`` with a ``.npy`` label mask of the
    same basename in ``masks_dir``.  Mask values greater than
    ``nb_classes - 1`` are collapsed to class 0.

    Parameters
    ----------
    images_dir : str
        Directory containing the input images.
    masks_dir : str
        Directory containing ``<image_basename>.npy`` mask arrays.
    nb_classes : int
        Number of segmentation classes.
    mode : str, optional
        'train', 'valid', 'test' or 'testval'; selects the transform
        pipeline used by ``__getitem__``.
    transform : callable, optional
        Final image transform (e.g. ToTensor + Normalize).
    base_size : int
        Reference size for random rescaling during training.
    crop_size : int
        Side length of the square crop returned in train/valid modes.
    """
    NUM_CLASS = 3
    def __init__(self, images_dir, masks_dir, nb_classes, mode=None,
                 transform=None, base_size=480, crop_size=480, **kwargs):
        super(SegmentationData, self).__init__()
        self.mode = mode
        self.nb_classes = nb_classes
        self.ids = os.listdir(images_dir)
        self.transform = transform
        self.base_size = base_size
        self.crop_size = crop_size
        self.images = [os.path.join(images_dir, image_id) for image_id in self.ids]
        # A mask shares its image's basename but uses the .npy extension.
        self.masks = [os.path.join(masks_dir, image_id.split('.')[0] + '.npy')
                      for image_id in self.ids]
        assert (len(self.images) == len(self.masks))
        if len(self.images) == 0:
            raise RuntimeError("Found 0 images in subfolders of: " + images_dir + "\n")
    def __len__(self):
        # Fixed: __len__ was defined twice in this class (once returning
        # len(self.ids), later len(self.images)); the lists are built in
        # lockstep, so a single definition suffices.
        return len(self.images)
    def __getitem__(self, index):
        """Return (image, mask) for train/valid/testval modes, or
        (image, filename) in 'test' mode."""
        img = Image.open(self.images[index]).convert('RGB')
        if self.mode == 'test':
            if self.transform is not None:
                img = self.transform(img)
            return img, os.path.basename(self.images[index])
        mask = np.load(self.masks[index])
        mask[mask > self.nb_classes - 1] = 0  # collapse unknown classes to 0
        mask = Image.fromarray(mask)
        # Synchronized image/mask transform.
        if self.mode == 'train':
            img, mask = self._sync_transform(img, mask)
        elif self.mode == 'valid':
            img, mask = self._val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            img, mask = self._img_transform(img), self._mask_transform(mask)
        # General resize, normalize and toTensor.
        if self.transform is not None:
            img = self.transform(img)
        return img, mask
    def _val_sync_transform(self, img, mask):
        """Deterministic validation transform: resize the short edge to
        crop_size, then center-crop, identically for image and mask."""
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)  # NEAREST keeps labels intact
        # Center crop.
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # Final conversion to array / tensor.
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask
    def _sync_transform(self, img, mask):
        """Random training augmentation: mirror, rescale, pad, crop and blur,
        applied identically to image and mask."""
        # Random horizontal mirror.
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # Random scale of the short edge within [0.5, 2.0] x base_size.
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = img.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # Pad if the rescaled image is smaller than the crop.
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # Random crop of crop_size x crop_size.
        w, h = img.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        img = img.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # Gaussian blur as in PSP.
        if random.random() < 0.5:
            img = img.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        # Final conversion to array / tensor.
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask
    def _img_transform(self, img):
        """PIL image -> numpy array (H, W, C)."""
        return np.array(img)
    def _mask_transform(self, mask):
        """Mask -> torch LongTensor of class indices."""
        return torch.LongTensor(np.array(mask).astype('int32'))
    @property
    def num_class(self):
        """Number of categories."""
        return self.nb_classes
# def _get_city_pairs(folder, split='train'):
# def get_path_pairs(img_folder, mask_folder):
# img_paths = []
# mask_paths = []
# for root, _, files in os.walk(img_folder):
# for filename in files:
# if filename.endswith(".png"):
# imgpath = os.path.join(root, filename)
# foldername = os.path.basename(os.path.dirname(imgpath))
# maskname = filename.replace('leftImg8bit', 'gtFine_labelIds')
# maskpath = os.path.join(mask_folder, foldername, maskname)
# if os.path.isfile(imgpath) and os.path.isfile(maskpath):
# img_paths.append(imgpath)
# mask_paths.append(maskpath)
# else:
# print('cannot find the mask or image:', imgpath, maskpath)
# print('Found {} images in the folder {}'.format(len(img_paths), img_folder))
# return img_paths, mask_paths
# if split in ('train', 'val'):
# img_folder = os.path.join(folder, 'leftImg8bit/' + split)
# mask_folder = os.path.join(folder, 'gtFine/' + split)
# img_paths, mask_paths = get_path_pairs(img_folder, mask_folder)
# return img_paths, mask_paths
# else:
# assert split == 'trainval'
# print('trainval set')
# train_img_folder = os.path.join(folder, 'leftImg8bit/train')
# train_mask_folder = os.path.join(folder, 'gtFine/train')
# val_img_folder = os.path.join(folder, 'leftImg8bit/val')
# val_mask_folder = os.path.join(folder, 'gtFine/val')
# train_img_paths, train_mask_paths = get_path_pairs(train_img_folder, train_mask_folder)
# val_img_paths, val_mask_paths = get_path_pairs(val_img_folder, val_mask_folder)
# img_paths = train_img_paths + val_img_paths
# mask_paths = train_mask_paths + val_mask_paths
# return img_paths, mask_paths
def mask_to_image(mask):
    """Render a (H, W) class-index mask as an RGB PIL image.

    Class 0 -> dark purple, 1 -> teal, 2 -> yellow, anything else -> white.
    """
    height, width = mask.shape[0], mask.shape[1]
    rendered = Image.new('RGB', (width, height))
    for row in range(height):
        for col in range(width):
            value = mask[row, col]
            if value == 0:
                color = (61, 10, 81)
            elif value == 1:
                color = (69, 142, 139)
            elif value == 2:
                color = (250, 231, 85)
            else:
                color = (255, 255, 255)
            rendered.putpixel((col, row), color)
    return rendered
if __name__ == '__main__':
    # Visual smoke test: iterate both loaders and save side-by-side
    # image/mask figures under train/ and valid/.
    nb_classes = 3
    train_img_dir = '/home/wangjialei/teeth_dataset/new_data_20190621/train_new/images'
    train_mask_dir = '/home/wangjialei/teeth_dataset/new_data_20190621/train_new/masks'
    valid_img_dir = '/home/wangjialei/teeth_dataset/new_data_20190621/valid_new/images'
    valid_mask_dir = '/home/wangjialei/teeth_dataset/new_data_20190621/valid_new//masks'
    train_transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.519401, 0.359217, 0.310136], [0.061113, 0.048637, 0.041166]),#R_var is 0.061113, G_var is 0.048637, B_var is 0.041166
    ])
    valid_transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.517446, 0.360147, 0.310427], [0.061526,0.049087, 0.041330])#R_var is 0.061526, G_var is 0.049087, B_var is 0.041330
    ])
    train_set = SegmentationData(images_dir=train_img_dir, masks_dir=train_mask_dir, nb_classes=nb_classes, mode='train', transform=train_transform)
    train_loader = data.DataLoader(dataset=train_set, batch_size = 1, shuffle = True, num_workers=4, pin_memory=True)
    valid_set = SegmentationData(images_dir=valid_img_dir, masks_dir=valid_mask_dir, nb_classes=nb_classes, mode='valid', transform=valid_transform)
    valid_loader = data.DataLoader(dataset=valid_set, batch_size = 1, shuffle = False, num_workers=4, pin_memory=True)
    # train
    for iteration, (images, targets) in enumerate(train_loader):
        if iteration % 10 == 0:
            print("### proc ", iteration, " / ", len(train_loader))
        # Scale back to 0-255 for display.
        # NOTE(review): the Normalize transform is not undone here, so the
        # displayed image is not the true original — confirm intended.
        img = images[0].numpy()*255
        img = img.astype('uint8')
        img = np.transpose(img,(1,2,0))
        mask = targets[0]
        mask[mask > nb_classes - 1] = 0
        mask = mask_to_image(mask)
        plt.subplot(1, 2, 1)
        plt.title('image')
        plt.imshow(img)
        plt.subplot(1, 2, 2)
        plt.title('mask')
        plt.imshow(mask)
        save_file = "train/"
        os.makedirs(save_file, exist_ok=True)
        plt.savefig(os.path.join(save_file, str(iteration) + '.png'))
    #valid (same procedure as the train loop above)
    for iteration, (images, targets) in enumerate(valid_loader):
        if iteration % 10 == 0:
            print("### proc ", iteration, " / ", len(valid_loader))
        img = images[0].numpy()*255
        img = img.astype('uint8')
        img = np.transpose(img,(1,2,0))
        mask = targets[0]
        mask[mask > nb_classes - 1] = 0
        mask = mask_to_image(mask)
        plt.subplot(1, 2, 1)
        plt.title('image')
        plt.imshow(img)
        plt.subplot(1, 2, 2)
        plt.title('mask')
        plt.imshow(mask)
        save_file = "valid/"
        os.makedirs(save_file, exist_ok=True)
        plt.savefig(os.path.join(save_file, str(iteration) + '.png'))
|
{"/scripts/eval.py": ["/light/data/segdata.py"]}
|
7,807
|
jellyDL/Lightweight-Segmentation
|
refs/heads/master
|
/scripts/eval.py
|
import os
import sys
cur_path = os.path.abspath(os.path.dirname(__file__))
root_path = os.path.split(cur_path)[0]
sys.path.append(root_path)
import torch
import torch.utils.data as data
import torch.backends.cudnn as cudnn
from torchvision import transforms
from light.data import get_segmentation_dataset
from light.model import get_segmentation_model
from light.utils.metric import SegmentationMetric
from light.utils.visualize import get_color_pallete
from light.utils.logger import setup_logger
from light.utils.distributed import synchronize, get_rank, make_data_sampler, make_batch_data_sampler
from train import parse_args
from light.data.segdata import SegmentationData
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
def mask_to_image(mask):
    """Render a (H, W) class-index mask as an RGB PIL image.

    Class 0 -> dark purple, 1 -> teal, 2 -> yellow, anything else -> white.
    NOTE: duplicated from light/data/segdata.py.
    """
    height, width = mask.shape[0], mask.shape[1]
    rendered = Image.new('RGB', (width, height))
    for row in range(height):
        for col in range(width):
            value = mask[row, col]
            if value == 0:
                color = (61, 10, 81)
            elif value == 1:
                color = (69, 142, 139)
            elif value == 2:
                color = (250, 231, 85)
            else:
                color = (255, 255, 255)
            rendered.putpixel((col, row), color)
    return rendered
def data_process(x):
    """Convert an HWC uint8 image (or NHWC batch) into a normalized NCHW
    float32 tensor using the validation-set channel mean/std."""
    arr = np.array(x, dtype='float32') / 255
    if arr.ndim < 4:
        arr = np.expand_dims(arr, 0)  # add the batch axis
    tensor = torch.from_numpy(np.transpose(arr, (0, 3, 1, 2)))
    mean = torch.tensor([0.517446, 0.360147, 0.310427], dtype=torch.float32)
    std = torch.tensor([0.061526, 0.049087, 0.041330], dtype=torch.float32)
    # In-place subtract/divide, broadcasting over (C, 1, 1).
    tensor.sub_(mean[:, None, None])
    tensor.div_(std[:, None, None])
    return tensor
class Evaluator(object):
    """Validation / test harness for a trained segmentation model.

    Builds the validation DataLoader and the model from ``args``.  ``eval()``
    scores the validation set (pixAcc / mIoU); ``test()`` runs inference on a
    hard-coded folder of images.  Both rely on the module-level ``logger``
    and ``args.save_pred`` configured in ``__main__``.
    """
    def __init__(self, args):
        self.args = args
        self.device = torch.device(args.device)
        # Number of segmentation classes for this dataset.
        self.nb_classes = 3
        # NOTE(review): dataset paths are hard-coded to one machine.
        valid_img_dir = '/home/wangjialei/teeth_dataset/new_data_20190621/valid_new/images'
        valid_mask_dir = '/home/wangjialei/teeth_dataset/new_data_20190621/valid_new/masks'
        # valid_transform=transforms.Compose([
        #     # transforms.ToTensor(),
        #     # transforms.Normalize([0.517446, 0.360147, 0.310427], [0.061526,0.049087, 0.041330])#R_var is 0.061526, G_var is 0.049087, B_var is 0.041330
        #     ])
        # dataset and dataloader; transform=None because normalization is done
        # later by data_process() in eval()/test().
        valid_set = SegmentationData(images_dir=valid_img_dir, masks_dir=valid_mask_dir, nb_classes=self.nb_classes, mode='valid', transform=None)
        valid_sampler = make_data_sampler(valid_set, False, args.distributed)
        valid_batch_sampler = make_batch_data_sampler(valid_sampler, images_per_batch=1)
        self.val_loader = data.DataLoader(dataset=valid_set,
                                          batch_sampler=valid_batch_sampler,
                                          num_workers=args.workers,
                                          pin_memory=True)
        # create network
        self.model = get_segmentation_model(model=args.model, dataset=args.dataset,
                                            aux=args.aux, pretrained=True, pretrained_base=False)
        if args.distributed:
            self.model = self.model.module
        self.model.to(self.device)
        self.metric = SegmentationMetric(valid_set.num_class)
    def eval(self):
        """Score the whole validation set, logging running pixAcc/mIoU;
        when args.save_pred, also save image/mask/prediction figures."""
        self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model
        logger.info("Start validation, Total sample: {:d}".format(len(self.val_loader)))
        for i, (image, target) in enumerate(self.val_loader):
            # Normalize and move to device; `image` stays raw for plotting.
            img = data_process(image)
            img = img.to(self.device)
            target = target.to(self.device)
            with torch.no_grad():
                outputs = model(img)
            self.metric.update(outputs, target)
            pixAcc, mIoU = self.metric.get()
            logger.info("Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
                i + 1, pixAcc * 100, mIoU * 100))
            if self.args.save_pred:
                # outputs[0] is assumed to be the main head's logits —
                # confirm against the model API.
                pred = torch.argmax(outputs[0], 1)
                pred = pred.cpu().data.numpy()
                predict = pred.squeeze(0)
                img_show = image[0].numpy()
                img_show = img_show.astype('uint8')
                plt.subplot(1, 3, 1)
                plt.title('image')
                plt.imshow(img_show)
                mask = target.cpu().data.numpy()
                mask = mask.reshape(mask.shape[1], mask.shape[2])
                mask = mask_to_image(mask)
                plt.subplot(1, 3, 2)
                plt.title('mask')
                plt.imshow(mask)
                predict = mask_to_image(predict)
                plt.subplot(1, 3, 3)
                plt.title('pred')
                plt.imshow(predict)
                save_file = "save_fig_val"
                os.makedirs(save_file, exist_ok=True)
                plt.savefig(os.path.join(save_file, str(i) + '.png'))
        synchronize()
    def test(self):
        """Run inference on every image in a hard-coded folder and save
        image/prediction figures (no ground truth, no metrics)."""
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model
        # NOTE(review): test image folder is hard-coded.
        test_img_dir = '/home/wangjialei/projects/teeth_bad_case/'
        img_folder = os.listdir(test_img_dir)
        for iter, img_file in enumerate(img_folder):
            img_name = test_img_dir + img_file
            image = Image.open(img_name)
            print(type(image))
            img = data_process(image)
            img = img.to(self.device)
            with torch.no_grad():
                outputs = model(img)
            if self.args.save_pred:
                pred = torch.argmax(outputs[0], 1)
                pred = pred.cpu().data.numpy()
                predict = pred.squeeze(0)
                img_show = image
                plt.subplot(1, 2, 1)
                plt.title('image')
                plt.imshow(img_show)
                predict = mask_to_image(predict)
                plt.subplot(1, 2, 2)
                plt.title('pred')
                plt.imshow(predict)
                save_file = "save_fig_test"
                os.makedirs(save_file, exist_ok=True)
                plt.savefig(os.path.join(save_file, str(iter) + '.png'))
if __name__ == '__main__':
    args = parse_args()
    # WORLD_SIZE is set by torch.distributed launchers; >1 GPU => distributed.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    if not args.no_cuda and torch.cuda.is_available():
        cudnn.benchmark = True
        args.device = "cuda"
    else:
        args.distributed = False
        args.device = "cpu"
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
        synchronize()
    # TODO: optim code
    # Prediction saving is forced on regardless of CLI flags.
    args.save_pred = True
    if args.save_pred:
        outdir = '../runs/pred_pic/{}_{}'.format(args.model, args.dataset)
        if not os.path.exists(outdir):
            os.makedirs(outdir)
    # Module-level logger used by Evaluator.eval()/test().
    logger = setup_logger(args.model, args.log_dir, get_rank(),
                          filename='{}_{}_log.txt'.format(args.model, args.dataset), mode='a+')
    evaluator = Evaluator(args)
    # evaluator.eval()
    evaluator.test()
|
{"/scripts/eval.py": ["/light/data/segdata.py"]}
|
7,816
|
paulmakepeace/descrypi
|
refs/heads/main
|
/descrypi/ssh.py
|
"""SSH helper."""
from descrypi.run import run
DEFAULT_SSH_USER = "pi"
# -T disables pseudo-tty since we don't need it (non-interactive)
SSH_COMMAND = ["ssh", "-T", "-o", "StrictHostKeyChecking=no"]
def ssh_user_host(host):
    """Return the user@host ssh target, e.g. "pi@192.168.1.2"."""
    return f"{DEFAULT_SSH_USER}@{host}"
def ssh(host, command, stdin=None):
    """Run 'command' (list) on 'host' via ssh and return run()'s result.

    stdin is an optional string piped to the remote command's stdin.
    """
    return run([*SSH_COMMAND, ssh_user_host(host), *command], stdin=stdin)
|
{"/descrypi/ssh.py": ["/descrypi/run.py"], "/descrypi/passwd.py": ["/descrypi/ssh.py"], "/descrypi/network.py": ["/descrypi/ssh.py"], "/descrypi/arp.py": ["/descrypi/ieee_ra.py"], "/descrypi/copy_ssh_keys.py": ["/descrypi/run.py"]}
|
7,817
|
paulmakepeace/descrypi
|
refs/heads/main
|
/descrypi/macipsconfig.py
|
"""class MACIPsConfig."""
import os.path
import json
class MACIPsConfig:
    """Hosts inventory database.

    Maintains a JSON file, persisted between runs, that maps each discovered
    host IP to its MAC address, model and an optional manually "assigned"
    static IP.  The format matches an Ansible inventory file so it can also
    drive more sophisticated automation using Ansible.
    """
    # NOTE(review): unused duplicate of INVENTORY_FILE below.
    ANSIBLE_INVENTORY_FILE = "hosts.json"
    ANSIBLE_HOST_GROUP = "pi"
    ANSIBLE_SSH_USER = "pi"
    ANSIBLE_PYTHON = "/usr/bin/python"
    INVENTORY_FILE = "hosts.json"
    # Placeholder stored until a user manually assigns a static IP.
    IP_NOT_SET = "Set this for a static IP"
    def __init__(self):
        self.file = self.INVENTORY_FILE
        # Default (empty) inventory structure; replaced by load() below.
        self.config = {
            self.ANSIBLE_HOST_GROUP: {
                "hosts": {},
                "vars": {
                    "ansible_ssh_user": self.ANSIBLE_SSH_USER,
                    "ansible_python_interpreter": self.ANSIBLE_PYTHON,
                },
            }
        }
        self.load()
    def dump(self):
        """Write out the database."""
        with open(self.file, "w", encoding="utf-8") as f:
            json.dump(self.config, f, indent=4)
    def load(self):
        """Load the JSON inventory, creating it first if it doesn't exist."""
        if not os.path.exists(self.file):
            self.dump()
        with open(self.file, encoding="utf-8") as f:
            self.config = json.load(f)
    def hosts(self):
        """Return the actual hosts part of the inventory data structure."""
        return self.config[self.ANSIBLE_HOST_GROUP]["hosts"]
    def record(self, mac_ips):
        """Record the latest scan with the existing database.

        mac_ips is an iterable of (mac, ip, model) tuples.  Returns a list of
        (mac, ip, model, new, assigned) changes where 'new' means a
        previously unseen IP and 'assigned' is truthy if the existing entry
        has a manually assigned IP.  Entries with an assigned IP are not
        overwritten.
        """
        changes = []
        hosts = self.hosts()
        for mac, ip, model in mac_ips:
            if ip in hosts:
                new = False
                # Truthy only when the stored value differs from the placeholder.
                assigned = (
                    hosts[ip]["assigned"] != self.IP_NOT_SET and hosts[ip]["assigned"]
                )
            else:
                new, assigned = True, False
            changes.append((mac, ip, model, new, assigned))
            if not assigned:
                hosts[ip] = {"mac": mac, "model": model, "assigned": self.IP_NOT_SET}
        self.dump()
        return changes
    def current_hosts(self):
        """Return a list of current host IPs."""
        return list(self.hosts().keys())
def current_hosts():
    """Shortcut to return all known host IPs from the inventory file."""
    return MACIPsConfig().current_hosts()
|
{"/descrypi/ssh.py": ["/descrypi/run.py"], "/descrypi/passwd.py": ["/descrypi/ssh.py"], "/descrypi/network.py": ["/descrypi/ssh.py"], "/descrypi/arp.py": ["/descrypi/ieee_ra.py"], "/descrypi/copy_ssh_keys.py": ["/descrypi/run.py"]}
|
7,818
|
paulmakepeace/descrypi
|
refs/heads/main
|
/descrypi/passwd.py
|
"""Change password.
The Pi will quite rightly issue a warning on each login if the default
password is not changed. This command updates the password.
"""
from getpass import getpass
import sys
from descrypi.ssh import ssh
from descrypi.ssh import DEFAULT_SSH_USER
def change_password(hosts):
    """Update user's password on each host using `passwd`.

    Prompts interactively for the current and new passwords, then feeds
    them over SSH to the remote `passwd` for every host in `hosts`.
    Exits with status 1 if the two new-password entries differ.
    """
    print(f"Changing password for {DEFAULT_SSH_USER}.")
    # Empty input falls back to the Raspberry Pi OS default password.
    current_password = (
        getpass("Current password (<Return> for default): ") or "raspberry"
    )
    new_password = getpass("New password: ")
    new_password2 = getpass("Retype new password: ")
    if new_password != new_password2:
        sys.stderr.write("Sorry, passwords do not match.\n")
        sys.exit(1)
    # `passwd` reads current, new, and confirmation from stdin in that order.
    passwd_stdin = f"{current_password}\n{new_password}\n{new_password}\n"
    for host in hosts:
        print(f"Changing password on {host} ...")
        if ssh(host, ["/usr/bin/passwd"], stdin=passwd_stdin):
            print("passwd: password updated successfully!")
|
{"/descrypi/ssh.py": ["/descrypi/run.py"], "/descrypi/passwd.py": ["/descrypi/ssh.py"], "/descrypi/network.py": ["/descrypi/ssh.py"], "/descrypi/arp.py": ["/descrypi/ieee_ra.py"], "/descrypi/copy_ssh_keys.py": ["/descrypi/run.py"]}
|
7,819
|
paulmakepeace/descrypi
|
refs/heads/main
|
/bin/executable.py
|
"""Silly path mangling so bin/* can work."""
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], ".."))
|
{"/descrypi/ssh.py": ["/descrypi/run.py"], "/descrypi/passwd.py": ["/descrypi/ssh.py"], "/descrypi/network.py": ["/descrypi/ssh.py"], "/descrypi/arp.py": ["/descrypi/ieee_ra.py"], "/descrypi/copy_ssh_keys.py": ["/descrypi/run.py"]}
|
7,820
|
paulmakepeace/descrypi
|
refs/heads/main
|
/descrypi/ieee_ra.py
|
"""Query the IEEE Registration Authority for MAC prefixes.
Each physical device has a MAC address made of six parts, the first three
indicate who made the device. We use this prefix to identify the model/maker.
These prefixes are handed out by the IEEE Registration Authority and are
queryable at https://regauth.standards.ieee.org/standards-ra-web/pub/view.html
The Raspberry Pi folks have two MAC prefixes. It's possible in the future
they'll add another. This file demonstrates how to query the IEEE RA database
for those MAC prefixes.
For some reason the Rock Pi doesn't appear in the IEEE RA.
"""
import csv
import textwrap
import urllib.parse
import urllib.request
# Known MAC prefixes by model, sorted
MAC_PREFIX_MAP = {
"Raspberry Pi": ("B8:27:EB", "DC:A6:32"),
"Rock Pi": ("EA:62:9D",),
}
MAC_PREFIXES = [
mac for macs in MAC_PREFIX_MAP.values() for mac in macs
] # ['B8:27:EB', ...]
MAC_PREFIX_MODEL_MAP = dict(
# {"B8:27:EB": "Raspberry Pi", ...}
((mac, model) for model, macs in MAC_PREFIX_MAP.items() for mac in macs)
)
# URL to query the IEEE Registration Authority for updated MAC prefixes
# Browser URL is https://regauth.standards.ieee.org/standards-ra-web/pub/view.html
IEEE_RA_URL = (
"https://services13.ieee.org/RST/standards-ra-web/rest/assignments/download/"
)
def model_by_mac_prefix(mac_prefix):
"""Take a three octet MAC prefix and return the device model."""
return MAC_PREFIX_MODEL_MAP.get(mac_prefix.upper(), "Unknown")
def ieee_ra_url(query):
    """Return a URL to query IEEE Registration Authority's MAC database."""
    # Equivalent to:
    # curl "https://services13.ieee.org/RST/standards-ra-web/rest/assignments/download/"\
    #      "?registry=MAC&text=%22raspberry%20pi%22"
    query_string = urllib.parse.urlencode({"registry": "MAC", "text": query})
    return f"{IEEE_RA_URL}?{query_string}"
def macs_from_ieee_ra(query):
    """Query IEEE Reg Authority for `query`.

    Returns a sorted tuple of colon-separated MAC prefixes, or an empty
    tuple when the HTTP request fails.

    Data looks like,

    Registry,Assignment,Organization Name,Organization Address
    MA-L,DCA632,Raspberry Pi Trading Ltd,"Maurice Wilkes Building, Cowley Road Cambridge GB CB4 0DS "
    MA-L,B827EB,Raspberry Pi Foundation,Mitchell Wood House Caldecote Cambridgeshire US CB23 7NU
    """
    try:
        with urllib.request.urlopen(ieee_ra_url(query)) as f:
            # Response is bytes; decode each line before handing to csv.
            reader = csv.reader(
                (line.decode() for line in f), delimiter=",", quotechar='"'
            )
            next(reader)  # skip header
            # Split "01AB23" into "01:AB:23"
            return tuple(sorted((":".join(textwrap.wrap(row[1], 2)) for row in reader)))
    except urllib.error.HTTPError as err:
        print("IEEE RA query failed:", err, "(may be transient; try again?)")
        return ()
def check_ieee_macs(query='"raspberry pi"', model="Raspberry Pi"):
    """Check and return new MAC prefixes, else return False.

    Compares the live IEEE RA result for `query` against the prefixes we
    already know for `model`; returns the fresh tuple only when it differs.
    """
    macs = MAC_PREFIX_MAP[model]
    new_macs = macs_from_ieee_ra(query)
    return new_macs if macs != new_macs else False
|
{"/descrypi/ssh.py": ["/descrypi/run.py"], "/descrypi/passwd.py": ["/descrypi/ssh.py"], "/descrypi/network.py": ["/descrypi/ssh.py"], "/descrypi/arp.py": ["/descrypi/ieee_ra.py"], "/descrypi/copy_ssh_keys.py": ["/descrypi/run.py"]}
|
7,821
|
paulmakepeace/descrypi
|
refs/heads/main
|
/descrypi/run.py
|
"""Helper functions for running external programs.
"""
import shutil
import subprocess
import sys
def ensure_executable(app, brew_package=None, apt_package=None):
    """Ensure app is executable in $PATH or suggest how to get it.

    Exits the process with status 1 (after a stderr hint naming the brew
    and apt package) when `app` cannot be found.
    """
    if shutil.which(app):
        return
    # Package names default to the executable name itself.
    brew = brew_package if brew_package is not None else app
    apt = apt_package if apt_package is not None else app
    sys.stderr.write(
        f"Missing `{app}`. Try `brew install {brew}`, "
        f"`apt-get install {apt}`, etc\n"
    )
    sys.exit(1)
def run(command, stdin=None):
    """Run `command` (an argv list) with optional `stdin` text.

    Returns the combined stdout/stderr text on success, or None (after
    echoing the output to stderr) when the command exits non-zero.
    """
    with subprocess.Popen(
        command,
        stdin=(subprocess.PIPE if stdin else None),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    ) as process:
        if stdin:
            # Closing the pipe signals EOF to the child.
            with process.stdin as pipe:
                pipe.write(stdin.encode())
        captured = process.stdout.read().decode()
        process.wait()
    if process.returncode == 0:
        return captured
    sys.stderr.write("Failed: " + captured)
    return None
|
{"/descrypi/ssh.py": ["/descrypi/run.py"], "/descrypi/passwd.py": ["/descrypi/ssh.py"], "/descrypi/network.py": ["/descrypi/ssh.py"], "/descrypi/arp.py": ["/descrypi/ieee_ra.py"], "/descrypi/copy_ssh_keys.py": ["/descrypi/run.py"]}
|
7,822
|
paulmakepeace/descrypi
|
refs/heads/main
|
/descrypi/network.py
|
"""Networking helpers.
Query for local and remote network Interfaces (interface, ip, subnet, gateway).
"""
import ipaddress
import re
import subprocess
import sys
from descrypi.ssh import ssh
# We're using the technically deprecated `ifconfig` because it works nearly
# identically on macOS (BSD) and Linux.
class Interface:
    """Interface is the config for a hardware interface.

    * Interface name, e.g. eth0 (name)
    * IPv4 IP address (ip)
    * IPv4 Network (network)
    * Gateway/router address (gateway)

    Two class methods exist to fill in these fields from `ifconfig` and `ip route`.
    """

    def __init__(self, interface, ip=None, network=None, gateway=None):
        self.name = interface
        self.ip = ip
        self.network = network
        self.gateway = gateway

    # Break the parsing into two stages: extract the interface blocks, then
    # extract the network info. Done as a single regex seemed to trigger a ton of
    # backtracking that took minutes(!)
    IFCONFIG_INTERFACES_RE = re.compile(
        r"^(?P<interface>[a-z]+[0-9]+): .*<UP,.*\n"
        + r"^(?P<block>(?:\s+.*\n)+)",  # indented info
        re.MULTILINE,
    )

    # Linux: inet 169.254.46.250 netmask 255.255.0.0 broadcast 169.254.255.255
    # macOS: inet 169.254.201.244 netmask 0xffff0000 broadcast 169.254.255.255
    IFCONFIG_INET_RE = re.compile(
        r"inet (?P<ip>\S+)\s+"
        r"netmask\s+(?P<netmask>\S+)\s+"
        r"broadcast\s+(?P<broadcast>\S+)"
    )

    def ifconfig(self, ifconfig):
        """Parse 'ifconfig' to fill out object info.

        Accepts both the dotted (Linux) and hex 0x... (macOS) netmask forms.
        Returns self so it can be chained from the constructor.
        """
        match = self.IFCONFIG_INET_RE.search(ifconfig)
        if match:
            self.ip, netmask, broadcast = match.groups()
            if netmask[0:2] == "0x":
                # Hex netmask: the prefix length is the number of set bits.
                netmask = bin(int(netmask, 16)).count("1")
            self.network = str(
                ipaddress.IPv4Network(f"{broadcast}/{netmask}", strict=False)
            )
        return self

    @classmethod
    def from_ifconfigs(cls, ifconfigs):
        """Return [Interface, ...] for UP ether interfaces.

        (Actually a generator; only blocks with both an ether and inet line
        are yielded.)
        """
        return (
            cls(interface).ifconfig(block)
            for (interface, block) in cls.IFCONFIG_INTERFACES_RE.findall(ifconfigs)
            if "ether " in block and "inet " in block
        )

    # $ ip route
    # default via 192.168.2.1 dev wlan0 proto dhcp src 192.168.2.65 metric 303
    # 192.168.2.0/24 dev wlan0 proto dhcp scope link src 192.168.2.65 metric 303
    #
    # If there's no route it'll be missing the "via <gw>" (and probably proto):
    # default dev eth0 scope link src 169.254.110.40 metric 202
    # 169.254.0.0/16 dev eth0 scope link src 169.254.110.40 metric 202
    IP_GATEWAY_IFACE_IP_RE = r"dev (?P<interface>(?:eth|wlan)0) .* src (?P<ip>[0-9.]+)"
    IP_GATEWAY_RE = re.compile(
        r"default (?:via (?P<gateway>[0-9.]+) )?" + IP_GATEWAY_IFACE_IP_RE
    )
    IP_SUBNET_RE = re.compile(r"(?P<subnet>[0-9.]+/[0-9]+) " + IP_GATEWAY_IFACE_IP_RE)

    @classmethod
    def from_ip_route(cls, ip_route, host):
        """Return Interface from `ip route` output.

        Exits the process when the subnet for the host's interface cannot
        be found; warns (but continues) on a missing gateway.
        """
        # Being pretty defensive cross-checking here as I'm less familiar with all
        # the possibilities.
        interface = ip = gateway = subnet = None
        match = cls.IP_GATEWAY_RE.search(ip_route)
        if match:
            if match.group("ip") != host:
                sys.stderr.write(
                    f"Found surprise host {match.group('ip')} in `ip route` "
                    f"(expected {host})\n"
                )
            else:
                gateway, interface, ip = (
                    match.group("gateway"),
                    match.group("interface"),
                    match.group("ip"),
                )
                if gateway is None:
                    sys.stderr.write(f"Host {host} appears to be missing a gateway\n")
        # Find the subnet entry that matches the same interface and source IP.
        for match in cls.IP_SUBNET_RE.finditer(ip_route):
            if match.group("interface") == interface and match.group("ip") == ip:
                subnet = match.group("subnet")
        if not subnet:
            sys.stderr.write(f"Couldn't find {interface} and {ip} in remote `ip route`\n")
            sys.exit(1)
        return cls(interface, ip=host, network=subnet, gateway=gateway)
def remote_interface(host):
    """Query `ip route` on 'host' to return Interface (None if ssh fails)."""
    # This currently returns the config for the interface for 'host'. Maybe we
    # want another interface's config. E.g. connect over Wi-Fi but configure
    # Ethernet?
    ip_route = ssh(host, ["ip", "route"])
    return None if ip_route is None else Interface.from_ip_route(ip_route, host)
def local_interfaces(interface=None):
    """Query `ifconfig` locally to return [Interface, ...].

    If 'interface' is given, restrict the query to that one interface.
    """
    command = ["ifconfig"]
    if interface is not None:
        command.append(interface)
    # Fold stderr into stdout so errors don't leak to the terminal unparsed.
    with subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    ) as process:
        return Interface.from_ifconfigs(process.stdout.read().decode())
|
{"/descrypi/ssh.py": ["/descrypi/run.py"], "/descrypi/passwd.py": ["/descrypi/ssh.py"], "/descrypi/network.py": ["/descrypi/ssh.py"], "/descrypi/arp.py": ["/descrypi/ieee_ra.py"], "/descrypi/copy_ssh_keys.py": ["/descrypi/run.py"]}
|
7,823
|
paulmakepeace/descrypi
|
refs/heads/main
|
/descrypi/arp.py
|
"""Parse the ARP table for MAC and IP addresses.
We restrict `arp` usage to the BSD format so that it works on macOS and Linux
without getting into platform switching.
"""
import re
import subprocess
import descrypi.ieee_ra
# -a shows the current table. -n skips reverse DNS lookup (no use for name)
# -a shows the current table. -n skips reverse DNS lookup (no use for name)
ARP_COMMAND = "arp -n -a".split()

# There are two `arp -a` outputs here, Linux and macOS. Fortunately the
# interesting part is the same.
# ? (169.254.201.244) at 00:3e:e1:c7:3b:26 [ether] on eth0
# ? (169.254.46.250) at dc:a6:32:19:1d:88 on en1 [ethernet]

# Regex to match known MAC prefixes, e.g., "B8:27:EB|DC:A6:32|EA:62:9D"
MAC_PREFIX_RE = "|".join(descrypi.ieee_ra.MAC_PREFIXES)

# Regex to catch rest of the MAC (suffix), e.g., "12:ab:9"
# ({1,2} because leading zeros may be dropped in arp output.)
MAC_OCTET_TRIPLE_RE = ":".join([r"[0-9a-f]{1,2}"] * 3)

# Regex to extract IP, MAC, and its prefix from an `arp -a` line
MACS_RE = re.compile(
    rf"""
  ^\S+[ ]
  \((?P<ip>[^)]+)\)
  [ ]at[ ]
  (?P<mac>(?P<prefix>{MAC_PREFIX_RE}):(?:{MAC_OCTET_TRIPLE_RE}))
  """,
    re.MULTILINE | re.IGNORECASE | re.VERBOSE,
)


def find_pi_mac_ips():
    """Run `arp -a` and return array of (Pi MAC address, IP) tuples.

    Each element is actually a (mac, ip, model) triple, where model comes
    from descrypi.ieee_ra.model_by_mac_prefix.
    """
    with subprocess.Popen(ARP_COMMAND, stdout=subprocess.PIPE) as process:
        arp = process.stdout.read().decode()
        return [
            (
                m.group("mac"),
                m.group("ip"),
                descrypi.ieee_ra.model_by_mac_prefix(m.group("prefix")),
            )
            for m in MACS_RE.finditer(arp)
        ]
|
{"/descrypi/ssh.py": ["/descrypi/run.py"], "/descrypi/passwd.py": ["/descrypi/ssh.py"], "/descrypi/network.py": ["/descrypi/ssh.py"], "/descrypi/arp.py": ["/descrypi/ieee_ra.py"], "/descrypi/copy_ssh_keys.py": ["/descrypi/run.py"]}
|
7,824
|
paulmakepeace/descrypi
|
refs/heads/main
|
/descrypi/fping.py
|
#!/usr/bin/env python3
"""Wrapper for `fping` to rapidly ping the network to fill the ARP cache.
This is useful if the ARP cache expires: by pinging all addresses on an
interface any devices that respond will have their MAC address recorded
for descrypi to find.
Usage: descrypi scan [-i <interface, e.g. eth0>]
We rely on `fping` to do the actual pinging. This wrapper takes an interface
name and constructs a command line to perform the ping with the least
amount of traffic (AFAIK), as quickly as possible without requiring root.
Restricted to IPv4 for now.
"""
import subprocess
# Minimize packet size; only send two; single retry; quick timeout; 1ms between pings
# The second packet can help if the computer is asleep.
FPING_COMMAND = (
    "fping --size 40 --count 2 --retry 1 --timeout=50 --interval 1 --generate %s"
)


def fping(network, progress=False):
    """Execute `fping` against 'network' and return the list of alive IPs.

    A ICMP reply (or not) is reported as,

    169.254.19.148 : xmt/rcv/%loss = 1/1/0%, min/avg/max = 0.25/0.25/0.25
    169.255.255.254 : xmt/rcv/%loss = 0/0/0%

    If 'progress' is true, each alive host is printed as it is found.
    """
    command = (FPING_COMMAND % network).split()
    # Context-manage the Popen object itself (the original only managed its
    # stdout, leaking the child process unreaped).
    with subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    ) as process:
        lines = process.stdout.readlines()
    alive = []
    for line in (line.decode() for line in lines):
        if "rcv" not in line:
            continue
        fields = line.split()
        ip_address, counters = fields[0], fields[4]
        # counters looks like "2/2/0%,". Parse the rcv field by splitting on
        # "/" instead of peeking at a fixed character offset, which broke for
        # multi-digit transmit counts.
        if counters.split("/")[1] != "0":  # rcv'ed something!
            if progress:
                print(f"{ip_address} is alive")
            alive.append(ip_address)
    return alive
def ping(hosts):
    """Ping 'hosts' using fping; yield the addresses reported alive."""
    with subprocess.Popen(
        ["fping"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    ) as process:
        # Closing stdin signals EOF so fping starts pinging.
        with process.stdin as feed:
            feed.write("\n".join(hosts).encode())
        decoded = [raw.decode() for raw in process.stdout.readlines()]
    return (entry.split()[0] for entry in decoded if "is alive" in entry)
|
{"/descrypi/ssh.py": ["/descrypi/run.py"], "/descrypi/passwd.py": ["/descrypi/ssh.py"], "/descrypi/network.py": ["/descrypi/ssh.py"], "/descrypi/arp.py": ["/descrypi/ieee_ra.py"], "/descrypi/copy_ssh_keys.py": ["/descrypi/run.py"]}
|
7,825
|
paulmakepeace/descrypi
|
refs/heads/main
|
/descrypi/copy_ssh_keys.py
|
"""Copy SSH keys.
A wrapper for the `ssh-copy-id` tool to copy over SSH keys, enabling
password-less login.
"""
from getpass import getpass
from descrypi import ssh
from descrypi.run import run
def ssh_copy_id(hosts):
    """Copy SSH keys using `ssh-copy-id`.

    Prompts once for the current password, then installs the local SSH key
    on every host in 'hosts', enabling password-less login.
    """
    print(f"Installing SSH keys for {ssh.DEFAULT_SSH_USER}.")
    # Empty input falls back to the Raspberry Pi OS default password.
    current_password = (
        getpass("Current password (<Return> for default): ") or "raspberry"
    )
    ssh_copy_id_stdin = f"{current_password}\n"
    for host in hosts:
        target = ssh.ssh_user_host(host)
        print(f"Installing SSH keys on {target} ...")
        output = run(["/usr/bin/ssh-copy-id", target], stdin=ssh_copy_id_stdin)
        if output:
            print(output)
|
{"/descrypi/ssh.py": ["/descrypi/run.py"], "/descrypi/passwd.py": ["/descrypi/ssh.py"], "/descrypi/network.py": ["/descrypi/ssh.py"], "/descrypi/arp.py": ["/descrypi/ieee_ra.py"], "/descrypi/copy_ssh_keys.py": ["/descrypi/run.py"]}
|
7,826
|
paulmakepeace/descrypi
|
refs/heads/main
|
/descrypi/assign.py
|
"""Assign a static IP to a Pi.
Currently, the way to assign a static IP to a Pi is to update /etc/dhcpcd.conf
(Historically it was /etc/network/interfaces; no longer however.)
A sample /etc/dhcpcd.conf stanza:
```
interface eth0 # or wlan0 for WiFi
static ip_address=192.168.0.10/24
static routers=192.168.0.1
static domain_name_servers=192.168.0.1
```
So for a given assignment of an IP, we have to figure out:
* what interface this is on, i.e. wired (eth0) or wireless (wlan0)
* the subnet ("/24")
* the router (typically, but not necessarily, x.y.z.1)
* the domain name servers (often the same as the router plus ISP extras)
There are two ways of figuring these out:
* query the workstation
* query the Pi
A foundation is that the Pi and the our workstation are on the same network.
For example we find the Pi by relying on the ARP cache which itself requires
being on the same network. Being on the same network means both interfaces
must be configured the same, so we can query either.
Alternatively, we can query the Pi itself. This will yield network information
for interfaces that are up, but won't help us configure down interfaces.
"""
# import descrypi.network
|
{"/descrypi/ssh.py": ["/descrypi/run.py"], "/descrypi/passwd.py": ["/descrypi/ssh.py"], "/descrypi/network.py": ["/descrypi/ssh.py"], "/descrypi/arp.py": ["/descrypi/ieee_ra.py"], "/descrypi/copy_ssh_keys.py": ["/descrypi/run.py"]}
|
7,832
|
privm/TGbotsimpleaf
|
refs/heads/main
|
/main.py
|
from telegram import Update
from telegram.ext import Updater, CommandHandler, CallbackContext
from telegram import InputMediaPhoto

# import modules
import helloworld

# token for @yourTGbot
# NOTE(review): placeholder literal — replace with the real bot token
# (ideally read from an environment variable, not hard-coded).
updater = Updater('bot_key_here')

# calling a function from a module with a callback
# the logic is ("command's name", callback = module name.function name)
updater.dispatcher.add_handler(CommandHandler('helloworld', callback = helloworld.helloworld))
# add the next ones

# Start long-polling Telegram for updates and block until interrupted.
updater.start_polling()
updater.idle()
|
{"/main.py": ["/helloworld.py"]}
|
7,833
|
privm/TGbotsimpleaf
|
refs/heads/main
|
/helloworld.py
|
from telegram import Update
from telegram.ext import Updater, CommandHandler, CallbackContext
def helloworld(update: Update, context: CallbackContext) -> None:
    """Reply to the /helloworld command with a fixed greeting."""
    update.message.reply_text('Hello world!')
|
{"/main.py": ["/helloworld.py"]}
|
7,834
|
guillaume-havard/test_automation_heroku
|
refs/heads/main
|
/tests/test_main.py
|
import main
def test_add_two_int():
    """func_add should return the sum of two ints."""
    assert main.func_add(3, 3) == 6
|
{"/tests/test_main.py": ["/main.py"]}
|
7,835
|
guillaume-havard/test_automation_heroku
|
refs/heads/main
|
/main.py
|
import flask
def func_add(a: int, b: int) -> int:
    """Return the sum of `a` and `b`."""
    total = a + b
    return total
# WSGI application instance used by the routes below.
app = flask.Flask(__name__)


@app.route("/")
def index():
    """Landing page; HTML is returned inline."""
    return "<h1 style='color:black'>Coucou !</h1>"


@app.route("/pouet")
def pouet():
    """Simple plain-text sanity-check endpoint."""
    return "Pouet !"
|
{"/tests/test_main.py": ["/main.py"]}
|
7,841
|
natecraddock/page-zipper
|
refs/heads/master
|
/widgets.py
|
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
from tkinter.font import Font
import os
import utils
import zipper
class LabeledEntry(tk.Frame):
    '''A tkinter widget for a labeled entry.

    NOTE(review): __init__ reads self.entry before assigning it, so this base
    class relies on a subclass setting self.entry (a tk Variable) *before*
    calling this __init__ — see LabeledIntEntry / LabeledStringEntry.
    '''
    def __init__(self, parent, label="Entry:"):
        tk.Frame.__init__(self, parent)

        tk.Label(self, text=label).grid(row=0, column=0, sticky='nse')
        tk.Entry(self, textvariable=self.entry).grid(row=0, column=1, sticky='nsew', padx=5)

        # Entry column absorbs all extra horizontal space.
        self.columnconfigure(0, weight=0)
        self.columnconfigure(1, weight=1)

    def set(self, value):
        '''Set the underlying tk variable's value.'''
        self.entry.set(value)

    def get(self):
        '''Return the underlying tk variable's value.'''
        return self.entry.get()
class LabeledIntEntry(LabeledEntry):
    '''A LabeledEntry backed by an integer tk variable.'''
    def __init__(self, parent, label="Entry:"):
        # The variable must exist before the base __init__ builds its Entry.
        self.entry = tk.IntVar()
        super().__init__(parent, label)
class LabeledStringEntry(LabeledEntry):
    '''A LabeledEntry backed by a string tk variable.'''
    def __init__(self, parent, label="Entry:"):
        # The variable must exist before the base __init__ builds its Entry.
        self.entry = tk.StringVar()
        super().__init__(parent, label)
class DirectoryBrowser(tk.Frame):
    '''A tkinter widget for a labeled directory browser.

    Shows an editable path entry plus a Browse button; `callback` is invoked
    after a valid directory is chosen.
    '''
    def __init__(self, parent, label="Choose Folder:", callback=lambda: None):
        tk.Frame.__init__(self, parent)
        self.callback = callback

        # Create GUI Elements for the Directory Browser Widget
        self.path = LabeledStringEntry(self, label=label)
        self.path.grid(row=0, column=0, sticky='nsew')
        tk.Button(self, text="Browse", command=self.browse).grid(row=0, column=1, sticky='nsw')

        # Weights
        self.columnconfigure(0, weight=1)
        self.columnconfigure(1, weight=0)

    def browse(self):
        '''Open a folder-picker, store the chosen path, and fire the callback.'''
        # Start from the current entry value when it is a real path,
        # otherwise from the working directory.
        if os.path.exists(self.path.get()):
            directory = filedialog.askdirectory(initialdir=self.path.get())
        else:
            directory = filedialog.askdirectory(initialdir=os.getcwd())

        # Dialog returns '' on cancel; only accept an existing directory.
        if directory:
            if os.path.exists(directory):
                self.path.set(directory)
                self.callback()
class ProgressPopup(tk.Toplevel):
    '''Displays progress with progressbar.

    Modal popup with a determinate progressbar (sized for `steps` calls to
    next()) and a scrolling text log.
    '''
    def __init__(self, title, steps=100):
        tk.Toplevel.__init__(self)

        self.fixed_font = Font(size=10)
        # Pixel height of one log line, used to position log canvas text.
        self.line_height = self.fixed_font.metrics("linespace")

        self.title(title)
        tk.Label(self, text=title).grid(row=0, column=0, sticky='w', padx=5, pady=20, columnspan=2)
        self.progress = ttk.Progressbar(self, orient='horizontal', length=200, mode='determinate', maximum=100)
        self.progress.grid(row=1, column=0, sticky='ew', padx=5, columnspan=2)

        self.scrollbar = tk.Scrollbar(self)
        self.scrollbar.grid(row=2, column=1, sticky='nesw', pady=10)

        self.log = tk.Canvas(self, background='#FFFFFF', width=500, height=150, yscrollcommand=self.scrollbar.set)
        self.log.grid(row=2, column=0, sticky='nesw', pady=10)
        # Next free line index on the log canvas.
        self.log.line_number = 0

        self.scrollbar.config(command=self.log.yview)

        # Make the popup modal: grab all application events.
        self.grab_set()

        # Increment per next() call so `steps` calls reach 100%.
        self.step = 100.0 / steps

    def next(self):
        '''Advance the progressbar by one step and redraw.'''
        self.progress['value'] += self.step
        self.update()

    def log_message(self, line):
        '''Append `line` to the log canvas and keep it scrolled to the end.'''
        self.log.create_text(0, (self.line_height * self.log.line_number), font=self.fixed_font, text=line, anchor='nw')
        self.log.line_number += 1

        self.log.configure(scrollregion=self.log.bbox('all'))
        self.log.yview_moveto(1)
        self.update()
# TODO: Add option to set prefix on the rename
class PrefixEntry(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.prefix = LabeledStringEntry(self, label="File Prefix:")
self.prefix.grid(row=0, column=1, sticky='nsew', padx=5)
self.prefix.set("img_")
class OutputFrame(tk.Frame):
    '''A frame for a thumbnail viewer and browser.

    Shows the merged result (non-grouping viewer), an output-path browser,
    a prefix entry, and a Save button wired to `callback`.
    '''
    def __init__(self, root, callback=lambda: None):
        # Pages currently displayed/saved; filled in by the owning UI.
        self.pages = []

        tk.Frame.__init__(self, root)

        self.viewer = ThumbnailViewer(self, group=False)
        self.browser = DirectoryBrowser(self, "Output Path:")
        self.prefix = PrefixEntry(self)
        self.save_button = tk.Button(self, text="Save", command=callback)

        self.viewer.grid(row=0, column=0, sticky='nesw', padx=10, pady=5)
        self.browser.grid(row=1, column=0, sticky='nesw', padx=10)
        self.prefix.grid(row=2, column=0, sticky='nsw', padx=10, pady=5)
        self.save_button.grid(row=3, column=0, sticky='', pady=5)

        self.columnconfigure(0, weight=1)
class PagesFrame(tk.Frame):
    '''A frame for a thumbnail viewer and browser.

    Loads all images from a chosen directory into Page thumbnails and shows
    them in a grouping-capable ThumbnailViewer; `callback` fires whenever the
    viewer contents change.
    '''
    def __init__(self, root, label, callback=lambda: None):
        self.pages = []
        self.callback = callback

        tk.Frame.__init__(self, root)

        tk.Label(self, text=label).grid(row=0, column=0, sticky='w', padx=5, pady=5, columnspan=2)

        self.browser = DirectoryBrowser(self, "")
        self.viewer = ThumbnailViewer(self, group=True, callback=self.on_viewer_update)

        self.browser.grid(row=1, column=0, sticky='nesw', padx=10)
        self.viewer.grid(row=2, column=0, sticky='nesw', padx=10, pady=5)

        self.columnconfigure(0, weight=1)

        self.browser.callback = self.on_input

    # Create a widget that displays a progressbar as the pages are loaded
    def load_pages(self, directory):
        '''Build Page objects for every loadable image file in `directory`.

        Returns the list of pages, or None when the directory is empty
        (only a message is printed in that case).
        '''
        paths = os.listdir(directory)
        paths.sort()

        if len(paths) > 0:
            progress = ProgressPopup("Loading Pages", len(paths))

            temp = []
            for path in paths:
                # Skip subdirectories; skip files PIL could not open
                # (Page.thumb is None for those).
                if not os.path.isdir(os.path.join(directory, path)):
                    p = utils.Page(os.path.join(directory, path))
                    if p.thumb is not None:
                        temp.append(p)

                progress.next()
                progress.log_message("Created thumbnail for {}".format(path))

            progress.destroy()
            return temp
        else:
            print("No images found")

    def on_input(self):
        '''Browser callback: reload pages from the newly chosen directory.'''
        path = self.browser.path.get()

        # Load pages, then draw in ImageViewer
        self.pages = self.load_pages(path)
        if self.pages:
            self.viewer.reload_pages(self.pages)

    def on_viewer_update(self):
        '''Viewer callback: forward the change notification to our owner.'''
        self.callback()
class ThumbnailViewer(tk.Frame):
    '''A frame that holds a canvas for loaded images. Can scroll horizontally.

    When `group` is true, pages can be selected by clicking and combined
    into / split out of PageGroup objects; `callback` fires after every
    content change.
    '''
    def __init__(self, root, group=False, callback=lambda:None, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        self.root = root

        # pages_in is the pristine input list; pages is the working copy
        # that grouping/ungrouping mutates.
        self.pages_in = []
        self.pages = []
        # Canvas rectangle ids used for click hit-testing, parallel to pages.
        self.hit_boxes = []
        # Indices (into pages) of currently selected thumbnails.
        self.selected = []
        self.scroll_location = None
        self.callback = callback
        self.use_groups = group

        self.canvas = tk.Canvas(self, background='#FFFFFF', height=int(utils.Page.size * 0.85), width=800)
        self.scrollbar = tk.Scrollbar(self, orient='horizontal', command=self.canvas.xview)
        self.canvas.configure(xscrollcommand=self.scrollbar.set)

        self.canvas.grid(row=0, column=0, sticky='ew', columnspan=2)
        self.scrollbar.grid(row=1, column=0, sticky='nesw', columnspan=2)

        # Group/Ungroup buttons only exist in grouping mode.
        if group:
            frame = tk.Frame(self, bd=0)
            frame.grid(row=2, column=0, sticky='w', pady=5)
            tk.Button(frame, text='Group', command=self.group).grid(row=0, column=0, sticky='w')
            tk.Button(frame, text='Ungroup', command=self.ungroup).grid(row=0, column=1, sticky='w')

        self.columnconfigure(0, weight=1)
        self.columnconfigure(1, weight=1)

        self.canvas.configure(scrollregion=self.canvas.bbox('all'))

        self.canvas.bind("<Button-1>", self.on_click)

        self.slider = self.scrollbar.get()

    def reload_pages(self, pages_in=None):
        '''Replace the displayed pages (keeps a working copy) and redraw.'''
        if pages_in is not None:
            self.pages_in = pages_in
        self.pages = self.pages_in[:]
        self.update()

    def update(self):
        '''Clear the selection, redraw, and notify the owner.'''
        self.selected = []
        self.draw()
        self.callback()

    def draw(self):
        '''Redraw every thumbnail, selection highlight, and hit box.'''
        # CLEAR ALL
        self.canvas.delete('all')
        self.hit_boxes = []
        # Remember scroll position so redraw doesn't jump to the start.
        self.scroll_location = self.scrollbar.get()

        for i, page in enumerate(self.pages):
            spacing = 20
            size = utils.Page.size
            # Zero-padded 1-based index label, padded to the widest index.
            n = str(i + 1).zfill(len(str(len(self.pages))))

            # Create a frame for the image and text and save it to a list
            box = (size * i) + (i * spacing), 1, (size * i) + (i * spacing) + size, utils.Page.size + 25

            if i in self.selected:
                self.canvas.create_rectangle(box, fill='lightblue', outline='', tags="background")

            if type(page) is utils.Page:
                name = n + " " + page.name
                self.canvas.create_image((size * i) + (i * spacing), spacing / 2, image=page.thumb, anchor="nw")
                self.canvas.create_text((size * i) + (i * spacing) + (size / 2), int(size * 0.75), font=("tkdefaultfont", 10), text=name, anchor="n")
            elif type(page) is utils.PageGroup:
                # Draw first page of the group
                self.canvas.create_image((size * i) + (i * spacing), spacing / 2, image=page.pages[0].thumb, anchor="nw")
                self.canvas.create_text((size * i) + (i * spacing) + (size / 2), int(size * 0.75), font=("tkdefaultfont", 10), text=n + " " + page.name)

            # Invisible rectangle on top of the thumbnail used for hit-testing.
            self.hit_boxes.append(self.canvas.create_rectangle(box, fill='', outline='', tags="hitbox"))

        self.canvas.configure(scrollregion=self.canvas.bbox('all'))
        self.scrollbar.set(self.scroll_location[0], self.scroll_location[1])

    def on_click(self, event):
        '''Toggle selection of the clicked thumbnail (grouping mode only).'''
        if self.use_groups:
            item = self.canvas.find_withtag('current')

            # Page selected
            if 'hitbox' in self.canvas.gettags(item):
                index = self.hit_boxes.index(item[0])
                if not index in self.selected:
                    self.selected.append(index)
                else:
                    self.selected.remove(index)

            self.selected.sort()
            self.draw()

    # TODO: Groups don't handle gaps well
    def group(self):
        '''Collapse the selected pages into a single PageGroup in place.'''
        if self.selected:
            first_index = self.selected[0]
            # Pop from the highest index down so earlier indices stay valid.
            group = utils.PageGroup([self.pages.pop(first_index) for i in reversed(self.selected)])

            # Replace the grouped pages with the page group
            self.pages.insert(first_index, group)
            self.update()
        else:
            messagebox.showerror("Error", "No images are selected")

    def ungroup(self):
        '''Expand every selected PageGroup back into its member pages.'''
        if self.selected:
            temp = []
            for p in self.pages:
                if type(p) is utils.PageGroup and self.pages.index(p) in self.selected:
                    temp.extend(p.pages)
                else:
                    temp.append(p)

            self.pages = temp[:]
            self.update()
        else:
            messagebox.showerror("Error", "No page groups are selected")
|
{"/widgets.py": ["/utils.py", "/zipper.py"], "/utils.py": ["/widgets.py"], "/__main__.py": ["/ui.py"], "/zipper.py": ["/utils.py", "/widgets.py"], "/ui.py": ["/widgets.py", "/utils.py", "/zipper.py", "/updater.py"]}
|
7,842
|
natecraddock/page-zipper
|
refs/heads/master
|
/utils.py
|
import tkinter as tk
import tempfile
import os
import shutil
import sys
from PIL import Image
import io
import widgets
class Page:
    '''A page object that contains the path to an image file, and the loaded thumbnail of that file'''

    # Thumbnail bounding-box size in pixels (applies to both dimensions).
    size = 250

    def __init__(self, path):
        self.path = path
        # File name without directory or extension.
        self.name = os.path.splitext(os.path.basename(self.path))[0]
        self.thumb = self.make_thumbnail()

    # Returns None if no image can be loaded
    def make_thumbnail(self):
        '''Build a tk.PhotoImage thumbnail of this file, or None on failure.'''
        try:
            image = Image.open(self.path)
            image.thumbnail((Page.size, Page.size))

            # Encode as GIF in memory; tk.PhotoImage accepts GIF data directly.
            b = io.BytesIO()
            image.save(b, 'gif')
            return tk.PhotoImage(data=b.getvalue())
        except (OSError, IsADirectoryError) as err:
            # NOTE(review): IsADirectoryError is a subclass of OSError, so
            # listing it here is redundant (but harmless).
            print("Error Loading Image: {0}".format(err))
            return None
# TODO: include group image
class PageGroup:
'''An object that holds a group of pages'''
def __init__(self, pages):
self.pages = pages
self.name = "Group"
def _create_backup(files, path):
backup = tempfile.TemporaryDirectory()
for file in files:
origin = os.path.join(path, file)
destination = os.path.join(backup.name, file)
if os.path.isfile(origin):
shutil.copy2(origin, destination)
else:
shutil.copytree(origin, destination)
return backup
def _restore_backup(backup, path):
if os.path.exists(path):
shutil.rmtree(path)
shutil.copytree(backup.name, path)
def rename_files(path, start_number, prefix):
    """Copy every file in `path` into `<path>_renamed` with sequential names.

    Files become '<prefix><zero-padded number><ext>' starting at
    `start_number`; subdirectories are carried across unchanged. On failure
    the original directory is restored from a temporary backup. Progress is
    reported in a ProgressPopup.
    """
    files = os.listdir(path)
    files.sort()

    progress = widgets.ProgressPopup("Renaming Files", len(files))

    progress.log_message("Creating backup")
    backup = _create_backup(files, path)

    # Zero-pad width is derived from the largest number we will assign.
    digits = len(os.listdir(path)) + start_number

    # Create temporary directory for renaming
    renamed_directory = path + "_renamed"
    if os.path.exists(renamed_directory):
        shutil.rmtree(renamed_directory)
    os.mkdir(renamed_directory)

    try:
        progress.log_message("Copying Files")
        # Move all of the files to a new directory, with new names
        for file in files:
            origin = os.path.join(path, file)
            if os.path.isfile(origin):
                end = os.path.splitext(file)[1]
                file_name = prefix + str(start_number).zfill(len(str(digits))) + end
                dest = os.path.join(renamed_directory, file_name)
                shutil.copyfile(origin, dest)

                progress.next()
                progress.log_message("Renamed {0} as {1}".format(origin, file_name))
                start_number += 1
            else:
                # Subdirectories keep their original names.
                dest = os.path.join(renamed_directory, file)
                shutil.copytree(origin, dest)
                progress.next()
                progress.log_message("Did not modify {0}".format(origin))
        progress.log_message("Rename Completed")
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. `Exception` keeps the best-effort restore
        # without masking interpreter-level signals.
        progress.log_message("Rename failed, restoring backup")
        _restore_backup(backup, path)

    progress.destroy()
|
{"/widgets.py": ["/utils.py", "/zipper.py"], "/utils.py": ["/widgets.py"], "/__main__.py": ["/ui.py"], "/zipper.py": ["/utils.py", "/widgets.py"], "/ui.py": ["/widgets.py", "/utils.py", "/zipper.py", "/updater.py"]}
|
7,843
|
natecraddock/page-zipper
|
refs/heads/master
|
/__main__.py
|
# Page Zipper v1.1
# Nathan Craddock 2018
# Entry point: build the tkinter UI and enter its main loop.

import ui

ui.create_window()
|
{"/widgets.py": ["/utils.py", "/zipper.py"], "/utils.py": ["/widgets.py"], "/__main__.py": ["/ui.py"], "/zipper.py": ["/utils.py", "/widgets.py"], "/ui.py": ["/widgets.py", "/utils.py", "/zipper.py", "/updater.py"]}
|
7,844
|
natecraddock/page-zipper
|
refs/heads/master
|
/zipper.py
|
import os
import shutil
import itertools
import utils
import widgets
def ungroup(merged):
    """Flatten any PageGroup entries in `merged` into their member pages."""
    flattened = []
    for entry in merged:
        members = entry.pages if type(entry) is utils.PageGroup else [entry]
        flattened.extend(members)
    return flattened
def copy_files(files, out, pre="img_"):
    """Copy each Page in `files` into `out` with sequential zero-padded names.

    The new name is '<pre><1-based index><original extension>'; progress is
    shown in a popup. Behavior matches the previous implementation — this
    replaces the index-loop and the author-flagged "SUPER GROSS" one-liner
    with readable steps.
    """
    progress = widgets.ProgressPopup("Saving Images", len(files))
    width = len(str(len(files)))  # zero-pad to the widest index
    for i, page in enumerate(files, start=1):
        extension = os.path.splitext(page.path)[1]
        new_file = pre + str(i).zfill(width) + extension
        new_path = os.path.join(out, new_file)
        shutil.copy2(page.path, new_path)

        progress.next()
        progress.log_message("Copied {0} to {1}".format(page.path, new_path))
    progress.destroy()
def merge_lists(a, b):
    """Interleave `a` and `b` (a[0], b[0], a[1], b[1], ...), keeping leftovers.

    The previous filter used plain truthiness (`if j`), which dropped
    legitimate falsy elements (0, "", etc.) along with the zip_longest
    padding; only the padding value None should be skipped.
    """
    return [
        item
        for pair in itertools.zip_longest(a, b)
        for item in pair
        if item is not None
    ]
# Testing
def clear_dir(path):
for f in os.listdir(path):
os.remove(path + os.sep + f)
|
{"/widgets.py": ["/utils.py", "/zipper.py"], "/utils.py": ["/widgets.py"], "/__main__.py": ["/ui.py"], "/zipper.py": ["/utils.py", "/widgets.py"], "/ui.py": ["/widgets.py", "/utils.py", "/zipper.py", "/updater.py"]}
|
7,845
|
natecraddock/page-zipper
|
refs/heads/master
|
/ui.py
|
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import sys
import os
import webbrowser
import widgets
import utils
import zipper
import updater
version = 1.2
class HelpFrame(tk.Frame):
    '''Static help/about panel: version, description, GitHub links, and a
    click-to-copy contact email.'''
    def __init__(self, parent):
        global version
        tk.Frame.__init__(self, parent)

        tk.Label(parent, text=f"Page Zipper v{version}", font=("tkdefaultfont", 18)).grid(row=0, pady=10)

        text = "Page Zipper is a tool to aid in the document capture process. It is designed to merge (zip) right and left captured pages of books."
        tk.Message(parent, text=text, width=600).grid(row=1, pady=10)

        # Hyperlink-styled labels; clicking opens the URL in the browser.
        readme = tk.Label(parent, text="View the Readme on GitHub", fg='blue', cursor='hand2')
        readme.bind("<Button-1>", lambda e, l=r'https://github.com/natecraddock/page-zipper/': webbrowser.open(l))
        readme.grid(row=2, pady=5, sticky='ew')

        issue = tk.Label(parent, fg="blue", text="Report an issue on GitHub", cursor="hand2")
        issue.bind("<Button-1>", lambda e, l=r'https://github.com/natecraddock/page-zipper/issues': webbrowser.open(l))
        issue.grid(row=3, pady=5, sticky='ew')

        tk.Label(parent, text="If you need help, email me at:").grid(row=4, pady=10)

        email = tk.Label(parent, text="nzcraddock@gmail.com (click to copy to clipboard)", fg="blue", cursor="hand2")
        email.bind("<Button-1>", lambda _: self.clip())
        email.grid(row=5, pady=5, sticky='ew')

    def clip(self):
        '''Copy the contact email address to the clipboard.'''
        self.clipboard_clear()
        self.clipboard_append('nzcraddock@gmail.com')
        self.update()
class RenameFrame(tk.Frame):
    """Utilities-tab frame for bulk-renaming the files inside a chosen folder."""

    def __init__(self, parent):
        tk.Frame.__init__(self, parent)
        # Folder picker
        self.browser = widgets.DirectoryBrowser(self, "Rename files in folder:")
        self.browser.grid(row=0, column=0, sticky='nesw', padx=5, columnspan=2)
        # First index to use when numbering the renamed files
        self.number = widgets.LabeledIntEntry(self, label="Starting Number:")
        self.number.grid(row=1, column=0, sticky='nsw')
        self.number.set(1)
        # Filename prefix
        self.prefix = widgets.PrefixEntry(self)
        self.prefix.grid(row=2, column=0, sticky='nsw')
        self.rename_button = tk.Button(self, text='Rename Files', command=self.rename)
        self.rename_button.grid(row=3, column=0, columnspan=2, sticky='nesw', padx=5)

    def rename(self):
        """Rename every file in the selected folder, or show an error dialog."""
        target = self.browser.path.get()
        if not os.path.exists(target):
            messagebox.showerror("Error", "No directory specified, or path is invalid")
            return
        utils.rename_files(target, self.number.get(), self.prefix.prefix.get())
class PageZipperWindow:
    """Main application window: a notebook with Input, Output, Utilities and Help tabs."""

    def __init__(self, root):
        """Build the UI inside *root* and kick off the release-update check."""
        global version
        self.root = root
        self.root.title(f"Page Zipper v{version}")
        # Create dictionary variables for the three UI areas
        # NOTE(review): these dicts appear superseded by the frames below -- confirm before removing.
        self.left = {'valid':False, 'pages':[]}
        self.right = {'valid':False, 'pages':[]}
        self.utils = {}
        self.output = {'valid':False, 'pages':[]}
        self.create_gui()
        # Check version
        self.check_version()
    def create_gui(self):
        """Create the notebook, the tab frames, and the grid/expansion configuration."""
        self.notebook = ttk.Notebook(self.root)
        self.input_tab = tk.Frame(self.notebook)
        self.output_tab = tk.Frame(self.notebook)
        self.utils_tab = tk.Frame(self.notebook)
        self.help_tab = tk.Frame(self.notebook)
        self.notebook.add(self.input_tab, text="Input")
        self.notebook.add(self.output_tab, text="Output")
        self.notebook.add(self.utils_tab, text="Utilities")
        self.notebook.add(self.help_tab, text="Help")
        self.notebook.grid(row=0, column=0, sticky='nesw')
        # Add Help Frame
        HelpFrame(self.help_tab).grid(row=0, column=0, sticky='nesw')
        # Create Frames for each area; page frames refresh the output on change
        self.left_frame = widgets.PagesFrame(self.input_tab, "Left", self.on_viewer_update)
        self.right_frame = widgets.PagesFrame(self.input_tab, "Right", self.on_viewer_update)
        self.output_frame = widgets.OutputFrame(self.output_tab, self.save_files)
        self.utils_frame = tk.Frame(self.utils_tab)
        # Align to grid
        self.left_frame.grid(row=0, column=0, sticky='nesw', pady=15)
        ttk.Separator(self.input_tab, orient='horizontal').grid(row=1, column=1, sticky='ew')
        self.right_frame.grid(row=2, column=0, sticky='nesw', pady=15)
        self.output_frame.grid(row=0, column=0, sticky='nesw', padx=5)
        self.utils_frame.grid(row=0, column=0, sticky='nsew', pady=15)
        # Make each frame expand
        self.left_frame.columnconfigure(0, weight=1)
        self.right_frame.columnconfigure(0, weight=1)
        self.output_frame.columnconfigure(0, weight=1)
        self.utils_frame.columnconfigure(0, weight=1)
        # Populate the Uilities Tab
        self.renamer = RenameFrame(self.utils_frame)
        self.renamer.grid(row=0, column=0, sticky='nsew')
        self.renamer.columnconfigure(0, weight=1)
        # For horizontal expanding of all widgets
        self.root.columnconfigure(0, weight=1)
        self.root.rowconfigure(0, weight=1)
        self.notebook.columnconfigure(0, weight=1)
        self.notebook.rowconfigure(0, weight=1)
        self.input_tab.columnconfigure(0, weight=1)
        self.output_tab.columnconfigure(0, weight=1)
        self.help_tab.columnconfigure(0, weight=1)
        self.utils_tab.columnconfigure(0, weight=1)
    def on_viewer_update(self):
        """Re-zip the right/left page lists and refresh the output preview."""
        # Right pages lead the interleave: output order is right[0], left[0], ...
        merged = zipper.merge_lists(self.right_frame.viewer.pages, self.left_frame.viewer.pages)
        merged = zipper.ungroup(merged)
        self.output_frame.viewer.reload_pages(merged)
    def save_files(self):
        """Clear the output folder and copy the zipped pages into it (after confirmation)."""
        output_path = self.output_frame.browser.path.get()
        if self.left_frame.viewer.pages and self.right_frame.viewer.pages and output_path:
            if messagebox.askokcancel("Proceed?", "Saving may overwrite some files in {0}".format(output_path)):
                # Destructive: existing files in the output folder are removed first
                zipper.clear_dir(self.output_frame.browser.path.get())
                zipper.copy_files(self.output_frame.viewer.pages, output_path, self.output_frame.prefix.prefix.get())
            else:
                print("no write")
        else:
            messagebox.showerror("Error", "No input/output directories selected")
    def check_version(self):
        """Ask GitHub whether a newer Page Zipper release exists."""
        global version
        updater.check_for_updates(version)
def create_window():
    """Build the main Tk window, set its icon, and enter the event loop."""
    root = tk.Tk()
    PageZipperWindow(root)
    # Lock the minimum size to whatever the laid-out widgets require
    root.update()
    root.minsize(root.winfo_reqwidth(), root.winfo_reqheight())
    root.resizable(width=True, height=True)
    # Resolve the icon path both for plain python and the pyinstaller bundle
    icon_file = "icon.ico"
    if hasattr(sys, "frozen"):
        icon_file = os.path.join(sys._MEIPASS, icon_file)
    else:
        icon_file = os.path.join(os.path.dirname(__file__), icon_file)
    try:
        root.iconbitmap(default=icon_file)
    except:  # icon support varies by platform; failure is cosmetic only
        print("Icon failed")
    root.mainloop()
|
{"/widgets.py": ["/utils.py", "/zipper.py"], "/utils.py": ["/widgets.py"], "/__main__.py": ["/ui.py"], "/zipper.py": ["/utils.py", "/widgets.py"], "/ui.py": ["/widgets.py", "/utils.py", "/zipper.py", "/updater.py"]}
|
7,846
|
natecraddock/page-zipper
|
refs/heads/master
|
/updater.py
|
import requests
from tkinter import messagebox
import webbrowser
# Returns tag from latest release on a given repository
def get_tag_name(repository_name):
    """Return the tag name of the latest GitHub release of *repository_name*.

    Args:
        repository_name: "owner/repo" GitHub repository identifier

    Raises:
        Exception: if the HTTP request fails or returns a non-200 status
    """
    # Keep the try narrow: only the network call can raise a request error.
    # The original wrapped everything in a bare `except:` which masked a
    # NameError (undefined `query`) raised on the non-200 path.
    try:
        request = requests.get(f"https://api.github.com/repos/{repository_name}/releases/latest")
    except requests.RequestException as error:
        raise Exception("Failed to make request") from error
    if request.status_code == 200:
        return request.json()["tag_name"]
    raise Exception("Query failed to run by returning code of {}.".format(request.status_code))
def check_for_updates(v):
    """Check GitHub for a newer release and offer to open the download page.

    Best-effort: any failure (no network, unexpected tag format) is reported
    to stdout instead of crashing the app, since this runs at startup.

    Args:
        v: currently-running version as a float (e.g. 1.2)
    """
    print("Checking for updates...", end=" ")
    repository = "natecraddock/page-zipper"
    try:
        tag = get_tag_name(repository)
        # Tags look like "v1.2"; strip the leading "v" before parsing.
        # NOTE: float() assumes two-part versions -- a "v1.2.1" tag would
        # previously have crashed here; now it is handled gracefully.
        version = float(tag.lstrip('v'))
    except Exception:
        print("Update check failed")
        return
    # If latest version is greater than current version
    if version > v:
        print("Update found!")
        result = messagebox.askyesno("Updates Found", f"An updated version of Page Zipper has been found (v{version}), would you like to download the update?")
        if result:
            webbrowser.open(f"https://www.github.com/{repository}/releases/latest")
    else:
        print("Not updating")
if __name__ == "__main__":
    check_for_updates(1.2)
|
{"/widgets.py": ["/utils.py", "/zipper.py"], "/utils.py": ["/widgets.py"], "/__main__.py": ["/ui.py"], "/zipper.py": ["/utils.py", "/widgets.py"], "/ui.py": ["/widgets.py", "/utils.py", "/zipper.py", "/updater.py"]}
|
7,899
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resources/iam/roles/pipeline_role.py
|
import sys
from troposphere import (
GetAtt,
Ref,
Sub
)
from troposphere.iam import (
Policy,
Role
)
from ozone.resources.iam.roles import role_trust_policy
from ozone.filters.arns import (
s3_bucket as filter_s3bucket
)
def _s3_access(**kwargs):
    """Build the IAM policy granting the pipeline read/write access to the artifact bucket.

    Kwargs:
        Bucket: artifact store bucket name/ARN (filtered into an ARN)
    """
    bucket_arn = filter_s3bucket(kwargs['Bucket'], True)
    # NOTE(review): "s3:PutObjectVersion" is not a documented S3 action -- verify.
    statement = {
        'Effect': 'Allow',
        'Resource': [
            bucket_arn
        ],
        'Action': [
            's3:PutObject',
            's3:PutObjectVersion',
            's3:GetObject',
            's3:GetObjectVersion'
        ]
    }
    return Policy(
        PolicyName="LambdaLayers-S3Access",
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [statement]
        }
    )
def _invoke_lambda_access(**kwargs):
    """Build the IAM policy letting the pipeline invoke and inspect Lambda functions."""
    statement = {
        'Effect': 'Allow',
        'Resource': [
            '*'
        ],
        'Action': [
            'lambda:Invoke',
            'lambda:InvokeFunction',
            'lambda:List*',
            'lambda:Get*'
        ]
    }
    return Policy(
        PolicyName="Pipeline-LambdaAccess",
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [statement]
        }
    )
def _deploy_cloudformation_access(**kwargs):
    """Build the IAM policy for CloudFormation deploy stages (stack CRUD + iam:PassRole)."""
    pass_role_statement = {
        "Action": [
            "iam:PassRole"
        ],
        "Resource": "*",
        "Effect": "Allow",
        "Condition": {
            "StringEqualsIfExists": {
                "iam:PassedToService": [
                    "cloudformation.amazonaws.com",
                ]
            }
        }
    }
    stack_statement = {
        "Effect": "Allow",
        "Resource": "*",
        "Action": [
            "cloudformation:CreateStack",
            "cloudformation:DeleteStack",
            "cloudformation:DescribeStacks",
            "cloudformation:UpdateStack",
            "cloudformation:CreateChangeSet",
            "cloudformation:DeleteChangeSet",
            "cloudformation:DescribeChangeSet",
            "cloudformation:ExecuteChangeSet",
            "cloudformation:SetStackPolicy",
            "cloudformation:ValidateTemplate"
        ]
    }
    return Policy(
        PolicyName="LambdaLayers-PassRole",
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [pass_role_statement, stack_statement]
        }
    )
def _source_codecommit_access(**kwargs):
    """Build the IAM policy for CodeCommit source stages."""
    statement = {
        "Resource": "*",
        "Effect": "Allow",
        "Action": [
            "codecommit:CancelUploadArchive",
            "codecommit:GetBranch",
            "codecommit:GetCommit",
            "codecommit:GetUploadArchiveStatus",
            "codecommit:UploadArchive"
        ]
    }
    return Policy(
        PolicyName="LambdaLayers-CodeCommitAccess",
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [statement]
        }
    )
def _build_codebuild_access(**kwargs):
    """Build the IAM policy for CodeBuild build stages."""
    statement = {
        "Action": [
            "codebuild:BatchGetBuilds",
            "codebuild:StartBuild"
        ],
        "Resource": "*",
        "Effect": "Allow"
    }
    return Policy(
        PolicyName="LambdaLayers-CodeBuildAccess",
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [statement]
        }
    )
def pipelinerole_build(**kwargs):
    """Build the CodePipeline IAM role with policies for the enabled stages.

    Kwargs:
        Bucket (required): artifact store bucket name/ARN
        Use<Service> (optional bools): e.g. UseCodeBuild, UseCodeCommit --
            enables the matching per-stage policy builder when one exists
            in this module.

    Returns:
        role Role()

    Raises:
        KeyError: if Bucket is missing, or if an enabled policy builder
            requires a kwarg that was not provided
    """
    if 'Bucket' not in kwargs:
        # f-string removed: it contained no placeholders
        raise KeyError('Bucket is required to provide access to artifact store')
    policies = [_s3_access(**kwargs)]
    stages = {
        'Source': ['AmazonS3', 'CodeCommit', 'AmazonEcr'],
        'Build': ['CodeBuild'],
        'Test': ['CodeBuild'],
        'Deploy': ['AmazonS3', 'Cloudformation', 'CodeDeploy', 'ServiceCatalog'],
        'Invoke': ['Lambda']
    }
    for stage, services in stages.items():
        for service in services:
            if not kwargs.get(f'Use{service}'):
                continue
            # Dispatch to a builder like _build_codebuild_access when defined;
            # silently skip stage/service pairs with no builder (as before).
            func = getattr(sys.modules[__name__], f'_{stage.lower()}_{service.lower()}_access', None)
            if func is not None:
                # A KeyError from the builder now propagates with its original
                # traceback instead of being re-wrapped (which lost context).
                policies.append(func(**kwargs))
    role = Role(
        "CodePipelineRole",
        AssumeRolePolicyDocument=role_trust_policy('codepipeline'),
        Policies=policies
    )
    return role
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,900
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resources/iam/policies/s3_replica_kms.py
|
from troposphere import Sub
from troposphere.iam import Policy
from ozone.filters.arns import (
    s3_bucket as filter_s3bucket,
    kms_key as filter_kmskey,
    kms_alias as filter_keyalias
)
def _allow_s3_replication_source_access(source_bucket):
"""
Args:
source_bucket: name or ARN of the source bucket for replication
Returns:
statement dict()
"""
statement = {
"Sid": "AllowObjectReplicationAccessFromSourceBucket",
"Action": [
"s3:ListBucket",
"s3:GetReplicationConfiguration",
"s3:GetObjectVersionForReplication",
"s3:GetObjectVersionAcl",
"s3:GetObjectVersionTagging"
],
"Effect": "Allow",
"Resource": [
f'{source_bucket}',
f'{source_bucket}/*'
]
}
return statement
def _allow_s3_replication_replica_access(replica_bucket, replica_key):
"""
Args:
replica_bucket: name or ARn of the replica bucket
Returns:
statement dict()
"""
statement = {
"Sid": "AllowObjectReplicationToReplicaBucket",
"Action": [
"s3:ReplicateObject",
"s3:ReplicateDelete",
"s3:ReplicateTags",
"s3:GetObjectVersionTagging"
],
"Effect": "Allow",
"Condition": {
"StringLikeIfExists": {
"s3:x-amz-server-side-encryption": [
"aws:kms",
"AES256"
],
"s3:x-amz-server-side-encryption-aws-kms-key-id": [
replica_key
]
}
},
"Resource": f'{replica_bucket}/*'
}
return statement
def _allow_kms_decrypt_source(source_bucket, source_key):
    """Statement allowing kms:Decrypt on the source key for source-bucket objects.

    Args:
        source_bucket: name or ARN of the source bucket for replication
        source_key: Key ID / ARN of the key used to decrypt source

    Returns:
        statement dict()
    """
    via_s3_condition = {
        "StringLike": {
            "kms:ViaService": Sub("s3.${AWS::Region}.${AWS::URLSuffix}"),
            "kms:EncryptionContext:aws:s3:arn": [
                f'{source_bucket}',
                f'{source_bucket}/*'
            ]
        }
    }
    return {
        "Sid": "AllowKmsDecryptForObjectFromSourceBucket",
        "Action": [
            "kms:Decrypt"
        ],
        "Effect": "Allow",
        "Condition": via_s3_condition,
        "Resource": [
            source_key
        ]
    }
def _allow_kms_encrypt_replica(replica_bucket, replica_key):
    """Statement allowing kms:Encrypt on the replica key for replica-bucket objects.

    Args:
        replica_bucket: name or ARN of the replica bucket for replication
        replica_key: Key ID / ARN of the key used to encrypt source

    Returns:
        statement dict()
    """
    via_s3_condition = {
        "StringLike": {
            # ReplicaRegion is expected to be a template parameter
            "kms:ViaService": Sub("s3.${ReplicaRegion}.${AWS::URLSuffix}"),
            "kms:EncryptionContext:aws:s3:arn": [
                f'{replica_bucket}/*'
            ]
        }
    }
    return {
        "Sid": "AllowKmsEncryptForObjectToReplicaBucket",
        "Action": [
            "kms:Encrypt"
        ],
        "Effect": "Allow",
        "Condition": via_s3_condition,
        "Resource": [
            replica_key
        ]
    }
def _normalize_kms_key(key):
    """Return *key* filtered as a KMS key ID, falling back to a key alias.

    Raises:
        ValueError: if *key* is neither a valid key ID nor a valid alias
    """
    try:
        return filter_kmskey(key)
    except ValueError:
        pass
    try:
        return filter_keyalias(key)
    except ValueError:
        raise ValueError("the KMS Key input is neither a valid Key ID or Key Alias")
def iam_role_replica_with_kms(source_bucket, replica_bucket, source_key, replica_key):
    """
    Args:
        source_bucket: name/ARN of the source bucket
        replica_bucket: name/ARN of the replica bucket
        source_key: source_key ID or Alias for KMS encryption
        replica_key: key ID or Alias for KMS Encryption in the replica region

    Returns:
        policy Policy()

    Raises:
        ValueError: if either KMS key is neither a valid key ID nor alias
    """
    source_bucket = filter_s3bucket(source_bucket)
    replica_bucket = filter_s3bucket(replica_bucket)
    assert source_bucket
    assert replica_bucket
    # Bug fix: the alias filter was previously attempted unconditionally, so a
    # replica key that WAS a valid key ID still raised ("Double failure").
    # Now the alias is only tried when the key-ID filter fails.
    filter_replica_key = _normalize_kms_key(replica_key)
    filter_source_key = _normalize_kms_key(source_key)
    statement = [
        _allow_s3_replication_source_access(source_bucket),
        _allow_s3_replication_replica_access(replica_bucket, filter_replica_key),
        _allow_kms_decrypt_source(source_bucket, filter_source_key),
        _allow_kms_encrypt_replica(replica_bucket, filter_replica_key),
    ]
    policy_doc = {
        "Version": "2012-10-17",
        "Statement": statement
    }
    policy = Policy(
        PolicyName="AllowBucketReplicationWithKmsEncryptDecrypt",
        PolicyDocument=policy_doc
    )
    return policy
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,901
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/__init__.py
|
# -*- coding: utf-8 -*-
"""Top-level package for ozone."""
# Package metadata consumed by packaging/docs tooling.
__author__ = """John Mille"""
__email__ = 'john@lambda-my-aws.io'
__version__ = '0.1.0'
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,902
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/tags/__init__.py
|
from troposphere.cloudformation import AWSCustomObject
from troposphere import Tags
class IamRoleTags(AWSCustomObject):
    """
    Custom CloudFormation resource (Custom::RoleTags) backed by a Lambda
    function that applies tags to an IAM role.

    Properties:
        ServiceToken: ARN of the Lambda function CloudFormation invokes (required)
        RoleName: name of the IAM role to tag (required)
        Tags: the tags to apply to the role (required)
    """
    resource_type = "Custom::RoleTags"
    props = {
        'ServiceToken': (str, True),
        'RoleName': (str, True),
        'Tags': (Tags, True)
    }
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,903
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resources/kms/alias.py
|
from troposphere.kms import (
Alias
)
def alias_build(**kwargs):
    """Create the KMS alias resource.

    Args:
        kwargs:
            AliasName: str() is the name of the alias
            TargetKeyId: str() of the Key Unique ID or ARN

    Returns:
        alias Alias()
    """
    return Alias('KmsKeyAlias', **kwargs)
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,904
|
lambda-my-aws/ozone
|
refs/heads/master
|
/codebuild/layer.py
|
#!/usr/bin/env python
"""
Script to generate the CFN template for the library into a Lambda Layer from within the CodeBuild of the Layer Build.
"""
from datetime import datetime as dt
from ozone.templates.awslambdalayer import template
from os import environ
from json import dumps
from argparse import ArgumentParser
import boto3
def get_artifact_location():
    """
    Retrieves the destination bucket and key of the layer artifact from the
    CodeBuild job this script runs inside.

    Returns:
        (bucket, key) tuple of str
    """
    job_id = environ['CODEBUILD_BUILD_ID']
    client = boto3.client('codebuild')
    build_info = client.batch_get_builds(
        ids=[job_id]
    )['builds'][0]
    location = build_info['artifacts']['location']
    # The location is an ARN like arn:aws:s3:::bucket/path. The previous
    # str.strip('aws:arn:s3:::') removed *characters* (a,w,s,r,n,3,:) from
    # BOTH ends -- it could truncate the bucket/key. Split on ':::' instead.
    location = location.split(':::', 1)[-1]
    bucket = location.split('/')[0]
    key = location.split('/', 1)[-1]
    return (bucket, key)
if __name__ == '__main__':
    # CLI: only argument is the directory where CFN files get written
    PARSER = ArgumentParser('Codebuild CFN template and params build')
    PARSER.add_argument(
        '--path', help='Path where CFN files are created', required=True
    )
    ARGS = PARSER.parse_args()
    # Gather build context from CodeBuild and the environment
    BUILD_DEST = get_artifact_location()
    LAYER_NAME = environ['LAYER_NAME']
    PY_VERSION = environ['PY_VERSION']
    DATE = dt.utcnow().isoformat()
    # Render the layer template and stamp it with build metadata
    TPL = template(make_public=True, Runtimes=[PY_VERSION], Bucket=BUILD_DEST[0], Key=BUILD_DEST[1])
    TPL.set_metadata({
        'Author': 'John Mille john@lambda-my-aws.io',
        'Version': DATE,
        'BuildBy': 'CodePipeline/CodeBuild',
        'LayerName': LAYER_NAME
    })
    TPL.set_description(f'Template for {LAYER_NAME} - {DATE}')
    with open(f'{ARGS.path}/layer_template.yml', 'w') as template_fd:
        template_fd.write(TPL.to_yaml())
    # Stack configuration consumed by the CloudFormation deploy action
    template_config = {
        'Parameters': {
            'LayerName': LAYER_NAME
        },
        'Tags': {
            'Name': LAYER_NAME
        }
    }
    with open(f'{ARGS.path}/layer_config.json', 'w') as config_fd:
        config_fd.write(dumps(template_config))
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,905
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/filters/__init__.py
|
"""
Scripts used across the board to format all sorts outputs and strings
"""
def get_resource_type(object_name, strip=True):
    """Return a troposphere object's resource_type with the ':' separators removed.

    With strip=True (default) every 'AWS' substring is also removed,
    e.g. 'AWS::S3::Bucket' -> 'S3Bucket'.
    """
    flattened = object_name.resource_type.replace(':', '')
    if not strip:
        return flattened
    return flattened.replace('AWS', '')
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,906
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resources/devtools/pipeline.py
|
"""
Set of functions to use to create a CodePipeline pipeline using Troposphere
"""
import re
import sys
from troposphere import (
ImportValue,
Select,
GetAtt,
Ref,
Sub
)
from troposphere.codepipeline import (
Pipeline,
Stages,
Actions,
ActionTypeId,
InputArtifacts,
OutputArtifacts,
ArtifactStore,
DisableInboundStageTransitions
)
from ozone.filters.arns import (
iam_role as filter_iamrole,
s3_bucket as filter_s3bucket,
lambda_function as filter_lambda
)
from ozone.filters import get_resource_type
from troposphere.codebuild import Project
# Matches artifact template paths of the form "ArtifactName::path/to/file.yml"
CFN_TEMPLATE_ARTIFACT_PATH_PATTERN = r'(([\w]+)::([a-z\/]+).(json|yaml|yml))'
# Matches any single non-word character (used to sanitize artifact names)
NON_ALPHANUM = r'([^\w])'
class PipelineActions(Actions, object):
    """
    Base class for CodePipeline action wrappers. Provides shared
    config/action-type/output properties plus artifact validation helpers.
    """
    _action = None
    _config = {}
    @property
    def config(self):
        # Provider-specific action Configuration mapping
        return self.__config
    @config.setter
    def config(self, config):
        self.__config = config
    @property
    def action_type(self):
        # troposphere ActionTypeId for this action
        return self.__action_type
    @action_type.setter
    def action_type(self, action_type):
        self.__action_type = action_type
    @property
    def outputs(self):
        return self.__outputs
    @outputs.setter
    def outputs(self, name):
        # Wrap the artifact name in a single-element OutputArtifacts list
        self.__outputs = [
            OutputArtifacts(Name=name)
        ]
    def filter_args(self, kwargs, args, valid_types):
        """Validate that every key in *kwargs* is an expected argument of a valid type.

        Args:
            kwargs: configuration mapping to validate
            args: list of accepted argument names
            valid_types: tuple of acceptable value types

        Raises:
            KeyError: if a key is not in *args*
            TypeError: if a value is not an instance of *valid_types*
        """
        for arg in kwargs.keys():
            if arg not in args:
                raise KeyError(f'Source action requires {arg}')
            if not isinstance(kwargs[arg], valid_types):
                # Bug fix: this previously interpolated the undefined name
                # `key`, so a bad value raised NameError instead of TypeError.
                raise TypeError(f'{arg} has to be of type', valid_types)
    def output_as_input(self, inputs):
        """
        Takes a list of ouputs OutputArtifacts and transforms it into a list of
        inputs InputArtifacts
        """
        outputs = []
        self.check_output_artifacts(inputs)
        for input_object in inputs:
            outputs.append(InputArtifacts(Name=input_object.Name))
        return outputs
    def check_input_artifacts(self, input_artifacts):
        """
        Args:
            input_artifacts: list of InputArtifacts
        Returns:
            bool if all okay
        Raises:
            TypeError if the list of artifacts is not right
        """
        if not isinstance(input_artifacts, list):
            raise TypeError('input_artifacts must be of type', list)
        for artifact in input_artifacts:
            if not isinstance(artifact, InputArtifacts):
                raise TypeError(f'Input artifact {artifact} must be of type', InputArtifacts)
        return True
    def check_output_artifacts(self, output_artifacts):
        """
        Args:
            output_artifacts: list of InputArtifacts
        Returns:
            bool if all okay
        Raises:
            TypeError if the list of artifacts is not right
        """
        if not isinstance(output_artifacts, list):
            raise TypeError('output_artifacts must be of type', list)
        for artifact in output_artifacts:
            if not isinstance(artifact, OutputArtifacts):
                raise TypeError(f'Output artifact {artifact} must be of type', OutputArtifacts)
class SourceAction(PipelineActions, object):
    """
    Class to create a "Source" Pipeline action (GitHub or CodeCommit).
    """
    _actions_args_types = (Ref, Sub, GetAtt, str)
    _supported_providers = ['GitHub', 'CodeCommit']
    _github_args = ['Repo', 'Branch', 'Owner', 'OAuthToken']
    _github_action_type = ActionTypeId(
        Category="Source",
        Owner="ThirdParty",
        Provider="GitHub",
        Version="1"
    )
    _codecommit_args = ['RepositoryName', 'BranchName']
    # Bug fix: this attribute was misspelled `_codecomit_action_type`, so the
    # getattr lookup in set_source_action raised AttributeError whenever the
    # CodeCommit provider was used.
    _codecommit_action_type = ActionTypeId(
        Category="Source",
        Owner="AWS",
        Provider="CodeCommit",
        Version="1"
    )
    __config = {}
    __action_type = None
    __outputs = None
    @property
    def outputs(self):
        return self.__outputs
    @outputs.setter
    def outputs(self, name):
        self.__outputs = [
            OutputArtifacts(Name=name)
        ]
    def __init__(self, name, provider, config):
        """
        Args:
            name: action name (also used, sanitized, as the output artifact name)
            provider: 'GitHub' or 'CodeCommit'
            config: provider-specific action configuration mapping
        """
        self.config = config
        self.set_source_action(provider)
        self.outputs = re.sub(NON_ALPHANUM, '', name)
        super().__init__(
            Name=name,
            ActionTypeId=self.action_type,
            Configuration=self.config,
            OutputArtifacts=self.outputs,
            RunOrder="1"
        )
    def set_source_action(self, provider):
        """Resolve expected args and ActionTypeId for *provider* and validate config."""
        provider_name = provider.lower()
        args = getattr(self, f'_{provider_name}_args')
        self.action_type = getattr(self, f'_{provider_name}_action_type')
        self.filter_args(self.config, args, self._actions_args_types)
        if provider_name == 'github':
            # GitHub sources use webhooks; disable polling explicitly
            self.config['PollForSourceChanges'] = False
class BuildAction(PipelineActions, object):
    """
    Pipeline "Build" stage action backed by a CodeBuild project.
    """
    _codebuild_action_type = ActionTypeId(
        Category="Build",
        Owner="AWS",
        Version="1",
        Provider="CodeBuild"
    )

    @property
    def build_name(self):
        return self.__build_name

    @build_name.setter
    def build_name(self, name):
        # Accept an intrinsic directly; otherwise import the exported
        # '<name>-<resource-type>-Name' value from another stack.
        build_resource = get_resource_type(Project)
        if isinstance(name, (Sub, Ref, Select, ImportValue)):
            self.__build_name = name
        else:
            self.__build_name = ImportValue(f'{name}-{build_resource}-Name')

    @property
    def project_name(self):
        # Bug fix: this getter previously returned self.__build_name, so the
        # stored project name could never be read back.
        return self.__project_name

    @project_name.setter
    def project_name(self, name):
        self.__project_name = name

    def __init__(self, name, inputs, project_name, primary_source=None):
        """
        Args:
            name: action name in the pipeline (previously ignored — the
                action Name was set to an ImportValue intrinsic)
            inputs: list of OutputArtifacts from upstream actions
            project_name: CodeBuild project name (plain string or intrinsic)
            primary_source: optional name of the primary input artifact
        Raises:
            ValueError: when more than 5 input artifacts are supplied
        """
        self.check_output_artifacts(inputs)
        self.inputs = self.output_as_input(inputs)
        self.project_name = project_name
        self.build_name = project_name
        if len(inputs) > 5:
            raise ValueError('CodeBuild does not support more than 5 input artifacts')
        self.config = {}
        self.config['ProjectName'] = self.project_name
        if primary_source is not None:
            self.config['PrimarySource'] = primary_source
        elif len(inputs) > 1:
            # With several inputs, default the primary source to the first.
            self.config['PrimarySource'] = self.inputs[0].Name
        self.action_type = self._codebuild_action_type
        self.outputs = re.sub(NON_ALPHANUM, '', project_name)
        super().__init__(
            # Bug fix: use the caller-supplied action name instead of the
            # ImportValue stored in build_name (invalid as an action Name).
            Name=name,
            InputArtifacts=self.inputs,
            ActionTypeId=self.action_type,
            Configuration=self.config,
            OutputArtifacts=self.outputs,
            RunOrder="1"
        )
class DeployAction(PipelineActions, object):
    """
    Pipeline "Deploy" stage action. Each supported provider is a nested
    helper class exposing `action_type` and `config`; only CloudFormation
    is implemented today.
    """
    _max_inputs_cloudformation = 10
    _valid_providers = ['CloudFormation']  # unsuported yet, 'CodeDeploy', 'S3']

    def __init__(self, name, inputs, provider, **kwargs):
        """
        Args:
            name: action name in the pipeline
            inputs: list of OutputArtifacts from upstream actions
            provider: deployment provider (must be in _valid_providers)
            kwargs: provider configuration; string 'RunOrder' / 'Name'
                entries override the defaults on the action
        Raises:
            ValueError: for an unsupported provider
            AttributeError: when the provider helper class cannot be resolved
        """
        self.check_output_artifacts(inputs)
        self.inputs = self.output_as_input(inputs)
        if provider not in self._valid_providers:
            raise ValueError(f'provider {provider} is not a supported provider', self._valid_providers)
        action_provider = provider.lower()
        try:
            klass = getattr(self, action_provider)
            deployer = klass(**kwargs)
        except AttributeError:
            raise AttributeError(f'Error importing {action_provider}')
        super().__init__(
            Name=name,
            InputArtifacts=self.inputs,
            ActionTypeId=deployer.action_type,
            Configuration=deployer.config,
            RunOrder="1"
        )
        # Bug fix: apply the optional overrides to the action itself. The
        # original did setattr(deployer.action, ...), but the provider helper
        # classes have no 'action' attribute, so any RunOrder/Name override
        # raised AttributeError.
        if 'RunOrder' in kwargs.keys() and isinstance(kwargs['RunOrder'], str):
            setattr(self, 'RunOrder', kwargs['RunOrder'])
        if 'Name' in kwargs.keys() and isinstance(kwargs['Name'], str):
            setattr(self, 'Name', kwargs['Name'])

    class cloudformation():
        """
        CloudFormation deployment provider: builds the action configuration
        for a CREATE_UPDATE (by default) stack deployment.
        """
        required_keys = ['RoleArn', 'StackName', 'TemplatePath']
        action_type = ActionTypeId(
            Category="Deploy",
            Owner="AWS",
            Version="1",
            Provider="CloudFormation"
        )

        def __init__(self, **kwargs):
            # NOTE(review): assert is stripped under -O; kept for parity
            # with the original behavior.
            assert all(key in kwargs.keys() for key in self.required_keys)
            pattern = re.compile(CFN_TEMPLATE_ARTIFACT_PATH_PATTERN)
            # TemplatePath must be a full S3 URL or an artifact-relative path.
            if not (kwargs['TemplatePath'].startswith('https://s3.amazonaws.com') or
                    pattern.match(kwargs['TemplatePath'])):
                raise ValueError(
                    'TemplatePath must either be a full path to S3'
                    f'or use the pattern {CFN_TEMPLATE_ARTIFACT_PATH_PATTERN}'
                )
            self.config = {
                'StackName': kwargs['StackName'],
                'ActionMode': 'CREATE_UPDATE',
                'RoleArn': filter_iamrole(kwargs['RoleArn']),
                'TemplatePath': kwargs['TemplatePath']
            }
            if 'ActionMode' in kwargs.keys():
                self.config['ActionMode'] = kwargs['ActionMode']
class InvokeAction(PipelineActions, object):
    """
    Pipeline "Invoke" stage action that calls a Lambda function.
    """
    def __init__(self, name, inputs, function_name, **kwargs):
        """
        Args:
            name: action name in the pipeline
            inputs: list of OutputArtifacts from upstream actions
            function_name: Lambda function name or ARN
            kwargs: optional 'UserParameters' passed to the invocation
        """
        self.check_output_artifacts(inputs)
        self.inputs = self.output_as_input(inputs)
        self.config = {
            'FunctionName': filter_lambda(function_name)
        }
        if 'UserParameters' in kwargs.keys():
            self.config['UserParameters'] = kwargs['UserParameters']
        # Artifact names must be alphanumeric.
        self.outputs = re.sub(NON_ALPHANUM, '', name)
        super().__init__(
            # Bug fix: the action Name was hard-coded to "GenerateCfnTemplate",
            # ignoring the caller-supplied name.
            Name=name,
            InputArtifacts=self.inputs,
            OutputArtifacts=self.outputs,
            ActionTypeId=ActionTypeId(
                Category="Invoke",
                Owner="AWS",
                Version="1",
                Provider="Lambda"
            ),
            Configuration=self.config,
            RunOrder="1"
        )
class CodePipeline(Pipeline, object):
    """
    CodePipeline resource assembled from (stage_name, [actions]) tuples.
    """
    @property
    def stages_list(self):
        return self._stages_list

    @stages_list.setter
    def stages_list(self, stages):
        for stage in stages:
            self._stages_list.append(Stages(Name=stage[0], Actions=stage[1]))

    def __init__(self, title, role, bucket, stages, name=None, **kwargs):
        """
        Args:
            title: logical name of the resource in the template
            role: IAM role name/ARN/intrinsic assumed by the pipeline
            bucket: S3 bucket used as the artifact store
            stages: list of (stage_name, [actions]) tuples
            name: unused, kept for backward compatibility
        """
        # Bug fix: _stages_list used to be a class attribute, so stages
        # accumulated across every CodePipeline instance in a process;
        # make it per-instance before populating it.
        self._stages_list = []
        self.stages_list = stages
        super().__init__(
            title,
            Stages=self.stages_list,
            RestartExecutionOnUpdate=True,
            RoleArn=filter_iamrole(role),
            ArtifactStore=ArtifactStore(
                Type="S3",
                Location=bucket
            )
        )
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,907
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/templates/awslambdalayer_pipeline.py
|
"""
Static Pipeline representation to create a CodePipeline dedicated to building
Lambda Layers
"""
from troposphere import (
Parameter,
Template,
GetAtt,
Ref,
Sub
)
from ozone.handlers.lambda_tools import check_params_exist
from ozone.resources.iam.roles.pipeline_role import pipelinerole_build
from ozone.resources.devtools.pipeline import (
SourceAction,
BuildAction,
DeployAction,
InvokeAction,
CodePipeline
)
from ozone.outputs import object_outputs
def template(**kwargs):
    """
    Build the CloudFormation template for a Lambda-layers CodePipeline:
    Source -> one CodeBuild per layer -> merge build -> Lambda template
    generator -> CloudFormation deploy.

    kwargs (all required, see template_required_params):
        BucketName: artifact store bucket name
        Source: dict with 'Provider' and 'Config' for the source action
        LayerBuildProjects: list of CodeBuild project names (one per layer)
        LayersMergeProject: CodeBuild project merging the layer artifacts
        LayerName: name used for the deployed layer stack
        GeneratorFunctionName: Lambda that renders the layer template
        CloudformationRoleArn: role CloudFormation assumes for the deploy
    Returns:
        troposphere Template
    """
    template_required_params = [
        'BucketName',
        'Source', 'LayerBuildProjects', 'LayersMergeProject',
        'LayerName', 'GeneratorFunctionName', 'CloudformationRoleArn'
    ]
    check_params_exist(template_required_params, kwargs)
    template = Template()
    # NoEcho keeps the OAuth token out of console/API output.
    token = template.add_parameter(Parameter(
        'GitHubOAuthToken',
        Type="String",
        NoEcho=True
    ))
    role = pipelinerole_build(
        UseCodeCommit=True,
        UseCodeBuild=True,
        UseLambda=True,
        UseCloudformation=True,
        Bucket=kwargs['BucketName']
    )
    # GitHub sources authenticate via the OAuth token parameter.
    if kwargs['Source']['Provider'].lower() == 'github':
        kwargs['Source']['Config']['OAuthToken'] = Ref(token)
    source = SourceAction(
        name='SourceCode',
        provider=kwargs['Source']['Provider'],
        config=kwargs['Source']['Config']
    )
    # One build action per layer project, all fed from the source artifact.
    build_actions = []
    builds_projects = kwargs['LayerBuildProjects']
    for project in builds_projects:
        build_actions.append(BuildAction(
            project,
            source.outputs,
            project
        ))
    # Collect every layer artifact as input to the merge build.
    build_outputs = []
    for action in build_actions:
        build_outputs += action.outputs
    merge_action = BuildAction(
        'MergeAction',
        build_outputs,
        kwargs['LayersMergeProject']
    )
    # The Lambda renders the layer CFN template from the merged artifact.
    invoke = InvokeAction(
        'GenerateTemplateForCfn',
        merge_action.outputs,
        function_name=kwargs['GeneratorFunctionName']
    )
    input_name = invoke.outputs[0].Name
    # Deploy the generated template; TemplatePath points inside the invoke
    # action's output artifact.
    deploy = DeployAction(
        'DeployToCfn',
        invoke.outputs,
        'CloudFormation',
        StackName=f'layer-{kwargs["LayerName"]}',
        RoleArn=kwargs['CloudformationRoleArn'],
        TemplatePath=f'{input_name}::tmp/template.json'
    )
    stages = [
        ('Source', [source]),
        ('BuildLayers', build_actions),
        ('MergeLayers', [merge_action]),
        ('GenerateCfnTemplate', [invoke]),
        ('DeployWithCfn', [deploy]),
    ]
    pipeline = CodePipeline(
        'Pipeline',
        GetAtt(role, 'Arn'),
        kwargs['BucketName'],
        stages
    )
    template.add_resource(role)
    template.add_resource(pipeline)
    template.add_output(object_outputs(pipeline, True))
    return template
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,908
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/templates/examples/function_template.py
|
#!/usr/bin/env python
import yaml
from ozone.templates.awslambda import template
from ozone.resources.iam.policies import AWS_LAMBDA_BASIC_EXEC
from troposphere.iam import Role
# Example script: render a Lambda function template from function_config.yml.
ROLE = Role(
    'IamRole',
    Path='/lambda/demo',
    # Bug fix: the troposphere Role property is ManagedPolicyArns;
    # 'ManagedPoliciesArns' raised AttributeError at resource creation.
    ManagedPolicyArns=[AWS_LAMBDA_BASIC_EXEC]
)
with open('function_config.yml', 'r') as fd:
    CONFIG = yaml.safe_load(fd.read())
assert CONFIG
TEMPLATE_ARGS = {}
if 'layers' in CONFIG.keys():
    TEMPLATE_ARGS['Layers'] = CONFIG['layers']
TEMPLATE_ARGS['Role'] = 'dummyrole'
TEMPLATE_ARGS['Runtime'] = CONFIG['runtime']
TEMPLATE = template(**TEMPLATE_ARGS)
print(TEMPLATE.to_json())
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,909
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resolvers/codebuild/runtime.py
|
import boto3
import re
CODEBUILD_PLATFORMS = ['UBUNTU']
def cleanup_version(version_str):
    """
    Normalize a CodeBuild image/runtime identifier by stripping the
    'aws/codebuild/' prefix and the '.', ':', '-' and '_' separators.

    Args:
        version_str: raw image or version string from the CodeBuild API
    Returns:
        the cleaned-up string
    """
    # Substrings to delete; renamed from `dict`, which shadowed the builtin.
    replacements = {
        'aws/codebuild/': '',
        '.': '',
        ':': '',
        '-': '',
        '_': ''
    }
    regex = re.compile("(%s)" % "|".join(map(re.escape, replacements)))
    return regex.sub(lambda match: replacements[match.group(0)], version_str)
def generate_runtime_mapping_and_parameters(languages):
    """
    Query the CodeBuild curated images and build a mapping of
    language -> version -> image identifier for the supported platforms.

    Args:
        languages: list of lower-case language names to include
    Returns:
        (True, mappings, params_languages, params_versions) on success,
        None on any failure (the original best-effort contract is kept).
    """
    client = boto3.client('codebuild')
    try:
        params_languages = []
        params_versions = []
        platform_mappings = {}
        platforms = client.list_curated_environment_images()['platforms']
        for platform in platforms:
            if platform['platform'] not in CODEBUILD_PLATFORMS:
                continue
            platform_key = platform['platform'].lower()
            platform_mappings[platform_key] = {}
            for language in platform['languages']:
                if language['language'].lower() not in languages:
                    continue
                language_key = cleanup_version(language['language'].lower())
                params_languages.append(language_key)
                platform_mappings[platform_key][language_key] = {}
                for image in language['images']:
                    language_version = cleanup_version(image['name'])
                    params_versions.append(language_version)
                    # Latest image version wins (the original assigned the
                    # same key twice; only ['versions'][-1] survived).
                    platform_mappings[platform_key][language_key][language_version] = image['versions'][-1]
        # Flatten per-platform mappings into a single language mapping.
        mappings = {}
        for platform in CODEBUILD_PLATFORMS:
            key = platform.lower()
            for language_key in platform_mappings[key]:
                mappings[language_key] = platform_mappings[key][language_key]
        return (True, mappings, params_languages, params_versions)
    except Exception:
        # NOTE(review): broad best-effort catch kept from the original; any
        # API or response-shape error yields None rather than raising.
        return None
if __name__ == '__main__':
    import json
    # Ad-hoc smoke test: makes a live CodeBuild API call (requires AWS
    # credentials) and pretty-prints the resulting mapping.
    print(json.dumps(
        generate_runtime_mapping_and_parameters(['python', 'node_js', 'docker']),
        indent=2
    ))
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,910
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resources/iam/roles/__init__.py
|
from troposphere import (
Tags,
Ref,
Sub
)
from troposphere.iam import (
Policy, Role
)
from ozone.tags import IamRoleTags
from datetime import date
import hashlib
def role_trust_policy(service_name, require_mfa=False, external_id=None):
    """
    Build an IAM assume-role (trust) policy document for an AWS service.

    Args:
        service_name: service prefix, e.g. 'lambda' or 'codepipeline'
        require_mfa: when True, require MFA on sts:AssumeRole
        external_id: when set, require a matching sts:ExternalId
    Returns:
        dict: the trust policy document
    """
    statement = {
        "Effect": "Allow",
        "Principal": {
            "Service": [
                Sub(f'{service_name}.${{AWS::URLSuffix}}')
            ]
        },
        "Action": [ "sts:AssumeRole" ]
    }
    # Only attach a Condition key when at least one condition applies.
    if require_mfa or external_id:
        statement['Condition'] = {}
    if require_mfa:
        statement['Condition']['Bool'] = {
            "aws:MultiFactorAuthPresent": "true"
        }
    if external_id is not None:
        statement['Condition']['StringEquals'] = {
            "sts:ExternalId": external_id
        }
    return {
        "Version" : "2012-10-17",
        "Statement": [
            statement
        ]
    }
def pass_policy(to_services, name=None, resource=None):
    """
    Build an inline IAM policy allowing iam:PassRole restricted to the
    given services.

    Args:
        to_services: list of service names (with or without domain suffix)
        name: optional override for the generated PolicyName
        resource: unused, kept for backward compatibility
    Returns:
        troposphere.iam.Policy
    Raises:
        TypeError: when to_services is not a list
    """
    if not isinstance(to_services, list):
        raise TypeError('services must be of type', list)
    services = []
    for service in to_services:
        if service.endswith('.com') or service.endswith('.com.cn'):
            services.append(service)
        else:
            # Bug fix: URLSuffix alone is not a pseudo parameter; it must be
            # referenced as AWS::URLSuffix (as role_trust_policy does).
            services.append(Sub(f'{service}.${{AWS::URLSuffix}}'))
    statement = {
        "Action": [
            "iam:PassRole"
        ],
        "Resource": "*",
        "Effect": "Allow",
        "Condition": {
            "StringEqualsIfExists": {
                "iam:PassedToService": services
            }
        }
    }
    # Bug fix: the original 'AllowPassRoleTo'.join('', to_services) raised
    # TypeError (str.join accepts a single iterable). Also honor the `name`
    # parameter, which was previously ignored.
    policy_name = name if name else 'AllowPassRoleTo' + ''.join(to_services)
    return Policy(
        PolicyName=policy_name,
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                statement
            ]
        }
    )
class IamRole(Role, object):
    """
    Simple Class to initialize a role and add access policies to it
    """
    def __init__(self, title, services, mfa=False, external_id=None, tags_function=None, **kwargs):
        """
        Args:
            title: logical name of the role resource in the template
            services: string / list of services to allow to assume the role
            mfa: forwarded to role_trust_policy as require_mfa
            external_id: forwarded to role_trust_policy (sts:ExternalId)
            tags_function: NOTE(review): accepted but never used in this
                constructor — confirm whether tagging was meant to be applied
            kwargs: extra Role properties set verbatim on the resource
        """
        # NOTE(review): role_trust_policy interpolates service_name into a
        # single f-string, so passing a list here produces a malformed
        # principal — verify list support before relying on it.
        super().__init__(
            title,
            AssumeRolePolicyDocument=role_trust_policy(services, mfa, external_id)
        )
        # Forward any remaining keyword arguments as Role properties.
        for key in kwargs.keys():
            setattr(self, key, kwargs[key])
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,911
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/outputs/__init__.py
|
"""
Common functions to return outputs
"""
from troposphere import (
Output,
GetAtt,
Export,
Ref,
Sub
)
from troposphere.iam import Role
from troposphere.awslambda import LayerVersion
from ozone.filters import get_resource_type
def object_outputs(template_resource, supports_arn=False, export=False,
                   append_title=False, name_is_id=False):
    """
    Args:
        template_resource: Troposphere resource object to extract outputs from
        supports_arn: bool: If the object supports GetAtt(object, 'Arn'), set to true to add the
        ARN to the outputs
        export: bool: Exports the value to the Region for other stacks to use
        append_title: bool: include the resource title in the export name
        name_is_id: bool: title the Ref output '<Title>Id' instead of '<Title>Name'
    Returns:
        outputs: list: list of Output() object for the Troposphere template
    """
    outputs = []
    object_type = get_resource_type(template_resource)
    name_ext = 'Name'
    if name_is_id:
        name_ext = 'Id'
    suffix = name_ext
    # Export names follow '<stack>-<resource-type>[-<title>]-<suffix>'.
    export_name = f'${{AWS::StackName}}-{object_type}'
    if append_title:
        export_name = f'${{AWS::StackName}}-{object_type}-{template_resource.title}'
    # Ref() output: the resource's name (or physical id).
    output = Output(
        f'{template_resource.title}{name_ext}',
        Value=Ref(template_resource)
    )
    if export:
        setattr(
            output, 'Export',
            Export(Sub(f'{export_name}-{suffix}'))
        )
    outputs.append(output)
    if supports_arn:
        # ARN output via GetAtt for resources that expose one.
        output = Output(
            f'{template_resource.title}Arn',
            Value=GetAtt(template_resource, 'Arn')
        )
        if export:
            setattr(output, 'Export',
                    Export(Sub(f'{export_name}-Arn'))
                    )
        outputs.append(output)
    if isinstance(template_resource, Role):
        # IAM roles also expose their stable unique RoleId.
        output = Output(
            f'{template_resource.title}UniqueId',
            Value=GetAtt(template_resource, 'RoleId')
        )
        if export:
            setattr(
                output, 'Export',
                Export(Sub(f'{export_name}-RoleId'))
            )
        outputs.append(output)
    elif isinstance(template_resource, LayerVersion):
        # For LayerVersion, Ref() *is* the layer version ARN.
        # NOTE(review): when supports_arn is also True, two outputs share the
        # title '<Title>Arn' — confirm this combination is never used.
        output = Output(
            f'{template_resource.title}Arn',
            Value=Ref(template_resource)
        )
        if export:
            setattr(
                output, 'Export',
                Export(Sub(f'{export_name}-Arn'))
            )
        outputs.append(output)
    return outputs
def comments_outputs(comments, export=False):
    """
    Turn a list of single-entry {title: value} dicts into Output objects.

    Args:
        comments: list of dicts; the first key of each is the output title
        export: when True, export each value as '<stack-name>-<title>'
    Returns:
        list of troposphere Output objects (empty when comments is not a list)
    """
    outputs = []
    if not isinstance(comments, list):
        return outputs
    for comment in comments:
        if not isinstance(comment, dict):
            continue
        title = list(comment.keys())[0]
        args = {
            'title': title,
            'Value': comment[title]
        }
        if export:
            args['Export'] = Export(Sub(f'${{AWS::StackName}}-{title}'))
        outputs.append(Output(**args))
    return outputs
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,912
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/handlers/stack_manage.py
|
"""
Functions to help creation / update of stacks
"""
import boto3
from ozone.handlers import UPDATABLE_STATUSES
from botocore.exceptions import ValidationError, ClientError
def check_if_stack_exists(client, stack_name):
    """
    Look up a CloudFormation stack and report whether it can be updated.

    Args:
        client: boto3 cloudformation client
        stack_name: name of the CFN stack
    Returns:
        (False, None) when the stack does not exist
        (True, stack_description) when the stack is in an updatable state
        None otherwise (exists but not updatable, or unexpected error shape)
    """
    res = {}
    try:
        res = client.describe_stacks(StackName=stack_name)
    except (ClientError, ValidationError) as error:
        response = error.response['Error']
        # Bug fix: str.find() returns -1 (truthy) when absent and 0 (falsy)
        # when the message starts with the needle, so the original truthiness
        # test was inverted in both common cases. Use membership instead.
        if (response['Code'] == 'ValidationError' and
                'does not exist' in response['Message']):
            return (False, None)
    if 'Stacks' in res.keys() and res['Stacks']:
        if res['Stacks'][0]['StackStatus'] in UPDATABLE_STATUSES:
            return (True, res['Stacks'][0])
def create_update_stack(client, **cfn_args):
    """
    Create the stack, or update it when it already exists and is updatable.

    Args:
        client: boto3 cloudformation client
        cfn_args: arguments for the CFN create_stack / update_stack call
    Returns:
        boto3 response dict containing 'StackId'
    Raises:
        ClientError: re-raised for anything other than a no-op update
    """
    stack_exists = check_if_stack_exists(
        client, cfn_args['StackName']
    )
    if stack_exists is not None and stack_exists[0]:
        # These kwargs are only valid on create_stack.
        for key in ['EnableTerminationProtection', 'OnFailure']:
            cfn_args.pop(key, None)
        try:
            return client.update_stack(**cfn_args)
        except ClientError as error:
            response = error.response['Error']
            if (response['Code'] == 'ValidationError' and
                    response['Message'] == 'No updates are to be performed.'):
                return {'StackId': stack_exists[1]['StackId']}
            # Bug fix: the original swallowed every other ClientError and
            # implicitly returned None.
            raise
    return client.create_stack(**cfn_args)
def check_if_stackset_exists(client, stackset_name):
    """
    Look up a CloudFormation StackSet.

    Args:
        client: boto3 cloudformation client
        stackset_name: name of the CFN StackSet
    Returns:
        (False, None) when the StackSet does not exist
        (True, stackset_description) when it exists and is ACTIVE
        None otherwise
    """
    res = {}
    try:
        # Bug fixes: the original passed StacksetName= (wrong casing) and
        # referenced the undefined variable `stack_name`, so this call
        # always raised.
        res = client.describe_stack_set(StackSetName=stackset_name)
    except (ClientError, ValidationError) as error:
        response = error.response['Error']
        # Stack-set lookups fail with StackSetNotFoundException; keep the
        # original ValidationError handling as a fallback.
        if (response['Code'] == 'StackSetNotFoundException' or
                (response['Code'] == 'ValidationError' and
                 'does not exist' in response['Message'])):
            return (False, None)
    # describe_stack_set returns a single 'StackSet' object, not 'Stacks'.
    if 'StackSet' in res.keys() and res['StackSet']:
        if res['StackSet'].get('Status') == 'ACTIVE':
            return (True, res['StackSet'])
def create_update_stack_set(client, **cfn_args):
    """
    Create the StackSet, or update it when it already exists.

    Args:
        client: boto3 cloudformation client
        cfn_args: arguments for the create_stack_set / update_stack_set call
    Returns:
        boto3 response dict
    Raises:
        ClientError: re-raised for anything other than a no-op update
    """
    # Bug fix: the original called check_if_stack_set_exists, which does not
    # exist (NameError); the helper is named check_if_stackset_exists.
    # NOTE(review): stack-set APIs key on 'StackSetName' — confirm callers
    # really pass 'StackName' as this code expects.
    stackset_exists = check_if_stackset_exists(
        client, cfn_args['StackName']
    )
    if stackset_exists is not None and stackset_exists[0]:
        try:
            return client.update_stack_set(**cfn_args)
        except ClientError as error:
            response = error.response['Error']
            if (response['Code'] == 'ValidationError' and
                    response['Message'] == 'No updates are to be performed.'):
                return {'StackId': stackset_exists[1]['StackId']}
            # Bug fix: other errors previously fell through to an
            # UnboundLocalError on stack_r.
            raise
    return client.create_stack_set(**cfn_args)
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,913
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/filters/regexes.py
|
"""Regular expressions used by the ARN/name filters."""

S3_PATH_URL = r's3:\/\/([a-z0-9A-Z-]+)\/([\x00-\x7F][^\n]+$)'
S3_ARN_PREFIX = 'arn:aws:s3:::'
S3_NAME = r'[a-z0-9-]+'
S3_ARN = r'^(arn:aws:s3:::[a-z-]+)$'
OU_PATH = r'^([^\/][A-Za-z0-9\/]+[^\/]$)$|^(\/root)$|^([a-zA-Z0-9]+)$'
IAM_ROLE_NAME = r'^[a-zA-Z0-9-_]+$'
IAM_ROLE_ARN = r'^(arn:aws:iam::[0-9]{12}:role\/[a-zA-Z0-9-_]+$)'
LAMBDA_NAME = IAM_ROLE_NAME
# Bug fix: Lambda function ARNs separate the resource with ':' not '/'
# (arn:aws:lambda:region:account:function:name); the name class now matches
# LAMBDA_NAME (letters, digits, '-', '_').
LAMBDA_ARN = r'^(arn:aws:lambda:[a-z]{2}-[a-z]{1,12}-[0-9]{1}:[0-9]{12}:function:[a-zA-Z0-9-_]+)$'
LAMBDA_LAYER_VERSION = r'(^[a-z]+:[0-9]+$)'
LAMBDA_LAYER_ARN = r'(^arn:aws:lambda:[a-z]{2}-[a-z]{1,12}-[0-9]{1}:[0-9]{12}:layer:[a-zA-Z0-9]+:[0-9]{1,10})'
KMS_KEY_ID = r'^([a-z0-9]{8}(-[a-z0-9-]{4}){3}-[a-z0-9]+)$'
KMS_KEY_ARN = r'^(arn:aws:kms:[a-z]{2}-[a-z]{1,10}-[0-9]{1}:[0-9]{12}:key\/[a-z0-9]{8}(-[a-z0-9-]{4}){3}-[a-z0-9]+)$'
KMS_ALIAS = r'(^(alias/)([a-zA-Z0-9-_/]+)$)'
# Bug fix: the original embedded the '^'/'$' anchors of KMS_ALIAS in the
# middle of the pattern, so it could never match any alias ARN.
KMS_ALIAS_ARN = r'^(arn:aws:kms:[a-z]{2}-[a-z]{1,10}-[0-9]{1}:[0-9]{12}:alias/[a-zA-Z0-9-_/]+)$'
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,914
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resources/s3/bucket.py
|
"""
Generic Bucket build attempt
"""
from troposphere import (
Sub
)
from troposphere.s3 import (
Bucket,
LifecycleConfiguration,
LifecycleRule,
VersioningConfiguration,
AbortIncompleteMultipartUpload,
BucketEncryption,
SseKmsEncryptedObjects,
SourceSelectionCriteria,
ServerSideEncryptionRule,
ServerSideEncryptionByDefault,
ReplicationConfiguration,
ReplicationConfigurationRules,
ReplicationConfigurationRulesDestination,
EncryptionConfiguration
)
from ozone.filters.arns import (
s3_bucket as filter_s3bucket,
iam_role as filter_role
)
S3_ARN = 'arn:aws:s3:::'
class S3Bucket(Bucket, object):
    """
    Extends troposphere's Bucket with helpers that wire up encryption,
    lifecycle, versioning and cross-region replication from kwargs.
    """
    def set_replication_rule(self, replica_bucket, **kwargs):
        """
        Builds the replication rule towards the destination bucket.

        Args:
            replica_bucket: Name/ARN of the destination s3 bucket
            kwargs:
                UseEncryptionReplication: bool() encrypt replicated objects
                ReplicaKmsKeyID: alias or arn of the KMS key for replicas
                ReplicateEncryptedObjects: bool() replicate SSE-KMS objects
        Returns:
            rule ReplicationConfigurationRule
        """
        replica_bucket_arn = filter_s3bucket(replica_bucket)
        destination = ReplicationConfigurationRulesDestination(
            Bucket=replica_bucket_arn
        )
        if 'UseEncryptionReplication' in kwargs.keys() and kwargs['UseEncryptionReplication']:
            encryption_config = EncryptionConfiguration(
                ReplicaKmsKeyID=kwargs['ReplicaKmsKeyID']
            )
            setattr(destination, 'EncryptionConfiguration', encryption_config)
        # SSE-KMS objects are only replicated when explicitly requested
        if 'ReplicateEncryptedObjects' in kwargs.keys() and kwargs['ReplicateEncryptedObjects']:
            source_criteria = SourceSelectionCriteria(
                SseKmsEncryptedObjects=SseKmsEncryptedObjects(
                    Status='Enabled'
                )
            )
        else:
            source_criteria = SourceSelectionCriteria(
                SseKmsEncryptedObjects=SseKmsEncryptedObjects(
                    Status='Disabled'
                )
            )
        rule = ReplicationConfigurationRules(
            Prefix='',
            Status='Enabled',
            Destination=destination,
            SourceSelectionCriteria=source_criteria
        )
        return rule
    def set_bucket_replication(self, **kwargs):
        """
        Attaches the ReplicationConfiguration to the bucket.

        Args:
            kwargs:
                ReplicationRole: str() IAM role name or full ARN
                ReplicaBucket: name of the destination bucket
        """
        config = ReplicationConfiguration(
            Role=filter_role(kwargs['ReplicationRole']),
            Rules=[
                self.set_replication_rule(
                    kwargs['ReplicaBucket'],
                    **kwargs
                )
            ]
        )
        setattr(self, 'ReplicationConfiguration', config)
    def set_bucket_lifecycle(self):
        """
        Attaches a LifecycleConfiguration that aborts incomplete
        multipart uploads after 3 days.
        """
        config = LifecycleConfiguration(
            Rules=[
                LifecycleRule(
                    Status='Enabled',
                    AbortIncompleteMultipartUpload=AbortIncompleteMultipartUpload(
                        DaysAfterInitiation=3
                    )
                )
            ]
        )
        setattr(self, 'LifecycleConfiguration', config)
    def set_bucket_encryption(self, **kwargs):
        """
        Attaches default SSE-KMS BucketEncryption to the bucket.

        Args:
            kwargs:
                KMSMasterKeyID: optional KMS key for default encryption;
                    falls back to the account's aws/s3 key when absent
        """
        if 'KMSMasterKeyID' in kwargs.keys():
            encryption_default = ServerSideEncryptionByDefault(
                SSEAlgorithm='aws:kms',
                KMSMasterKeyID=kwargs['KMSMasterKeyID']
            )
        else:
            encryption_default = ServerSideEncryptionByDefault(
                SSEAlgorithm='aws:kms'
            )
        config = BucketEncryption(
            ServerSideEncryptionConfiguration=[
                ServerSideEncryptionRule(
                    ServerSideEncryptionByDefault=encryption_default
                )
            ]
        )
        setattr(self, 'BucketEncryption', config)
    def __init__(self, title, bucket_name, **kwargs):
        """
        Args:
            title: logical resource name in the template
            bucket_name: base name of the bucket
            kwargs:
                AppendRegion: boolean
                UseVersioning: boolean
                UseLifecycle: boolean
                UseReplication: boolean
                ReplicationRole: str() role name of full ARN
                ReplicaBucket: Name of the bucket for replication
                UseEncryptionReplication: boolean
                ReplicateEncryptedObjects: boolean (default true)
                ReplicaKmsKeyID: alias or arn of the KMS Key
                UseEncryption: boolean
                KMSMasterKeyID: Default KMS Key ID for encryption
        returns:
            S3Bucket
        """
        super().__init__(title)
        if 'AppendRegion' in kwargs.keys() and kwargs['AppendRegion']:
            self.BucketName = Sub(f'{bucket_name}-${{AWS::Region}}')
        else:
            self.BucketName = bucket_name
        if 'UseEncryption' in kwargs.keys() and kwargs['UseEncryption']:
            self.set_bucket_encryption(**kwargs)
        if 'UseLifecycle' in kwargs.keys() and kwargs['UseLifecycle']:
            self.set_bucket_lifecycle()
        # Versioning is mandatory for replication, so it is enabled before
        # the replication configuration is attached.
        if not hasattr(self, 'VersioningConfiguration'):
            setattr(
                self, 'VersioningConfiguration',
                VersioningConfiguration(
                    Status='Enabled'
                )
            )
        if 'UseReplication' in kwargs.keys() and kwargs['UseReplication']:
            self.set_bucket_replication(**kwargs)
        # The original repeated the versioning check here against an
        # undefined `bucket` variable (latent NameError); that dead code
        # is removed since versioning is already guaranteed above.
if __name__ == '__main__':
    # Smoke-test: S3Bucket requires both the logical title and the bucket
    # name; the original call omitted `bucket_name` and raised TypeError.
    # The kwarg is KMSMasterKeyID (capital D), not KMSMasterKeyId.
    import json
    print(json.dumps(
        S3Bucket(
            'test',
            'test-bucket',
            UseEncryption=False,
            UseLifecycle=False,
            UseReplication=True,
            UseEncryptionReplication=False,
            ReplicationRole='arn:aws:iam:::role/toto',
            ReplicaBucket='destination-finale',
            KMSMasterKeyID=Sub('some-id-like-that'),
            ReplicaKmsKeyID=Sub('replica-key-id')
        ).to_dict(),
        indent=2
    ))
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,915
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resources/kms/policy.py
|
"""
Functions to generate KMS Keys policies
"""
from troposphere import (
Sub
)
from ozone.resolvers.organizations import (
find_org_in_tree,
get_ou_accounts,
get_all_accounts_in_ou_and_sub
)
# Key policy statement granting the CloudTrail service read-only access to
# the key metadata (kms:DescribeKey) so a trail can validate the key.
KEY_POLICY_ALLOW_CLOUDTRAIL_READ = {
    "Sid": "Allow CloudTrail to describe key",
    "Effect": "Allow",
    "Principal": {
        "Service": "cloudtrail.amazonaws.com"
    },
    "Action": "kms:DescribeKey",
    "Resource": "*"
}
def iam_access(iam_users, iam_roles):
    """
    Builds the default KMS key administration statement for the account.

    Args:
        iam_users: list() of IAM user names local to the account
        iam_roles: list() of IAM role names local to the account
    Returns:
        statement: dict() representing the core key policy
    """
    principals = [Sub("arn:aws:iam::${AWS::AccountId}:root")]
    if iam_users:
        principals += [
            Sub(f"arn:aws:iam::${{AWS::AccountId}}:user/{user}")
            for user in iam_users
        ]
    if iam_roles:
        principals += [
            Sub(f"arn:aws:iam::${{AWS::AccountId}}:role/{role}")
            for role in iam_roles
        ]
    return {
        "Sid": "Allow administration of the key",
        "Effect": "Allow",
        "Principal": {
            "AWS": principals
        },
        "Action": [
            "kms:*"
        ],
        "Resource": "*"
    }
def _cloudtrail_access(accounts_ids):
"""
Args:
accounts_ids: List of AWS Account Ids [0-9]{12}
Returns:
statemens: list of KMS Policy statements to be added to the Key policy
"""
trails_arns = []
accounts_roots = []
for account_id in accounts_ids:
accounts_roots.append(f'arn:aws:iam::{account_id}:root')
trails_arns.append(f'arn:aws:cloudtrail:*:{account_id}:trail/*')
statements = [
{
"Sid": "Allow CloudTrail to encrypt logs",
"Effect": "Allow",
"Resource": ["*"],
"Principal": {
"Service": "cloudtrail.amazonaws.com"
},
"Action": [
"kms:GenerateDataKey*"
],
"Condition":{
"StringLike":
{
"kms:EncryptionContext:aws:cloudtrail:arn": trails_arns
}
}
},
{
"Sid": "Allow principals in the account to decrypt log files",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": [
"kms:Decrypt",
"kms:ReEncryptFrom"
],
"Resource": "*",
"Condition": {
"StringLike": {
"kms:EncryptionContext:aws:cloudtrail:arn": trails_arns
}
}
},
{
"Sid": "Enable encrypted CloudTrail log read access",
"Effect": "Allow",
"Principal": {
"AWS": accounts_roots
},
"Action": "kms:Decrypt",
"Resource": "*",
"Condition": {
"Null": {
"kms:EncryptionContext:aws:cloudtrail:arn": "false"
}
}
}
]
return statements
def add_cloudtrail_access(accounts_ids):
    """
    Grants CloudTrail access to the key for a list of account Ids.

    Args:
        accounts_ids: List of account Ids ([0-9]{12})
    Returns:
        list of KMS key policy statements
    """
    statements = _cloudtrail_access(accounts_ids)
    return statements
def add_cloudtrail_ou_access(ou_path, use_as_root=False):
    """
    Grants CloudTrail access for every account under an Organization Unit.

    Args:
        ou_path: path or name of the OU
        use_as_root: when True, also include accounts from all sub-OUs
    Returns:
        list of KMS key policy statements
    Raises:
        ValueError: when no account is found under the OU
    """
    ou_info = find_org_in_tree(ou_path)
    if use_as_root:
        ou_accounts_list = get_all_accounts_in_ou_and_sub(ou_info['Id'])
    else:
        ou_accounts_list = get_ou_accounts(ou_info['Id'])
    # Explicit check instead of try/assert: `assert` is stripped under
    # `python -O`, which would have silently skipped the validation.
    if not ou_accounts_list:
        raise ValueError(f'No accounts found for {ou_path}')
    return add_cloudtrail_access([account['Id'] for account in ou_accounts_list])
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,916
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resolvers/organizations/__init__.py
|
#!/usr/bin/env python
"""
Scripts to get specific values for AWS Organization Units
"""
import boto3
def get_root():
    """
    Fetches the organization root.

    Returns:
        dict with the root info (there is only ever one root)
    """
    return boto3.client('organizations').list_roots()['Roots'][0]
def get_ou_by_name(ou_name, parent_id):
    """
    Looks up an OU by name among the children of a parent.

    Args:
        ou_name: name of the OU
        parent_id: Id of the parent for that OU
    Returns:
        dict with the OU info, or None when no child matches the name
    """
    client = boto3.client('organizations')
    listing = client.list_organizational_units_for_parent(
        ParentId=parent_id
    )
    matches = [
        ou_info for ou_info in listing['OrganizationalUnits']
        if ou_info['Name'] == ou_name
    ]
    return matches[0] if matches else None
def lookup_org_by_id(org_id):
    """
    Describes an OU from its Id.

    Args:
        org_id: OU Id
    Returns:
        Dict() with the info of the OU
    """
    client = boto3.client('organizations')
    response = client.describe_organizational_unit(OrganizationalUnitId=org_id)
    return response['OrganizationalUnit']
def get_all_accounts_in_ou_and_sub(parent_id, ous_list=None, accounts_list=None):
    """
    Collects every account under an OU, including all of its sub-OUs.

    Args:
        parent_id: ID of the parent to lookup from
        ous_list: internal accumulator of OUs still to visit
        accounts_list: internal accumulator of accounts found so far
    Returns:
        accounts_list: list() of all the accounts in the OU and Sub OUs
    """
    pending_ous = [] if ous_list is None else ous_list
    found_accounts = [] if accounts_list is None else accounts_list
    # Breadth-first walk of the OU tree, iterative instead of recursive
    found_accounts += get_ou_accounts(parent_id)
    pending_ous += get_ou_sub_ous(parent_id)
    while pending_ous:
        next_ou = pending_ous.pop(0)
        found_accounts += get_ou_accounts(next_ou['Id'])
        pending_ous += get_ou_sub_ous(next_ou['Id'])
    return found_accounts
def find_org_in_tree(ou_path, parent_ou=None, separator='/'):
    """
    Resolves an OU from its full path in the organization tree.

    Args:
        ou_path: full path to the ou, e.g. 'platform/prod'
        parent_ou: dict of the parent OU (internal, used for recursion)
        separator: path component separator, defaults to '/'
    Returns:
        dictionary containing Id, Name and Arn of the OU
    Raises:
        ValueError() in case nothing was found
    """
    if not ou_path.startswith('/') and parent_ou is None:
        ou_path = '/' + ou_path
    client = boto3.client('organizations')
    # Root requested explicitly, or a path with no separator at the top level
    if ((ou_path.find(separator) < 0 and parent_ou is None) or
            ou_path == '/root' or
            ou_path == separator):
        return get_root()
    # First call on an absolute path: strip the leading separator and
    # restart the descent from the organization root.
    if (ou_path.find(separator) >= 0 and
            ou_path.startswith(separator) and
            parent_ou is None):
        child = ou_path.split(separator, 1)[-1]
        parent_ou = get_root()
        return find_org_in_tree(child, parent_ou)
    # Intermediate component: find the matching child OU and recurse one
    # level deeper with the remainder of the path.
    if ou_path.find(separator) > 0 and parent_ou is not None:
        new_parent = ou_path.split(separator, 1)[0]
        new_child = ou_path.split(separator, 1)[-1]
        children = client.list_children(
            ParentId=parent_ou['Id'],
            ChildType='ORGANIZATIONAL_UNIT'
        )['Children']
        for child in children:
            # note: an unused `the_ou = None` local was removed here
            ou_info = lookup_org_by_id(child['Id'])
            if new_parent == ou_info['Name']:
                return find_org_in_tree(new_child, ou_info)
    # Leaf component: look for the OU among the parent's direct children
    if ou_path.find(separator) < 0 and parent_ou is not None:
        children = client.list_children(
            ParentId=parent_ou['Id'],
            ChildType='ORGANIZATIONAL_UNIT'
        )['Children']
        for child in children:
            ou_info = lookup_org_by_id(child['Id'])
            if ou_path == ou_info['Name']:
                return ou_info
    raise ValueError(f'Could not find the ou {ou_path}')
def get_ou_sub_ous(parent_id, ous_list=None, next_token=None):
    """
    Lists the OUs directly under a parent, following pagination.

    Args:
        parent_id: ID of the parent to lookup from
        ous_list: only used for recursion
        next_token: only used for recursion
    Returns:
        ous_list: list() of the child OUs of the parent
    """
    client = boto3.client('organizations')
    if ous_list is None:
        ous_list = []
    if isinstance(next_token, str):
        ous_r = client.list_children(
            ParentId=parent_id, ChildType="ORGANIZATIONAL_UNIT", NextToken=next_token
        )
    else:
        ous_r = client.list_children(
            ParentId=parent_id, ChildType="ORGANIZATIONAL_UNIT")
    ous_list += ous_r['Children']
    if 'NextToken' in ous_r.keys():
        # Bug fix: the original recursed with the stale `next_token`
        # argument instead of the fresh ous_r['NextToken'], re-fetching
        # the same page forever on paginated results.
        return get_ou_sub_ous(parent_id, ous_list, ous_r['NextToken'])
    return ous_list
def get_ou_accounts(parent_id, accounts_list=None, next_token=None):
    """
    Lists the accounts directly attached to an OU, following pagination.

    Args:
        parent_id: string that represents the Id of the parent OU
        accounts_list: accumulator from a previous recursive call
        next_token: pagination token from a previous recursive call
    Returns:
        list of dict() with the information of the accounts in the OU
    """
    collected = [] if accounts_list is None else accounts_list
    client = boto3.client('organizations')
    call_args = {'ParentId': parent_id}
    if isinstance(next_token, str):
        call_args['NextToken'] = next_token
    res = client.list_accounts_for_parent(**call_args)
    collected += res['Accounts']
    if 'NextToken' in res.keys():
        return get_ou_accounts(parent_id, collected, res['NextToken'])
    return collected
def get_ou_accounts_by_ou_name(ou_name, accounts_list=None, parent=None):
    """
    Returns the accounts of an OU looked up by its name.

    Args:
        ou_name: name of the OU
        accounts_list: unused; kept for backward compatibility of the signature
        parent: Id of the parent OU; defaults to the organization root
    Returns:
        list of dict() with the information of the accounts in the OU
    Raises:
        ValueError: when the OU cannot be found under the parent
    """
    if parent is None:
        parent = get_root()['Id']
    ou_info = get_ou_by_name(ou_name, parent)
    # get_ou_by_name returns None on a miss; the original bare `except:`
    # masked the resulting TypeError (and everything else) behind a
    # generic error.
    if ou_info is None:
        raise ValueError(f'Failed to retrieve the organization unit of name {ou_name}')
    return get_ou_accounts(ou_info['Id'])
if __name__ == '__main__':
    # Smoke-test: print every account name under the productone/prod OU.
    # The unused SEARCH_OU constant from the original was removed.
    OUS = get_all_accounts_in_ou_and_sub(find_org_in_tree('productone/prod')['Id'])
    for acct in OUS:
        print(acct['Name'])
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,917
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/templates/examples/list_amis.py
|
#!/usr/bin/env python
"""
Simple lambda function that returns the latest Amazon Linux AMI to prove how it works in CloudFormation
"""
import boto3
from datetime import datetime
from datetime import timedelta as dt
from ozone.handlers.responder import Responder
def lambda_handler(event, context):
    """
    Lambda function handler / entry point.

    Finds the most recently created Amazon Linux x86_64 HVM AMI and
    answers the CloudFormation custom-resource callback with its Id.

    Args:
        event: CloudFormation custom resource event dict
        context: Lambda context object
    """
    # Initialize before the try block: the original left `result` and
    # `data` unbound on the exception path, so the respond() call below
    # raised NameError instead of reporting the failure.
    result = "FAILED"
    data = {}
    try:
        client = boto3.client('ec2')
        req = client.describe_images(
            Owners=['amazon'],
            Filters=[
                {
                    'Name': 'architecture',
                    'Values': ['x86_64']
                },
                {
                    'Name': 'state',
                    'Values': ['available']
                },
                {
                    'Name': 'name',
                    'Values': ['amzn*']
                },
                {
                    'Name': 'virtualization-type',
                    'Values': ['hvm']
                }
            ]
        )
        # CreationDate format example: 2017-03-20T09:28:50.000Z
        # max() over the parsed creation date replaces the original
        # min-delta scan, which also popped the wrong list element.
        the_image = max(
            req['Images'],
            key=lambda image: datetime.strptime(
                image['CreationDate'], "%Y-%m-%dT%H:%M:%S.%fZ"
            )
        )
        data = {
            'ImageId': the_image['ImageId']
        }
        result = "SUCCESS"
    except Exception as error:
        print(error)
    responder = Responder(event)
    responder.sender.respond(
        event, context, result, response_data=data,
        reason='Latest AMI Id for Amazon Linux', physical_resource_id='abcd'
    )
if __name__ == '__main__':
    # Local smoke-test with a sample CloudFormation custom-resource event
    sample_event = {
        "RequestType": "Create",
        "ResponseURL": "http://pre-signed-S3-url-for-response",
        "StackId": "arn:aws:cloudformation:eu-west-1:123456789012:stack/MyStack/guid",
        "RequestId": "unique id for this create request",
        "ResourceType": "Custom::TestResource",
        "LogicalResourceId": "MyTestResource",
        "ResourceProperties": {
            "StackName": "MyStack",
            "List": [
                "1",
                "2",
                "3"
            ]
        }
    }
    lambda_handler(sample_event, None)
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,918
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/templates/awslambdalayer.py
|
#!/usr/bin/env python
"""
Script to create a new Lambda Layer CFN template via Troposphere
"""
from datetime import datetime
from troposphere.awslambda import (
LayerVersion, Content, LayerVersionPermission
)
from troposphere import (
Parameter,
Template,
Sub,
Ref
)
from ozone.handlers.lambda_tools import check_params_exist
from ozone.outputs import object_outputs
from datetime import datetime as dt
DATE = dt.utcnow().strftime('%Y%m%d%H%M%S')
def template(make_public=False, **kwargs):
    """
    Builds the CloudFormation template for a new Lambda Layer version.

    Args:
        make_public: when True, adds a LayerVersionPermission granting
            lambda:GetLayerVersion to any principal
        kwargs:
            Runtimes: list of compatible runtimes for the layer
            Bucket: S3 bucket holding the layer archive
            Key: S3 key of the layer archive
    Returns:
        troposphere Template for the layer version
    Raises:
        AttributeError: when a required parameter is missing from kwargs
    """
    required_params = ['Runtimes', 'Bucket', 'Key']
    check_params_exist(required_params, kwargs)
    # Renamed so the local does not shadow this function's own name
    layer_template = Template()
    layer_name = layer_template.add_parameter(Parameter(
        'LayerName',
        Type="String",
        AllowedPattern="[a-zA-Z0-9-]*"
    ))
    layer_template.set_description('Default template for Lambda Layer version')
    layer_template.set_transform('AWS::Serverless-2016-10-31')
    version = layer_template.add_resource(LayerVersion(
        f'LayerVersion{DATE}',
        DeletionPolicy='Retain',
        CompatibleRuntimes=kwargs['Runtimes'],
        Description=Sub(f'Layer ${{{layer_name.title}}}'),
        LayerName=Ref(layer_name),
        Content=Content(
            S3Bucket=kwargs['Bucket'],
            S3Key=kwargs['Key']
        )
    ))
    if make_public:
        # Registering the resource is enough; the original bound it to an
        # unused UPPER_CASE local.
        layer_template.add_resource(LayerVersionPermission(
            f'LambdaVersionPermission{DATE}',
            DeletionPolicy='Retain',
            Principal='*',
            LayerVersionArn=Ref(version),
            Action='lambda:GetLayerVersion'
        ))
    layer_template.add_output(object_outputs(version, export=True))
    return layer_template
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,919
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/handlers/lambda_tools.py
|
#!/usr/bin/env python
"""
Tools functions
"""
def check_params_exist(params_list, event, event_attribute=None, is_cfn=False):
    """
    Checks that parameters in event exist.

    Depending on the type of call to the Lambda function, checks that all
    the input parameters are present, either at the top level of the event
    or nested under a given key.

    Args:
    ----------
    params_list: list
        List of parameters to check the presence in event
    event: dict
        Lambda function handler event dict
    event_attribute: string
        Name of the attribute to check if parameters are
        stored within that key of the event dict
    is_cfn: bool
        Specifies if the caller is CloudFormation and
        therefore lookup into ResourceProperties key of event

    Returns
    ------
    bool
        True when every parameter has been found
        (the original docstring wrongly described a tuple return)

    Raises
    ------
    AttributeError
        When the lookup key or any required parameter is missing
    """
    event_lookup = None
    if is_cfn and not event_attribute:
        event_lookup = "ResourceProperties"
    elif not is_cfn and event_attribute:
        if event_attribute not in event.keys():
            raise AttributeError("Event does not have key {0}".format(event_attribute))
        event_lookup = event_attribute
    # Debug print() statements removed: library code must not write to stdout
    params_source = event if not event_lookup else event[event_lookup]
    for param in params_list:
        if param not in params_source.keys():
            raise AttributeError("{0} not in events attributes".format(param))
    return True
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,920
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resources/iam/policies/__init__.py
|
# ARN of the AWS-managed policy granting Lambda basic CloudWatch Logs access
AWS_LAMBDA_BASIC_EXEC = 'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole'
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,921
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resolvers/kms/__init__.py
|
import boto3
def get_kms_key_id_via_alias(alias_name, region=None):
    """
    Resolves a KMS key Id from an alias name.

    Args:
        alias_name: name of the KMS alias
        region: optional region to query; defaults to the session region
    Returns:
        the key Id as a string, or None when no alias resolves
    """
    client_kwargs = {} if region is None else {'region_name': region}
    client = boto3.client('kms', **client_kwargs)
    for alias in client.list_aliases()['Aliases']:
        # Skip aliases without a target key (e.g. pending deletion)
        if alias['AliasName'] == alias_name and 'TargetKeyId' in alias.keys():
            return alias['TargetKeyId']
    return None
def get_kms_key_arn_via_alias(alias_name, region):
    """
    Returns the ARN of the KMS Key based on the alias.

    Args:
        alias_name: name of the KMS alias
        region: region to query
    Returns:
        the key ARN as a string, or None when the alias does not resolve
    """
    key_id = get_kms_key_id_via_alias(alias_name, region)
    # Bug fix: get_kms_key_id_via_alias returns a string (or None), not a
    # tuple; the original indexed key_id[0]/key_id[1], reading single
    # characters of the Id.
    if key_id:
        client = boto3.client('kms', region_name=region)
        return client.describe_key(
            KeyId=key_id
        )['KeyMetadata']['Arn']
    return None
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,922
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resources/s3/cloudtrail_policy.py
|
from troposphere import (
Sub,
Ref
)
from troposphere.s3 import BucketPolicy
from ozone.filters.arns import s3_bucket as filter_s3bucket
from ozone.resolvers.organizations import (
find_org_in_tree,
get_ou_accounts,
get_all_accounts_in_ou_and_sub
)
def _cloudtrail_service_access_check(bucket_arn):
statement = {
"Sid": "AWSCloudTrailAclCheck",
"Effect": "Allow",
"Principal": {
"Service": "cloudtrail.amazonaws.com"
},
"Action": "s3:GetBucketAcl",
"Resource": bucket_arn
}
return statement
def _cloudtrail_arns(bucket, **kwargs):
    """
    Builds the object ARNs that CloudTrail is allowed to write to.

    Args:
        bucket: troposphere Bucket object (its title is used in Sub)
        kwargs:
            AccountsIds: list of AWS account Ids
            OrganizationUnit: str() of the OU name or path
            OuAsRoot: bool() include accounts of all sub-OUs
    Returns:
        trail_arns list() of the Resources to allow for CloudTrail
    Raises:
        ValueError: when no account is found for the OU
    """
    arns = []
    if 'AccountsIds' in kwargs.keys() and isinstance(kwargs['AccountsIds'], list):
        for account in kwargs['AccountsIds']:
            arns.append(Sub(f'${{{bucket.title}.Arn}}/AWSLogs/{account}/*'))
    elif 'OrganizationUnit' in kwargs.keys():
        ou_path = kwargs['OrganizationUnit']
        ou_info = find_org_in_tree(ou_path)
        if 'OuAsRoot' in kwargs.keys() and kwargs['OuAsRoot']:
            ou_accounts_list = get_all_accounts_in_ou_and_sub(ou_info['Id'])
        else:
            ou_accounts_list = get_ou_accounts(ou_info['Id'])
        # Explicit check: the original try/assert referenced the undefined
        # name `ou_path` in its error message (NameError) and was stripped
        # under `python -O`.
        if not ou_accounts_list:
            raise ValueError(f'No accounts found for {ou_path}')
        for account in ou_accounts_list:
            arns.append(Sub(f'${{{bucket.title}.Arn}}/AWSLogs/{account["Id"]}/*'))
    return arns
def _cloudtrail_accounts_access(bucket, **kwargs):
    """
    Builds the policy statement letting the CloudTrail service put
    log objects under the per-account AWSLogs/ prefixes of the bucket.

    Args:
        bucket: the bucket object whose per-account ARNs are generated
        kwargs: forwarded to _cloudtrail_arns (AccountsIds / OrganizationUnit / OuAsRoot)
    Returns:
        statement dict() for the bucket policy
    """
    # e.g. "arn:aws:s3:::trail-bucket/AWSLogs/123456789012/*"
    resources = _cloudtrail_arns(bucket, **kwargs)
    return {
        "Sid": "AWSCloudTrailWrite20131101",
        "Effect": "Allow",
        "Principal": {"Service": "cloudtrail.amazonaws.com"},
        "Action": "s3:PutObject",
        "Condition": {
            "StringEquals": {"s3:x-amz-acl": "bucket-owner-full-control"}
        },
        "Resource": resources,
    }
def _cloudtrail_bucket_policy(bucket, **kwargs):
    """
    Assembles the complete CloudTrail bucket policy document.

    Args:
        kwargs:
            AccountsIds: List of account Ids to grant access to aws logs path
            OrganizationUnit: str() of the OU name or Path
            OuAsRoot: bool() to define whethere all accounts in OU and sub OU should be used
    Returns:
        bucket_policy dict()
    """
    bucket_arn = filter_s3bucket(bucket)
    assert bucket_arn
    return {
        "Version": "2012-10-17",
        "Statement": [
            _cloudtrail_service_access_check(bucket_arn),
            _cloudtrail_accounts_access(bucket, **kwargs),
        ],
    }
def policy_build(bucket, **kwargs):
    """
    Creates the BucketPolicy resource granting CloudTrail access.

    Args:
        kwargs:
            OrganizationUnit: str() of the OU name or Path
            OuAsRoot: bool() to define whethere all accounts in OU and sub OU should be used
    Returns:
        bucket_policy BucketPolicy()
    """
    document = _cloudtrail_bucket_policy(bucket, **kwargs)
    return BucketPolicy(
        'BucketPolicy',
        Bucket=Ref(bucket),
        PolicyDocument=document
    )
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,923
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resources/kms/key.py
|
from troposphere import (
Sub
)
from troposphere.kms import (
Key
)
from .policy import (
iam_access,
add_cloudtrail_access,
add_cloudtrail_ou_access
)
def key_build(**kwargs):
    """
    Creates a KMS Key with a policy for IAM admins and, optionally, CloudTrail.

    Args:
        kwargs:
            IamUsers: list of IAM user names to grant admin access to the key
            IamRoles: list of IAM role names to grant admin access to the key
            UseCloudTrail: bool() to define whether or not the key shall be used for CloudTrail
            AccountsIds: list() of account ids representing the accounts using the key for CT
            OrganizationName: str() of the name of the organization unit
                to lookup accounts for
            UseOrganizationUnitAsRoot: bool() - if True, will lookup all accounts within
                in the OU and in the SubOu to apply it to
            Name: str() to override the logical resource title
            Tags / Description / Enabled / EnableKeyRotation / KeyPolicy: direct overrides
    Returns:
        key Key()
    Raises:
        KeyError: when UseCloudTrail is set without AccountsIds nor OrganizationName
    """
    iam_users = kwargs.get('IamUsers', [])
    iam_roles = kwargs.get('IamRoles', [])
    key_policy = {
        "Version": "2012-10-17",
        "Id": "Key policy for cloudtrail",  # was: "Key polocy for cloudtrail"
        "Statement": []
    }
    key_policy['Statement'].append(iam_access(iam_users, iam_roles))
    if 'UseCloudTrail' in kwargs.keys():
        if 'AccountsIds' in kwargs.keys():
            key_policy['Statement'] += add_cloudtrail_access(kwargs['AccountsIds'])
        elif 'OrganizationName' in kwargs.keys():
            if ('UseOrganizationUnitAsRoot' in kwargs.keys() and
                    kwargs['UseOrganizationUnitAsRoot']):
                key_policy['Statement'] += add_cloudtrail_ou_access(
                    kwargs['OrganizationName'],
                    use_as_root=True
                )
            else:
                key_policy['Statement'] += add_cloudtrail_ou_access(
                    kwargs['OrganizationName']
                )
        else:
            # was: error message said "CloudFormation" - this branch is the
            # CloudTrail configuration path
            raise KeyError(
                'When using CloudTrail, either AccountsIds or OrganizationName must be set'
            )
    kms_key = Key(
        'KmsKey',
        Description=Sub('KMS Key in ${AWS::Region}'),
        Enabled=True,
        EnableKeyRotation=True,
        KeyPolicy=key_policy
    )
    # was: listed 'KmsKeyPolicy', which is not a Key property - the troposphere
    # property name is 'KeyPolicy', so the override could never be applied
    for key in ['Tags', 'Description', 'Enabled', 'EnableKeyRotation', 'KeyPolicy']:
        if key in kwargs.keys():
            setattr(kms_key, key, kwargs[key])
    if 'Name' in kwargs.keys():
        kms_key.title = kwargs['Name']
    return kms_key
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,924
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/handlers/__init__.py
|
# Maximum CloudFormation template body size (bytes) accepted inline by the API;
# larger templates must be uploaded to S3 first.
CFN_MAX_TEMPLATE_SIZE = 51200
# Every possible CloudFormation stack status value.
STATUSES = [
    'CREATE_IN_PROGRESS','CREATE_FAILED','CREATE_COMPLETE',
    'ROLLBACK_IN_PROGRESS','ROLLBACK_FAILED','ROLLBACK_COMPLETE',
    'DELETE_IN_PROGRESS','DELETE_FAILED','DELETE_COMPLETE',
    'UPDATE_IN_PROGRESS','UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',
    'UPDATE_COMPLETE','UPDATE_ROLLBACK_IN_PROGRESS','UPDATE_ROLLBACK_FAILED',
    'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
    'UPDATE_ROLLBACK_COMPLETE','REVIEW_IN_PROGRESS'
]
# Stack statuses from which an update operation may be started.
UPDATABLE_STATUSES = [
    'CREATE_COMPLETE',
    'ROLLBACK_COMPLETE',
    'UPDATE_COMPLETE',
    'UPDATE_ROLLBACK_COMPLETE'
]
# IAM-related capabilities that may need acknowledging on stack operations.
CAPABILITIES = ['CAPABILITY_IAM','CAPABILITY_NAMED_IAM','CAPABILITY_AUTO_EXPAND']
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,925
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/handlers/template_manage.py
|
#/usr/bin/env python
"""
Functions to manage a template and whether it should be stored in S3
"""
from datetime import datetime as dt
import hashlib
import boto3
def create_template_in_s3(bucket, file_name, template_body):
    """
    Uploads the rendered CFN template body to S3 and returns its URL.

    Args:
        bucket: name of the s3 bucket whereto upload the file
        file_name: the name of the file (object key) in S3
        template_body: the body of the template for CFN
    Returns:
        the HTTPS URL to the object in S3
    Raises:
        botocore.exceptions.ClientError: propagated from put_object on failure
    """
    # NOTE(review): the original computed hashlib.sha1(b'{date}') - a digest of
    # the literal bytes "{date}" (missing f-prefix) - and never used the result;
    # that dead code has been removed.
    key = f'{file_name}'
    client = boto3.client('s3')
    client.put_object(
        Body=template_body,
        Key=key,
        Bucket=bucket
    )
    url_path = f'https://s3.amazonaws.com/{bucket}/{key}'
    return url_path
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,926
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/resources/devtools/buildproject.py
|
"""
BuildProject
============
The BuildProject is an object which inherits of the Troposphere object properties
but uses wrapper methods in order to populate / assign values to these. Just because,
I am Lazy.
TriggerGroup
============
TriggerGroup is an object that builds the list of WebhookFilter for Codebuild Triggers Webhook
It performs all sorts of validation to ensure we are doing things right, and allows a more
human friendly way to pass arguments in order to build your filter group.
"""
import re
#from ast import literal_eval
from troposphere.codebuild import (
Environment,
Source,
Project,
Artifacts,
EnvironmentVariable,
ProjectTriggers,
WebhookFilter
)
from troposphere.iam import (
Role,
Policy
)
from ozone.resources.iam.roles import role_trust_policy
from ozone.filters.arns import (
s3_bucket as filter_s3bucket,
iam_role as filter_iamrole
)
from ozone.resources.iam.policies import (
AWS_LAMBDA_BASIC_EXEC
)
from ozone.resolvers.codebuild.runtime import generate_runtime_mapping_and_parameters
def AT_LEAST(subset, superset):
    """Returns True when every item of subset is present in superset."""
    return bool(set(subset) <= set(superset))

def ISSET(key, mapping):
    """Returns mapping[key] when present and truthy, else a falsy value."""
    return key in mapping and mapping[key]

def KEYISSET(key, mapping):
    """Returns True when key is present in mapping with a truthy value."""
    return bool(key in mapping.keys() and mapping[key])

# Sentinel return values of TriggerGroup.validate_args
IS_PR = 1024
IS_PUSH = 1025
# Matches S3 object ARNs pointing at a .pem/.zip/.crt certificate
CERTIFICATE_ARN = r'(^(arn:aws:s3:::[a-z0-9-.]+)\/([a-zA-Z0-9\/]+)(.pem|.zip|.crt)$)'
CERTIFICATE_PATTERN = re.compile(CERTIFICATE_ARN)
PR_PATTERNS = [
    'PULL_REQUEST_CREATED',
    'PULL_REQUEST_UPDATED',
    'PULL_REQUEST_REOPENED'
]
WEBHOOK_PATTERNS = ['PUSH'] + PR_PATTERNS
# was: 'BASE_REF' was listed twice
PR_TYPES = [
    'HEAD_REF',
    'BASE_REF',
    'ACTOR_ACCOUNT_ID'
]
PUSH_TYPES = ['ACTOR_ACCOUNT_ID', 'HEAD_REF', 'FILE_PATH']
WEBHOOK_TYPES = [
    'EVENT',
    'ACTOR_ACCOUNT_ID',
    'HEAD_REF',
    'BASE_REF',
    'FILE_PATH'
]
VALID_FOR_PUSH = ['FILE_PATH', 'ACTOR_ACCOUNT_ID', 'HEAD_REF']
def role_build(bucket_name):
    """
    Creates the CodeBuild service role with S3 object access to the given bucket.

    returns:
        iam.Role
    """
    bucket_arn = filter_s3bucket(bucket_name)
    s3_statement = {
        'Effect': 'Allow',
        'Resource': [bucket_arn],
        'Action': [
            's3:PutObject',
            's3:PutObjectVersion',
            's3:GetObject',
            's3:GetObjectVersion'
        ]
    }
    s3_policy = Policy(
        PolicyName="S3Access",
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [s3_statement]
        }
    )
    return Role(
        "CodeBuildRole",
        Path='/cicd/codebuild/',
        AssumeRolePolicyDocument=role_trust_policy('codebuild'),
        ManagedPolicyArns=[AWS_LAMBDA_BASIC_EXEC],
        Policies=[s3_policy]
    )
class TriggerGroup():
    """
    Webhook Filter Group

    Builds the list of WebhookFilter for CodeBuild webhook triggers, validating
    that the keyword arguments are coherent for either a PR or a PUSH filter.
    """
    @property
    def group(self):
        """
        Returns self.__group
        """
        return self.__group

    @group.setter
    def group(self, group):
        """
        Initializes self.__group (empty list when given None)
        """
        if group is None:
            self.__group = []
        else:
            self.__group = group

    def get_evals(self, **kwargs):
        """
        Computes the boolean pre-conditions used by validate_args.
        (Previously built by calling eval() on concatenated strings; replaced
        with plain boolean expressions - same results, no eval.)

        :returns: evals, dict() of bool
        """
        any_pr_event = (
            KEYISSET("OnUpdate", kwargs) or
            KEYISSET("OnCreate", kwargs) or
            KEYISSET("OnReopen", kwargs)
        )
        evals = {
            'base_forpr': any_pr_event,
            'right_forpr': (
                any_pr_event and
                KEYISSET("SourceBranch", kwargs) and
                KEYISSET("DestBranch", kwargs)
            ),
            'base_forpush': KEYISSET("OnPush", kwargs) and not any_pr_event
        }
        return evals

    def validate_args(self, **kwargs):
        """
        Validates arguments are coherent with what you are trying to achieve (PR vs PUSH detection)
        A PR filter cannot use FILE_PATH nor be combined with PUSH, because PUSH
        does not support a BASE_REF and would make the PR payload unexploitable.
        A PUSH filter cannot use DestBranch.

        :returns: IS_PR or IS_PUSH, or None when neither set of conditions is met
        """
        evals = self.get_evals(**kwargs)
        if evals['base_forpr'] and evals['right_forpr']:
            if KEYISSET('FilePath', kwargs):
                raise AttributeError('A PR webhook does not support FILE_PATH')
            if KEYISSET('OnPush', kwargs):
                raise AttributeError(
                    'PUSH only supports HEAD_REF branch.'
                    ' The Payload from GIT will not contain the branch to compare to ..'
                )
            return IS_PR
        elif evals['base_forpush']:
            if KEYISSET('DestBranch', kwargs):
                raise AttributeError('DestBranch is not supported for PUSH')
            return IS_PUSH
        return None

    def set_push(self, **kwargs):
        """
        Defines a webhook filter group that makes sense for PUSH events.

        :returns: filters, list()
        """
        filters = []
        if KEYISSET('SourceBranch', kwargs):
            filters.append(WebhookFilter(
                Type="HEAD_REF",
                Pattern=f'^ref/heads/{kwargs["SourceBranch"]}'
            ))
        if KEYISSET('Tags', kwargs):
            filters.append(WebhookFilter(
                Type="HEAD_REF",
                Pattern=f'^ref/tags/{kwargs["Tags"]}'
            ))
        if KEYISSET('NotSourceBranch', kwargs):
            filters.append(WebhookFilter(
                Type='HEAD_REF',
                Pattern=f'^ref/heads/{kwargs["NotSourceBranch"]}',
                ExcludeMatchedPattern=True
            ))
        if KEYISSET('FilePath', kwargs):
            filters.append(WebhookFilter(
                Type='FILE_PATH',
                Pattern=f'{kwargs["FilePath"]}'
            ))
        if KEYISSET('UserName', kwargs):
            # was: Pattern prefixed with ^ref/heads/, which is a git ref and not
            # an actor account id - the filter could never match
            filters.append(WebhookFilter(
                Type='ACTOR_ACCOUNT_ID',
                Pattern=f'{kwargs["UserName"]}'
            ))
        filters.append(WebhookFilter(
            Type='EVENT',
            Pattern="PUSH"
        ))
        return filters

    def set_pr(self, source_branch, dest_branch, kwargs):
        """
        Defines a webhook filter group for a PR between two branches.

        .. returns: filters, list()
        """
        filters = [
            WebhookFilter(
                Type='HEAD_REF',
                Pattern=f'^ref/heads/{source_branch}'
            ),
            WebhookFilter(
                Type='BASE_REF',
                Pattern=f'^ref/heads/{dest_branch}'
            )
        ]
        if KEYISSET('NotSourceBranch', kwargs):
            filters.append(WebhookFilter(
                Type="HEAD_REF",
                Pattern=f"^ref/heads/{kwargs['NotSourceBranch']}",
                ExcludeMatchedPattern=True
            ))
        # was: tested NotSourceBranch a second time but read NotDestBranch,
        # raising KeyError whenever NotSourceBranch was set alone
        if KEYISSET('NotDestBranch', kwargs):
            filters.append(WebhookFilter(
                Type="BASE_REF",
                Pattern=f"^ref/heads/{kwargs['NotDestBranch']}",
                ExcludeMatchedPattern=True
            ))
        event_pattern = []
        if KEYISSET('OnCreate', kwargs):
            event_pattern.append('PULL_REQUEST_CREATED')
        if KEYISSET('OnUpdate', kwargs):
            event_pattern.append('PULL_REQUEST_UPDATED')
        # was: OnReopen was accepted by validate_args but never mapped to an
        # event pattern, leaving the EVENT filter empty for OnReopen-only input
        if KEYISSET('OnReopen', kwargs):
            event_pattern.append('PULL_REQUEST_REOPENED')
        filters.append(WebhookFilter(
            Type='EVENT',
            Pattern=','.join(event_pattern)
        ))
        return filters

    def __init__(self, **kwargs):
        """
        Builds self.group from the validated keyword arguments.

        Raises:
            ValueError: when kwargs match neither the PR nor the PUSH requirements
        """
        self.group = None
        filter_is = self.validate_args(**kwargs)
        if filter_is == IS_PR:
            self.group = self.set_pr(
                kwargs['SourceBranch'], kwargs['DestBranch'],
                kwargs
            )
        elif filter_is == IS_PUSH:
            self.group = self.set_push(**kwargs)
        else:
            raise ValueError('The parameters for the webhook are not valid')
class BuildProject(Project):
    """
    Class to create a CodeBuild project from simplified keyword arguments.
    """
    # Keys every Source definition must carry
    _source_must_haves = ['Location']
    # Source types resolvable by define_source via use_<type>_source
    _source_types = ["S3", "CODEPIPELINE", "GITHUB"]
    # Friendly name -> CodeBuild compute type
    _compute_types = {
        'small': 'BUILD_GENERAL1_SMALL',
        'medium': 'BUILD_GENERAL1_MEDIUM',
        'large': 'BUILD_GENERAL1_LARGE'
    }

    def use_codecommit(self, args):
        """
        Sets source for CodeCommit.
        NOTE(review): "CODECOMMIT" is absent from _source_types and this name
        does not match the use_<type>_source lookup in define_source, so this
        method is currently unreachable from define_source - confirm intent.
        """
        assert all(k in args.keys() for k in self._source_must_haves)
        source = Source(
            Type="CODECOMMIT",
            Location=args['Location']
        )
        return source

    def use_s3_source(self, args):
        """
        Defines the source for S3
        """
        assert all(k in args.keys() for k in self._source_must_haves)
        source = Source(
            Type="S3",
            NamespaceType="S3",
            Location=args['Location']
        )
        return source

    def use_codepipeline_source(self, args):
        """
        Defines the source for CodePipeline
        """
        assert all(k in args.keys() for k in self._source_must_haves)
        source = Source(
            Type="CODEPIPELINE",
            NamespaceType="NONE",
            Packaging="ZIP"
        )
        return source

    def use_github_source(self, args):
        """
        Defines the source for GitHub and GitHub Enterprise
        """
        assert all(k in args.keys() for k in self._source_must_haves)
        github_pattern = re.compile(r'^(https://github.com\/)')
        assert github_pattern.match(args['Location'])
        source = Source(
            Type="GITHUB",
            Location=args['Location']
        )
        return source

    def define_source(self, kwargs):
        """
        Resolves and builds the Source property from kwargs['Source'].

        Raises:
            KeyError: when no Source key is given
            AttributeError: when the Type has no matching use_*_source method
        """
        if 'Source' not in kwargs.keys():
            raise KeyError('Source must be present')
        source_info = kwargs['Source']
        source = None
        for source_type in self._source_types:
            if source_info['Type'] == source_type:
                try:
                    func = getattr(self, f'use_{source_type.lower()}_source')
                    source = func(kwargs['Source'])
                except AttributeError as error:
                    # chain the original lookup failure for debuggability
                    raise AttributeError(
                        f'Source Type {source_info["Type"]} is not supported'
                    ) from error
        if 'BuildSpec' in source_info.keys():
            setattr(source, 'BuildSpec', source_info['BuildSpec'])
        return source

    def define_artifacts(self, kwargs):
        """
        Defines the Artifacts property; CODEPIPELINE when UseCodePipeline is set,
        otherwise no artifacts.
        """
        artifact = Artifacts(
            Type="NO_ARTIFACTS"
        )
        if 'UseCodePipeline' in kwargs.keys():
            artifact = Artifacts(
                Type="CODEPIPELINE",
                Packaging="ZIP"
            )
        return artifact

    def define_env(self, kwargs):
        """
        Defines the build Environment from ComputeType, OS, runtime and env vars.

        Raises:
            TypeError: when a BuildEnvVars entry is not an EnvironmentVariable
        """
        if 'BuildEnvVars' in kwargs.keys() and kwargs['BuildEnvVars']:
            # was: iterated `kwarg['BuildEnvVars']` - undefined name (NameError)
            for var in kwargs['BuildEnvVars']:
                if not isinstance(var, EnvironmentVariable):
                    raise TypeError('Environment variables must be of type', EnvironmentVariable)
            env_vars = kwargs['BuildEnvVars']
        else:
            env_vars = []
        if kwargs['OS'] == 'WINDOWS':
            env_type = 'WINDOWS_CONTAINER'
        else:
            env_type = 'LINUX_CONTAINER'
        if 'Image' in kwargs.keys():
            image = kwargs['Image']
        else:
            image = self.find_image(
                kwargs['OS'], kwargs['RuntimeLanguage'], kwargs['RuntimeVersion']
            )
        env = Environment(
            ComputeType=self._compute_types[kwargs['ComputeType']],
            Type=env_type,
            EnvironmentVariables=env_vars,
            Image=image
        )
        # was: matched against `kwarg['Certificate']` - undefined name (NameError)
        if 'Certificate' in kwargs.keys() and CERTIFICATE_PATTERN.match(kwargs['Certificate']):
            setattr(env, 'Certificate', kwargs['Certificate'])
        return env

    def find_image(self, os, language, version):
        """
        Args:
            os: Operating System of the base image
            language: Codebuild name of the language to use
            version: Version of the runtime to use
        Returns:
            codebuild image matching os, language and latest version
        """
        # TODO: resolve from os/language/version; hard-coded placeholder for now
        return "aws/codebuild/python:3.6.5"

    def define_triggers(self, kwargs):
        """
        Defines Codebuild triggers from kwargs['FilterGroups'].

        Raises:
            TypeError / AttributeError / ValueError: on malformed filter input
        """
        tropo_filters = []
        filters = kwargs['FilterGroups']
        if not isinstance(filters, list):
            raise TypeError('filters must be of type', list)
        for filter_ in filters:
            if (
                    isinstance(filter_, list) and
                    len(filter_) >= 2 and
                    isinstance(filter_[0], WebhookFilter)
            ):
                tropo_filters.append(filter_)
            elif isinstance(filter_, list) and not isinstance(filter_[0], WebhookFilter):
                # was: a single webhook_filters list was shared across groups,
                # so every later group also accumulated all earlier filters
                webhook_filters = []
                for hookfilter in filter_:
                    if not AT_LEAST(['Type', 'Pattern'], hookfilter.keys()):
                        raise AttributeError('WebHook must have at least Type and Pattern defined')
                    if hookfilter['Type'] not in WEBHOOK_TYPES:
                        raise ValueError('Type must be one of the', WEBHOOK_TYPES)
                    if hookfilter['Type'] == 'EVENT' and not (
                            AT_LEAST(hookfilter['Pattern'].split(','), WEBHOOK_PATTERNS)):
                        raise ValueError('Type EVENT only accepts one of', WEBHOOK_PATTERNS)
                    webhook_filters.append(WebhookFilter(
                        Type=hookfilter['Type'],
                        Pattern=hookfilter['Pattern']
                    ))
                tropo_filters.append(webhook_filters)
        trigger = ProjectTriggers(Webhook=True, FilterGroups=tropo_filters)
        return trigger

    def __init__(self, title, role, **kwargs):
        """
        Initializes a new Project
        """
        super().__init__(
            title,
            BadgeEnabled=True,
            Source=self.define_source(kwargs),
            Artifacts=self.define_artifacts(kwargs),
            Environment=self.define_env(kwargs),
            ServiceRole=filter_iamrole(role),
        )
        if 'UseWebhooks' in kwargs.keys() and 'FilterGroups' in kwargs.keys():
            setattr(self, 'Triggers', self.define_triggers(kwargs))
        # forward any remaining valid Project property given in kwargs
        defined = list(self.properties)
        definable = list(self.props.keys())
        to_define = list(set(definable) ^ set(defined))
        for key in to_define:
            if KEYISSET(key, kwargs):
                setattr(self, key, kwargs[key])
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,927
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/templates/awslambda.py
|
from troposphere import (
Parameter,
Template,
Equals,
GetAtt,
Ref,
Sub
)
from troposphere.awslambda import (
Function,
LayerVersion,
Permission,
Alias,
Version,
Code,
Environment
)
from ozone.outputs import object_outputs
from ozone.filters.arns import (
iam_role as filter_iamrole,
lambda_layer as filter_layer
)
def lambda_function(**kwargs):
    """
    Builds a Lambda Function resource, overriding placeholder defaults with kwargs.

    Args:
        kwargs:
            Layers: list of layers (name, ARN or troposphere object)
            S3Bucket / S3Key: location of the function code
                (assumes both are passed together - TODO confirm with callers)
            Role: IAM role (name, ARN or troposphere object)
            Environment: dict of variables or a troposphere Environment
            any other key is set as-is on the Function
    Returns:
        function Function()
    """
    function = Function(
        'LambdaFunction',
        Code=Code(
            S3Bucket='replace-me',
            S3Key='replace-me'
        ),
        Handler='function.lambda_handler',
        MemorySize='256',
        Timeout=30
    )
    for key in kwargs.keys():
        if key == 'Layers':
            # normalize every entry to a layer ARN / intrinsic function
            # (was: leftover debug print(layers) - removed)
            layers = [filter_layer(layer) for layer in kwargs[key]]
            setattr(function, key, layers)
        elif key == 'S3Bucket' or key == 'S3Key':
            setattr(function, 'Code', Code(S3Bucket=kwargs['S3Bucket'], S3Key=kwargs['S3Key']))
        elif key == 'Role':
            setattr(function, 'Role', filter_iamrole(kwargs[key]))
        elif key == 'Environment':
            if isinstance(kwargs[key], dict):
                setattr(function, key, Environment(Variables=kwargs[key]))
            elif isinstance(kwargs[key], Environment):
                setattr(function, key, kwargs[key])
        else:
            setattr(function, key, kwargs[key])
    return function
def template(**kwargs):
    """
    Builds a template with the Lambda function, a Version and a conditional
    'prod' Alias that is only created when the ReleaseNewAlias parameter is 'Yes'.

    Args:
        kwargs: forwarded to lambda_function()
    Returns:
        template Template()
    """
    template = Template()
    release = template.add_parameter(Parameter(
        'ReleaseNewAlias',
        Type="String",
        AllowedValues=['Yes', 'No'],
        Default='No'
    ))
    # was: the condition was wrapped in an extra {'ReleaseAlias': ...} dict,
    # producing an invalid nested condition in the rendered template
    template.add_condition(
        'ReleaseAlias',
        Equals(Ref(release), 'Yes')
    )
    function = template.add_resource(lambda_function(**kwargs))
    version = template.add_resource(Version(
        'LambdaVersion',
        FunctionName=GetAtt(function, 'Arn')
    ))
    # was: DependsOn=[condition] - a condition is not a resource, so the alias
    # must be gated with the Condition attribute instead
    # NOTE(review): Ref() of a Lambda Version returns its ARN; if FunctionVersion
    # needs the bare version number, GetAtt(version, 'Version') may be required
    # - confirm against the deployed template.
    template.add_resource(Alias(
        'LambdaAlias',
        Name='prod',
        Condition='ReleaseAlias',
        Description=Sub(f'Alias to version ${{{version.title}.Arn}}'),
        FunctionName=Ref(function),
        FunctionVersion=Ref(version)
    ))
    template.add_output(object_outputs(function, True))
    template.add_output(object_outputs(version, True))
    return template
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,928
|
lambda-my-aws/ozone
|
refs/heads/master
|
/ozone/filters/arns.py
|
"""
Functions that will ensure that the ARN is returned if a string is passed.
If it is an object in troposphere, it will return GetAtt(obj, 'Arn') or Ref() depending on
what the object supports for return.
If it is a string, it must either comply to the last part of a ARN or be a full ARN
and match the ARN pattern
"""
import re
from troposphere import (
AWS_REGION,
AWS_ACCOUNT_ID
)
from troposphere import (
ImportValue,
Parameter,
GetAtt,
Sub,
Ref
)
from troposphere.iam import (
Role
)
from troposphere.s3 import Bucket
from troposphere.awslambda import Function
from troposphere.kms import (
Key, Alias
)
from ozone.filters.regexes import (
S3_ARN_PREFIX, S3_NAME, S3_ARN,
IAM_ROLE_NAME, IAM_ROLE_ARN,
LAMBDA_NAME, LAMBDA_ARN,
LAMBDA_LAYER_VERSION, LAMBDA_LAYER_ARN,
KMS_KEY_ARN, KMS_KEY_ID,
KMS_ALIAS, KMS_ALIAS_ARN
)
def s3_bucket(bucket, any_object=False):
    """
    Args:
        bucket: represents the bucket object, name, ARN or a function
        any_object: when True, the returned ARN targets every object (/* suffix)
    Returns:
        untouched if one of the functions supported
        Sub() referencing the Parameter when a Parameter is given
        string of the full ARN if the bucket name is given
        full ARN if full ARN is given and match S3 bucket ARN pattern
    Raises:
        ValueError: when the value matches neither pattern nor supported type
    """
    arn_pat = re.compile(S3_ARN)
    name_pat = re.compile(S3_NAME)
    if isinstance(bucket, (ImportValue, GetAtt, Sub, Ref)):
        return bucket
    elif isinstance(bucket, Parameter):
        # was: Sub('arn:aws:s3:::{bucket}/*') - missing the f-prefix and the
        # ${} CFN reference, so the literal text "{bucket}" ended up in the ARN
        if any_object:
            return Sub(f'arn:aws:s3:::${{{bucket.title}}}/*')
        else:
            return Sub(f'arn:aws:s3:::${{{bucket.title}}}')
    elif isinstance(bucket, Bucket):
        return GetAtt(bucket, 'Arn')
    elif isinstance(bucket, str):
        if arn_pat.match(bucket):
            return bucket
        elif name_pat.match(bucket):
            if any_object:
                return f'{S3_ARN_PREFIX}{bucket}/*'
            else:
                return f'{S3_ARN_PREFIX}{bucket}'
        else:
            raise ValueError('The S3 ARN must follow', S3_ARN)
    else:
        raise ValueError(
            'The S3 ARN must be computed with a function or follow the pattern',
            S3_ARN
        )
def iam_role(role):
    """
    Normalizes an IAM role reference into an ARN or an intrinsic function.

    Args:
        role: represents the role object, or a function
    Returns:
        untouched if one of the functions supported
        Sub() of the full ARN if the role name is given
        full ARN if full ARN is given and match IAM role ARN pattern
    """
    if isinstance(role, str):
        if re.compile(IAM_ROLE_NAME).match(role):
            return Sub(f'arn:aws:iam::${{AWS::AccountId}}:role/{role}')
        if role.startswith('arn:aws:iam::') and re.compile(IAM_ROLE_ARN).match(role):
            return role
        raise ValueError(
            'Role ARN must follow either the name or full arn patterns',
            IAM_ROLE_NAME,
            IAM_ROLE_ARN
        )
    if isinstance(role, (Parameter, Role)):
        return GetAtt(role, 'Arn')
    if isinstance(role, (GetAtt, Sub, Ref, ImportValue)):
        return role
    raise TypeError('role expected to be of type', str, ImportValue, Role, Sub, GetAtt, Ref)
def lambda_function(function):
    """
    Normalizes a Lambda function reference into an ARN or an intrinsic function.

    Args:
        function: represents the function object, or a function
    Returns:
        untouched if one of the functions supported
        Sub() of the full ARN if the function name is given
        full ARN if full ARN is given and match function ARN pattern
    """
    if isinstance(function, str):
        if re.compile(LAMBDA_NAME).match(function):
            return Sub(f'arn:aws:lambda:${{AWS::Region}}:${{AWS::AccountId}}:function:{function}')
        if function.startswith('arn:aws:lambda:') and re.compile(LAMBDA_ARN).match(function):
            return function
        raise ValueError(
            'Function ARN must follow either the name or full arn patterns',
            LAMBDA_NAME,
            LAMBDA_ARN
        )
    if isinstance(function, (Parameter, Function)):
        return GetAtt(function, 'Arn')
    if isinstance(function, (ImportValue, GetAtt, Sub, Ref)):
        return function
    raise TypeError('Function expected to be of type', str, Role, Sub, GetAtt, Ref, ImportValue)
def lambda_layer(layer):
    """
    Resolve *layer* to a value usable wherever a Lambda layer ARN is expected.

    Args:
        layer: a "name:version" string, a full layer-ARN string, or an
            intrinsic function (GetAtt, Ref, Sub, ImportValue)
    Returns:
        the input untouched when it is already an intrinsic function or a
        full layer ARN; otherwise a Sub() building the full ARN from the
        "name:version" string
    Raises:
        ValueError: when the string matches neither pattern, or the input
            is of an unsupported type
    """
    # Intrinsic functions are passed through; CloudFormation resolves them.
    if isinstance(layer, (GetAtt, Ref, Sub, ImportValue)):
        return layer
    if isinstance(layer, str):
        if re.match(LAMBDA_LAYER_ARN, layer):
            return layer
        if re.match(LAMBDA_LAYER_VERSION, layer):
            # "name:version" shorthand: build the region/account-scoped ARN.
            return Sub(f'arn:aws:lambda:${{AWS::Region}}:${{AWS::AccountId}}:layer:{layer}')
        raise ValueError(
            "Layer ARN expected of format"
            f"{LAMBDA_LAYER_ARN} or {LAMBDA_LAYER_VERSION}"
        )
    raise ValueError(
        'Layer does not comply to any required patterns of Functions'
    )
def kms_key(key):
    """
    Resolve *key* to a value usable wherever a KMS key ARN is expected.

    Args:
        key: a key-id or full-ARN string, a troposphere Key/Parameter, or an
            intrinsic function (Ref, Sub, ImportValue, GetAtt)
    Returns:
        untouched if one of the functions supported
        a Sub() building the full ARN if the key id is given
        full ARN if full ARN is given and match KMS key ARN pattern
    Raises:
        ValueError: for a string matching neither the key-id nor ARN pattern
        TypeError: for any other unsupported type (the previous version fell
            through and silently returned None here, unlike the sibling
            iam_role/lambda_function helpers which raise TypeError)
    """
    arn_pattern = re.compile(KMS_KEY_ARN)
    id_pattern = re.compile(KMS_KEY_ID)
    if isinstance(key, (Ref, Sub, ImportValue, GetAtt)):
        return key
    if isinstance(key, (Parameter, Key)):
        return GetAtt(key, 'Arn')
    if isinstance(key, str):
        if arn_pattern.match(key):
            return key
        if id_pattern.match(key):
            return Sub(f'arn:aws:kms:${{AWS::Region}}:${{AWS::AccountId}}:key/{key}')
        raise ValueError('Key does not match pattern', KMS_KEY_ARN, KMS_KEY_ID)
    # Consistency fix: reject unsupported types explicitly instead of
    # implicitly returning None.
    raise TypeError('key expected to be of type', str, Key, Parameter, Ref, Sub, GetAtt, ImportValue)
def kms_alias(alias):
    """
    Resolve *alias* to a value usable wherever a KMS alias ARN is expected.

    Args:
        alias: an "alias/..." string or full-ARN string, a troposphere
            Alias/Parameter, or an intrinsic function (Ref, Sub, ImportValue,
            GetAtt)
    Returns:
        untouched if one of the functions supported
        a Sub() building the full ARN if the alias name is given
        full ARN if full ARN is given and match KMS Key alias ARN pattern
    Raises:
        ValueError: for a string matching neither the alias nor ARN pattern
        TypeError: for any other unsupported type (the previous version fell
            through and silently returned None here, unlike the sibling
            iam_role/lambda_function helpers which raise TypeError)
    """
    arn_pattern = re.compile(KMS_ALIAS_ARN)
    alias_pattern = re.compile(KMS_ALIAS)
    if isinstance(alias, (Ref, Sub, ImportValue, GetAtt)):
        return alias
    if isinstance(alias, (Parameter, Alias)):
        return GetAtt(alias, 'Arn')
    if isinstance(alias, str):
        if arn_pattern.match(alias):
            return alias
        if alias_pattern.match(alias):
            return Sub(f'arn:aws:kms:${{AWS::Region}}:${{AWS::AccountId}}:{alias}')
        raise ValueError('Alias does not match pattern', alias, KMS_ALIAS, KMS_ALIAS_ARN)
    # Consistency fix: reject unsupported types explicitly instead of
    # implicitly returning None.
    raise TypeError('alias expected to be of type', str, Alias, Parameter, Ref, Sub, GetAtt, ImportValue)
|
{"/ozone/resources/iam/roles/pipeline_role.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py"], "/codebuild/layer.py": ["/ozone/templates/awslambdalayer.py"], "/ozone/resources/devtools/pipeline.py": ["/ozone/filters/arns.py", "/ozone/filters/__init__.py"], "/ozone/templates/awslambdalayer_pipeline.py": ["/ozone/handlers/lambda_tools.py", "/ozone/resources/iam/roles/pipeline_role.py", "/ozone/resources/devtools/pipeline.py", "/ozone/outputs/__init__.py"], "/ozone/templates/examples/function_template.py": ["/ozone/templates/awslambda.py", "/ozone/resources/iam/policies/__init__.py"], "/ozone/resources/iam/roles/__init__.py": ["/ozone/tags/__init__.py"], "/ozone/outputs/__init__.py": ["/ozone/filters/__init__.py"], "/ozone/handlers/stack_manage.py": ["/ozone/handlers/__init__.py"], "/ozone/resources/s3/bucket.py": ["/ozone/filters/arns.py"], "/ozone/resources/kms/policy.py": ["/ozone/resolvers/organizations/__init__.py"], "/ozone/templates/awslambdalayer.py": ["/ozone/handlers/lambda_tools.py", "/ozone/outputs/__init__.py"], "/ozone/resources/s3/cloudtrail_policy.py": ["/ozone/filters/arns.py", "/ozone/resolvers/organizations/__init__.py"], "/ozone/resources/kms/key.py": ["/ozone/resources/kms/policy.py"], "/ozone/resources/devtools/buildproject.py": ["/ozone/resources/iam/roles/__init__.py", "/ozone/filters/arns.py", "/ozone/resources/iam/policies/__init__.py", "/ozone/resolvers/codebuild/runtime.py"], "/ozone/templates/awslambda.py": ["/ozone/outputs/__init__.py", "/ozone/filters/arns.py"], "/ozone/filters/arns.py": ["/ozone/filters/regexes.py"]}
|
7,996
|
skudasov/russian-chatbot-primer
|
refs/heads/master
|
/test_e2e.py
|
import requests
HOST = 'http://localhost:5005/'
RECEPIENT = 'default2'
def test_path_skipped():
    """Send two queries to the local Rasa server and print the raw replies.

    NOTE(review): asserts nothing — presumably an exploratory smoke test;
    confirm whether responses should be validated.
    """
    endpoint = '%sconversations/%s/respond' % (HOST, RECEPIENT)
    for query in ("вези большую пиццу", "что еще ты можешь сделать для меня"):
        print(requests.post(endpoint, json={"query": query}).text)
def test_path_no_addons():
    """Send an order followed by an add-on refinement and print the raw replies.

    NOTE(review): asserts nothing — presumably an exploratory smoke test;
    confirm whether responses should be validated.
    """
    endpoint = '%sconversations/%s/respond' % (HOST, RECEPIENT)
    for query in ("вези большую пиццу", "с ветчиной"):
        print(requests.post(endpoint, json={"query": query}).text)
|
{"/compare_validate.py": ["/colors.py"], "/testrunner.py": ["/colors.py", "/compare_validate.py", "/tests/cases.py", "/constants.py", "/nlu_model.py"]}
|
7,997
|
skudasov/russian-chatbot-primer
|
refs/heads/master
|
/compare_validate.py
|
import os
from collections import defaultdict
from datetime import datetime
from itertools import cycle
from colors import yellow, green, red
def extract(data):
    """Split a parse result dict into its (intent, intent_ranking, entities) parts."""
    return (
        data['intent'],
        data['intent_ranking'],
        data['entities'],
    )
def test_tresholds(interpreter, cases):
    """Run every case through *interpreter*, printing failures for intents
    and for entities whose value is wrong or whose confidence is below the
    case's threshold.

    :param interpreter: object with a ``parse(utterance)`` method returning
        a dict with 'intent', 'intent_ranking' and 'entities' keys
    :param cases: mapping of utterance -> expectation dict (see tests/cases.py)
    """
    for utterance, expectation in cases.items():
        print(yellow("case: %s" % expectation['case']))
        parsed = interpreter.parse(utterance)
        intent, _, found_entities = extract(parsed)
        # A missing intent is reported the same way as a wrong one ("found None").
        found_name = intent['name'] if intent else None
        if found_name != expectation['intent']:
            print(red("INTENT DETECTION FAILED: expecting %s, found %s" % (expectation['intent'], found_name)))
        print(green('result intents: %s' % intent))
        print(green('result entities: %s' % found_entities))
        for expected in expectation['entity']['ner_crf']:
            print('searching entity: %s with treshold %s' % (expected['name'], expected['confidence']))
            for found in found_entities:
                if found['entity'] != expected['name']:
                    continue
                if found['value'] != expected['value']:
                    print(red("FAILED PREDICTION: expecting %s -> %s, found %s -> %s"
                              % (expected['name'], expected['value'], found['entity'], found['value'])))
                elif found['confidence'] < expected['confidence']:
                    print(red("FAILED PREDICTION: %s prediction is %s that is below treshold %s"
                              % (expected['name'], found['confidence'], expected['confidence'])))
def calculate_plot_data(interpreter, cases):
    """Run every case through *interpreter* and collect plotting series.

    :param interpreter: object with a ``parse(utterance)`` method
    :param cases: mapping of utterance -> expectation dict (see tests/cases.py)
    :return: tuple of
        (ner labels, ner confidences, intent labels, intent confidences,
         intent hit rate, ner hit rate)
    """
    intent_good = 0
    intent_total = 0
    ner_good = 0
    ner_total = 0
    ner_names = []
    ner_confidences = []
    intent_names = []
    intent_confidences = []
    for utter, case in cases.items():
        intent_names.append("id: %s -> %s" % (case['id'], case['intent']))
        result = interpreter.parse(utter)
        intent, _, result_entities = extract(result)
        if not intent:
            intent_total += 1
        elif intent['name'] == case['intent']:
            intent_good += 1
            intent_total += 1
            # NOTE(review): a confidence is recorded only for correct hits,
            # so intent_confidences can be shorter than intent_names — the
            # caller pads with fill_missing_data_with_zeroes.
            intent_confidences.append(intent['confidence'])
        elif intent['name'] != case['intent']:
            intent_total += 1
        for test_entity_data in case['entity']['ner_crf']:
            # for/else: the else branch runs only when no predicted entity
            # matched both name and value (i.e. the loop did not break).
            for idx, res_ent in enumerate(result_entities):
                if res_ent['entity'] == test_entity_data['name'] and res_ent['value'] == test_entity_data['value']:
                    ner_names.append("id: [%s] %s -> %s" % (case['id'], res_ent['entity'], res_ent['value']))
                    ner_confidences.append(res_ent['confidence'])
                    ner_good += 1
                    ner_total += 1
                    break
            else:
                # Missing entity: record it with zero confidence so the bar
                # chart still shows a slot for it.
                ner_names.append(
                    "!ERR NOT FOUND! id: [%s] %s -> %s" % (case['id'], test_entity_data['name'], test_entity_data['value']))
                ner_confidences.append(0.0)
                ner_total += 1
    # Guard against division by zero when no cases produced totals.
    intent_rate = (intent_good / intent_total) if intent_total > 0 else 0
    ner_rate = (ner_good / ner_total) if ner_total > 0 else 0
    return ner_names, ner_confidences, intent_names, intent_confidences, intent_rate, ner_rate
def all_filenames_in_dir(dirpath):
    """Return the names of the files directly inside *dirpath* (top level only).

    Fix: the previous version implicitly returned ``None`` when *dirpath*
    did not exist (os.walk yields nothing), which made callers that iterate
    the result crash; an empty list is returned instead.

    :param dirpath: directory to list
    :return: list of file names (no subdirectories), or [] if the directory
        is missing or yields nothing
    """
    for _, _, files in os.walk(dirpath):
        # Only the first triple (the top level) is wanted.
        return files
    return []
def fill_missing_data_with_zeroes(data):
    """
    Pad every inner list with zeroes to the length of the longest one,
    so the series can be plotted side by side.

    Fix: the previous version called ``max()`` on an empty list of lengths
    and raised ValueError when *data* was empty; it now returns [] unchanged.

    :param data: [[]...] — mutated in place
    :return: the same list, padded
    """
    # default=0 guards the empty-input case.
    max_len = max(map(len, data), default=0)
    for series in data:
        # extend() with a zero-filled pad replaces the manual append loop.
        series.extend([0] * (max_len - len(series)))
    return data
def name_plot(name):
    """Build a timestamped PNG file name for a test plot.

    NOTE(review): str(datetime.now()) contains spaces and colons, which some
    filesystems dislike — confirm target platforms.
    """
    return "tests-{}-{}.png".format(name, datetime.now())
def plot_comparative_bars(
        plot_filename=None,
        ylabel=None,
        xlabel=None,
        title=None,
        model_names=None,
        entities=None,
        confidences=None,
        total_rates=None,
        dpi=100):
    """Render one horizontal grouped-bar chart comparing models and save it.

    :param plot_filename: base name passed to name_plot() for the output file
    :param ylabel: y-axis label
    :param xlabel: x-axis label
    :param title: chart title
    :param model_names: one label per bar group (legend entries)
    :param entities: tick labels on the y axis, one per bar position
    :param confidences: list of per-model value lists; all inner lists must
        have equal length (pad with fill_missing_data_with_zeroes first)
    :param total_rates: unused — NOTE(review): dead parameter, kept for
        signature compatibility; confirm before removing
    :param dpi: resolution of the saved PNG
    """
    # Imported lazily so importing this module does not require matplotlib.
    import numpy as np
    import matplotlib.pyplot as plt
    # data to plot
    n_groups = len(confidences[0])
    # create plot
    # NOTE(review): ax is unused; plt state-machine calls are used instead.
    fig, ax = plt.subplots()
    index = np.arange(n_groups)
    bar_width = 0.20
    opacity = 0.9
    # Cycle colors so more than four models just reuse the palette.
    colors_cycle = cycle(['b', 'g', 'r', 'y'])
    step = 0
    for idx, model_name in enumerate(model_names):
        # Offset each model's bars by one bar width inside the group.
        plt.barh(index + step, confidences[idx], bar_width,
                 alpha=opacity,
                 color=next(colors_cycle),
                 label=model_name)
        step += bar_width
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    plt.title(title)
    plt.yticks(index + bar_width, entities)
    plt.legend()
    plt.tight_layout()
    plt.savefig(name_plot(plot_filename), dpi=dpi)
|
{"/compare_validate.py": ["/colors.py"], "/testrunner.py": ["/colors.py", "/compare_validate.py", "/tests/cases.py", "/constants.py", "/nlu_model.py"]}
|
7,998
|
skudasov/russian-chatbot-primer
|
refs/heads/master
|
/tests/cases.py
|
# Test cases for the NLU model, keyed by the raw utterance.
# Each value describes the expectation:
#   id       - case number used in plot labels
#   case     - human-readable description of what the case probes (Russian)
#   intent   - expected intent name
#   entity.ner_crf - entities the CRF extractor must find, each with the
#                    expected value and a minimum confidence threshold.
# NOTE: some utterances contain deliberate typos/split words ("б ольшую",
# "свром") to test robustness — do not "fix" them.
cases = {
    "хочу пиццу, большую": {
        'id': 1,
        'case': 'знаки препинания',
        'intent': 'order_food',
        'entity': {
            'ner_crf': [
                {
                    'name': 'product_type',
                    'value': 'пиццу',
                    'confidence': 0.7,
                },
                {
                    'name': 'product_size',
                    'value': 'большую',
                    'confidence': 0.7,
                }
            ]
        }
    },
    'Хочу заказать б ольшую пиццу': {
        'id': 2,
        'case': 'выделяем сущность с пробелом',
        'intent': 'order_food',
        'entity': {
            'ner_crf': [
                {
                    'name': 'product_type',
                    'value': 'пиццу',
                    'confidence': 0.7
                },
                {
                    'name': 'product_size',
                    'value': 'ольшую',
                    'confidence': 0.7
                }
            ]
        }
    },
    'Хочу заказать пиццу с свром': {
        'id': 3,
        'case': 'выделяем сущность с ошибкой',
        'intent': 'order_food',
        'entity': {
            'ner_crf': [
                {
                    'name': 'product_type',
                    'value': 'пиццу',
                    'confidence': 0.7
                },
                {
                    'name': 'product_addon',
                    'value': 'свром',
                    'confidence': 0.7
                }
            ]
        }
    },
    'Нужна пицца с глазами тритона и хвостом': {
        'id': 4,
        'case': 'выделяем сущность сильно превосходящую по размеру',
        'intent': 'order_food',
        'entity': {
            'ner_crf': [
                {
                    'name': 'product_type',
                    'value': 'пицца',
                    'confidence': 0.7
                },
                {
                    'name': 'product_addon',
                    'value': 'глазами тритона',
                    'confidence': 0.7
                },
            ],
        }
    },
    'доставь пиццу с беконом, среднюю': {
        'id': 5,
        'case': 'меняем порядок слов',
        'intent': 'order_food',
        'entity': {
            'ner_crf': [
                {
                    'name': 'product_type',
                    'value': 'пицца',
                    'confidence': 0.7
                },
                {
                    'name': 'product_size',
                    'value': 'среднюю',
                    'confidence': 0.7
                },
                {
                    'name': 'product_addon',
                    'value': 'беконом',
                    'confidence': 0.7
                }
            ],
        }
    },
    'вези 30см пиццу с луком и пармезаном': {
        'id': 6,
        'case': 'числа имеют значение?',
        'intent': 'order_food',
        'entity': {
            'ner_crf': [
                {
                    'name': 'product_addon',
                    'value': 'луком',
                    'confidence': 0.7
                },
                {
                    'name': 'product_addon',
                    'value': 'пармезаном',
                    'confidence': 0.7
                },
                {
                    'name': 'product_type',
                    'value': 'пицца',
                    'confidence': 0.7
                },
                {
                    'name': 'product_size',
                    'value': '30см',
                    'confidence': 0.7
                },
            ]
        }
    },
}
|
{"/compare_validate.py": ["/colors.py"], "/testrunner.py": ["/colors.py", "/compare_validate.py", "/tests/cases.py", "/constants.py", "/nlu_model.py"]}
|
7,999
|
skudasov/russian-chatbot-primer
|
refs/heads/master
|
/colors.py
|
from colorama import Fore, Back, Style
from colorama import init
init()
# PEP 8 (E731): named lambdas replaced with defs — same call interface,
# but proper function names in tracebacks and docstrings.
def red(text):
    """Return *text* wrapped in red ANSI color codes."""
    return Fore.RED + text + Fore.RESET


def green(text):
    """Return *text* wrapped in green ANSI color codes."""
    return Fore.GREEN + text + Fore.RESET


def yellow(text):
    """Return *text* wrapped in bright-yellow ANSI color codes."""
    return Fore.LIGHTYELLOW_EX + text + Fore.RESET
|
{"/compare_validate.py": ["/colors.py"], "/testrunner.py": ["/colors.py", "/compare_validate.py", "/tests/cases.py", "/constants.py", "/nlu_model.py"]}
|
8,000
|
skudasov/russian-chatbot-primer
|
refs/heads/master
|
/nlu_model.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_nlu.training_data import load_data
from rasa_nlu.model import Trainer
from rasa_nlu import config
def train(data=None, config_file=None, model_dir=None, model_name=None):
    """Train a Rasa NLU model and persist it.

    :param data: path to the training-data file
    :param config_file: path to the NLU pipeline configuration
    :param model_dir: directory the trained model is persisted into
    :param model_name: fixed name for the persisted model
    """
    pipeline_config = config.load(config_file)
    examples = load_data(data)
    nlu_trainer = Trainer(pipeline_config)
    nlu_trainer.train(examples)
    nlu_trainer.persist(model_dir, fixed_model_name=model_name)
|
{"/compare_validate.py": ["/colors.py"], "/testrunner.py": ["/colors.py", "/compare_validate.py", "/tests/cases.py", "/constants.py", "/nlu_model.py"]}
|
8,001
|
skudasov/russian-chatbot-primer
|
refs/heads/master
|
/testrunner.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_nlu.model import Interpreter
from colors import yellow, green
from compare_validate import test_tresholds, all_filenames_in_dir, plot_comparative_bars, calculate_plot_data, \
fill_missing_data_with_zeroes
from tests.cases import cases
from constants import *
from nlu_model import train
def get_model_name_by_cfg(cfg_path, prefix=None, suffix=None):
    """Derive a model name from a pipeline config file name.

    Fix: ``str.strip(chars)`` removes a *character set* from both ends, not a
    substring, so the old ``cfg_path.strip(CFG_PREFIX).strip(CFG_FORMAT)``
    mangled names sharing characters with the prefix/suffix
    (e.g. 'config_glove.yml' -> 'love').  Exact prefix/suffix removal is
    used instead.

    :param cfg_path: config file name, e.g. 'config_spacy.yml'
    :param prefix: prefix to drop (defaults to CFG_PREFIX)
    :param suffix: suffix to drop (defaults to CFG_FORMAT)
    :return: the bare model name, e.g. 'spacy'
    """
    prefix = CFG_PREFIX if prefix is None else prefix
    suffix = CFG_FORMAT if suffix is None else suffix
    name = cfg_path
    if prefix and name.startswith(prefix):
        name = name[len(prefix):]
    if suffix and name.endswith(suffix):
        name = name[:-len(suffix)]
    return name
if __name__ == '__main__':
    # CLI entry point: trains one model per config (cross-model) or one model
    # per training set (cross-dataset), evaluates each against tests/cases.py
    # and renders four comparison charts.
    import argparse
    parser = argparse.ArgumentParser()
    # NOTE(review): --train is parsed but never read below — confirm intent.
    parser.add_argument("--train", help="train model", action="store_true")
    parser.add_argument("--compare_crossmodel", help="compare intents + ner for different model pipelines", action="store_true")
    parser.add_argument("--compare_crossdataset", help="compare intents + ner for different train sets", action="store_true")
    parser.add_argument("--model", help="train model", action="store")
    args = parser.parse_args()
    # Accumulators shared by both comparison modes.
    interpreters = {}
    model_names = []
    ner_names = None
    cross_ner_confidences = []
    intent_names = None
    cross_intent_confidences = []
    intent_rates = []
    ner_rates = []
    comparison_method = None
    if args.compare_crossdataset:
        # One pipeline config, several training sets: one model per data file.
        comparison_method = 'cross-dataset by %s model' % args.model
        for d in all_filenames_in_dir(DATA_DIR):
            if args.model is None:
                print('no model is specified. use --model modelcfg.yml')
                break
            print(yellow('training model with data from path %s' % d))
            train(
                data='./data/%s' % d,
                config_file='./config/%s' % args.model,
                model_dir='./models/%s' % d,
                model_name=d
            )
            model_names.append(d)
            print(yellow("loading model: %s" % d))
            interpreters[d] = Interpreter.load('./models/%s/default/%s' % (d, d))
    if args.compare_crossmodel:
        # One training set, several pipeline configs: one model per config.
        comparison_method = 'cross-model'
        for p in all_filenames_in_dir(CFG_DIR):
            print(yellow('training model with cfg from path %s' % p))
            model_name = get_model_name_by_cfg(p)
            train(
                data='./data/training_data_ru.json',
                config_file='./config/%s' % p,
                model_dir='./models/%s' % model_name,
                model_name=model_name
            )
            model_names.append(model_name)
            print(yellow("loading model: %s" % model_name))
            interpreters[model_name] = Interpreter.load('./models/%s/default/%s' % (model_name, model_name))
    # Evaluate every trained model against the shared test cases.
    for name, i in interpreters.items():
        print(yellow('testing interpreter: %s' % name))
        test_tresholds(i, cases)
        # ner_names/intent_names are overwritten each iteration — the labels
        # are identical across models, so only the last set is kept.
        ner_names, ner_confidences, intent_names, intent_confidences, intent_rate, total_rate = calculate_plot_data(i, cases)
        print("intents: %s" % intent_confidences)
        cross_ner_confidences.append(ner_confidences)
        cross_intent_confidences.append(intent_confidences)
        intent_rates.append([intent_rate])
        ner_rates.append([total_rate])
    # Pad the per-model series to equal length so they can share one chart.
    fill_missing_data_with_zeroes(cross_intent_confidences)
    fill_missing_data_with_zeroes(cross_ner_confidences)
    print(yellow("intent names: %s" % intent_names))
    print(yellow("intent confidences: %s" % cross_intent_confidences))
    print(yellow("ner names: %s" % ner_names))
    print(yellow("ner confidences: %s" % cross_ner_confidences))
    print(yellow("intent rates: %s" % intent_rates))
    print(yellow("ner rates: %s" % ner_rates))
    print(green('generating plots'))
    print('model_names: %s' % model_names)
    # Four charts: per-entity and per-intent confidences, plus one aggregate
    # quality bar per model for intents and for NER.
    plot_comparative_bars(
        plot_filename='ner',
        ylabel='Entities',
        xlabel='Confidences',
        title='Entities by confidences (%s)' % comparison_method,
        model_names=model_names,
        entities=ner_names,
        confidences=cross_ner_confidences,
    )
    plot_comparative_bars(
        plot_filename='intent',
        ylabel='Intents',
        xlabel='Confidences',
        title='Intents by confidences (%s)' % comparison_method,
        model_names=model_names,
        entities=intent_names,
        confidences=cross_intent_confidences,
    )
    plot_comparative_bars(
        plot_filename='quality-intent',
        ylabel='Models',
        xlabel='Quality',
        title='Model intent quality (%s)' % comparison_method,
        model_names=model_names,
        entities=model_names,
        confidences=intent_rates,
    )
    plot_comparative_bars(
        plot_filename='quality-ner',
        ylabel='Models',
        xlabel='Quality',
        title='Model ner quality (%s)' % comparison_method,
        model_names=model_names,
        entities=model_names,
        confidences=ner_rates,
    )
|
{"/compare_validate.py": ["/colors.py"], "/testrunner.py": ["/colors.py", "/compare_validate.py", "/tests/cases.py", "/constants.py", "/nlu_model.py"]}
|
8,002
|
skudasov/russian-chatbot-primer
|
refs/heads/master
|
/constants.py
|
# File-name prefix of NLU pipeline configs (e.g. 'config_spacy.yml').
CFG_PREFIX = 'config_'
# File extension of the pipeline configs.
CFG_FORMAT = '.yml'
# Directory holding pipeline configurations.
CFG_DIR = './config'
# Directory holding training data sets.
DATA_DIR = './data'
|
{"/compare_validate.py": ["/colors.py"], "/testrunner.py": ["/colors.py", "/compare_validate.py", "/tests/cases.py", "/constants.py", "/nlu_model.py"]}
|
8,003
|
skudasov/russian-chatbot-primer
|
refs/heads/master
|
/backend/actions.py
|
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
from typing import Text, Dict, Any, List
import json
from rasa_core_sdk import Action, Tracker
from rasa_core_sdk.executor import CollectingDispatcher
from rasa_core_sdk.forms import FormAction
from rasa_core_sdk.events import SlotSet, UserUtteranceReverted, \
ConversationPaused, FollowupAction, Form
logger = logging.getLogger(__name__)
class ActionOrderPizza(Action):
    """Custom Rasa action fired when a pizza order should be placed."""

    def name(self):
        # Identifier referenced from the Rasa domain/stories.
        return "action_call_pizza"

    def run(self, dispatcher, tracker, domain):
        # The slots are read but not used — presumably a placeholder for the
        # actual ordering call. TODO confirm intended behavior.
        product_size = tracker.get_slot('product_size')
        product_type = tracker.get_slot('product_type')
        product_addons = tracker.get_slot('product_addon')
        print('TRIGGERED ACTION')
        return []
class ActionStoreOrder(Action):
    """Copy the entities extracted from the latest user message into slots."""

    def name(self):
        # Identifier referenced from the Rasa domain/stories.
        return "action_store_order"

    def run(self, dispatcher, tracker, domain):
        ptype = next(tracker.get_latest_entity_values('product_type'), None)
        psize = next(tracker.get_latest_entity_values('product_size'), None)
        # Bug fix: the entity is named 'product_addon' (singular) in the
        # training cases and in ActionOrderPizza's slot read; the previous
        # lookup asked for 'product_addons' and therefore always got None.
        paddons = next(tracker.get_latest_entity_values('product_addon'), None)
        print('extracted: %s | %s | %s' % (ptype, psize, paddons))
        # NOTE(review): only product_type is persisted; size/addon slots are
        # printed but dropped — confirm whether they should be SlotSet too.
        return [SlotSet('product_type', ptype)]
|
{"/compare_validate.py": ["/colors.py"], "/testrunner.py": ["/colors.py", "/compare_validate.py", "/tests/cases.py", "/constants.py", "/nlu_model.py"]}
|
8,004
|
Mihkorz/mortality.ai
|
refs/heads/master
|
/Mortality/urls.py
|
"""Mortality URL Configuration
"""
from django.conf.urls import url
from django.conf import settings
from django.contrib import admin
from django.conf.urls.static import static
from website.views import IndexPage, InputForm, nnMortalityResult
# URL routes: admin site, landing page, the data-input form and the mortality
# prediction result page.  Static/media files are served by Django itself —
# presumably a development-only setup; confirm before deploying.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', IndexPage.as_view(), name="website_index"),
    url(r'^form/$', InputForm.as_view(), name="website_input_form"),
    url(r'^result/$', nnMortalityResult.as_view(), name="website_mortality_result"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \
    + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
{"/Mortality/urls.py": ["/website/views.py"], "/website/views.py": ["/core/algorythm.py", "/website/models.py"], "/website/admin.py": ["/website/models.py"]}
|
8,005
|
Mihkorz/mortality.ai
|
refs/heads/master
|
/website/models.py
|
from __future__ import unicode_literals
from django.db import models
from django_countries.fields import CountryField
# Choice tuples for RunnedTest fields: (stored integer, human-readable label).
SEX_TYPES = (
    (0, 'Female'),
    (1, 'Male'),
)
SOCIAL_TYPES = (
    (0, 'Poor'),
    (1, 'Good'),
)
MENTAL_TYPES = (
    (0, 'No active illness'),
    (1, 'Active illness'),
)
SMOKING_TYPES = (
    (0, 'Never smoked'),
    (1, 'Former smoker'),
    (2, 'Current light smoker'),
    (3, 'Current heavy smoker'),
)
# Sex-specific alcohol scales (weekly-drink brackets differ between sexes).
# NOTE(review): the model's 'alcohol' field only uses the male scale as its
# choices — confirm whether the female scale should be applied conditionally.
ALCOHOL_MAN_TYPES = (
    (0, 'Non-drinker'),
    (1, '< 1 drink/month'),
    (2, '0-4/week'),
    (3, '5-9/week'),
    (4, '10-24/week'),
    (5, 'Heavy drinker'),
)
ALCOHOL_WOMAN_TYPES = (
    (0, 'Non-drinker'),
    (1, '< 1 drink/month'),
    (2, '0-2/week'),
    (3, '3-5/week'),
    (4, '6-17/week'),
    (5, 'Heavy drinker'),
)
ACTIVITY_TYPES = (
    (0, 'Low'),
    (1, 'Moderate'),
    (2, 'High'),
)
class RunnedTest(models.Model):
    """One executed mortality-prediction run: client info, uploaded blood
    markers, questionnaire answers and the computed results."""

    # Client/audit fields.
    ip = models.CharField(max_length=30, blank=True)
    datetime = models.DateTimeField(auto_now_add=True, blank=True)
    # Uploaded blood-markers file fed to the predictor.
    input_file = models.FileField("Blood markers file", blank=True)
    # NOTE(review): several FloatFields use the string '0' as default;
    # Django coerces it, but a numeric 0 would be cleaner — changing it
    # would require a migration.
    predicted_age = models.FloatField("Predicted age", default='0', blank=True)
    # Two-letter code, default 'eu' — presumably selects a distance metric;
    # confirm against the prediction view.
    metric = models.CharField(max_length=2, default='eu', blank=True)
    # Questionnaire inputs.
    age = models.FloatField("Age", default=0, blank=True)
    sex = models.IntegerField("Sex", default=1, choices=SEX_TYPES, blank=True)
    weight = models.FloatField("Weight", default='0', blank=True)
    height = models.FloatField("Height", default='0', blank=True)
    bmi = models.FloatField("BMI", default='0', blank=True)
    smoking = models.IntegerField("Smoking", default=0, choices=SMOKING_TYPES, blank=True)
    # Uses the male alcohol scale for all users — see ALCOHOL_WOMAN_TYPES note.
    alcohol = models.IntegerField("Alcohol", default=0, choices=ALCOHOL_MAN_TYPES, blank=True)
    ethnicity = CountryField()
    social_status = models.IntegerField("Social status", default=1, choices=SOCIAL_TYPES, blank=True)
    activity = models.IntegerField("Activity", default=0, choices=ACTIVITY_TYPES, blank=True)
    mental_health = models.IntegerField("Mental health", default=0, choices=MENTAL_TYPES, blank=True)
    # Computed output.
    expected_longevity = models.FloatField("Expected Longevity", default=0, blank=True)
class Article(models.Model):
    """A block of editable site text, placed on a page by its identifier."""

    # Identifier used by templates to locate the container for this text.
    idx = models.CharField("Identificator", max_length=300, blank=False,
                           help_text="Used to place text in the required container on page. Don't change")
    header = models.CharField("Header", max_length=300, blank=False,
                              help_text="Text header")
    text = models.TextField(blank=False,)
|
{"/Mortality/urls.py": ["/website/views.py"], "/website/views.py": ["/core/algorythm.py", "/website/models.py"], "/website/admin.py": ["/website/models.py"]}
|
8,006
|
Mihkorz/mortality.ai
|
refs/heads/master
|
/website/migrations/0004_auto_20160823_1205.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-23 12:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; migrations are frozen history —
    # do not hand-edit, add a new migration instead.

    dependencies = [
        ('website', '0003_auto_20160817_1352'),
    ]

    operations = [
        # Adds RunnedTest.metric (default 'eu').
        migrations.AddField(
            model_name='runnedtest',
            name='metric',
            field=models.CharField(blank=True, default='eu', max_length=2),
        ),
        # Corrects the copy-pasted verbose_name on Article.header.
        migrations.AlterField(
            model_name='article',
            name='header',
            field=models.CharField(help_text='Text header', max_length=300, verbose_name='Header'),
        ),
    ]
|
{"/Mortality/urls.py": ["/website/views.py"], "/website/views.py": ["/core/algorythm.py", "/website/models.py"], "/website/admin.py": ["/website/models.py"]}
|
8,007
|
Mihkorz/mortality.ai
|
refs/heads/master
|
/website/migrations/0003_auto_20160817_1352.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-17 13:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; migrations are frozen history —
    # do not hand-edit (the verbose_name='Identificator' on 'header' is the
    # model snapshot at generation time; it is corrected in migration 0004).

    dependencies = [
        ('website', '0002_runnedtest_expected_longevity'),
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('idx', models.CharField(help_text="Used to place text in the required container on page. Don't change", max_length=300, verbose_name='Identificator')),
                ('header', models.CharField(help_text='Text header', max_length=300, verbose_name='Identificator')),
                ('text', models.TextField()),
            ],
        ),
        migrations.AlterField(
            model_name='runnedtest',
            name='alcohol',
            field=models.IntegerField(blank=True, choices=[(0, 'Non-drinker'), (1, '< 1 drink/month'), (2, '0-4/week'), (3, '5-9/week'), (4, '10-24/week'), (5, 'Heavy drinker')], default=0, verbose_name='Alcohol'),
        ),
        migrations.AlterField(
            model_name='runnedtest',
            name='input_file',
            field=models.FileField(blank=True, upload_to=b'', verbose_name='Blood markers file'),
        ),
    ]
|
{"/Mortality/urls.py": ["/website/views.py"], "/website/views.py": ["/core/algorythm.py", "/website/models.py"], "/website/admin.py": ["/website/models.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.