index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
36,821
|
egalistmir/pyfvm
|
refs/heads/master
|
/pyfvm/discretize.py
|
# -*- coding: utf-8 -*-
#
from . import form_language
from .discretize_linear import _discretize_edge_integral
from . import fvm_problem
from . import jacobian
import numpy
import sympy
class EdgeKernel(object):
    """Evaluates an edge-based kernel over the edges of selected cells."""

    def __init__(self, val):
        # Callable of (u0, u1, x0, x1, edge_ce_ratio, edge_length).
        self.val = val
        # Single catch-all subdomain, matched by the assembly loops.
        self.subdomains = [None]

    def eval(self, u, mesh, cell_ids):
        """Apply the kernel to every edge of the cells in `cell_ids`."""
        nefc = mesh.idx_hierarchy[..., cell_ids]
        coords = mesh.node_coords[nefc]
        ce = mesh.ce_ratios[..., cell_ids]
        lengths = numpy.sqrt(mesh.ei_dot_ei[..., cell_ids])
        vals = numpy.array(
            self.val(
                u[nefc[0]],
                u[nefc[1]],
                coords[..., 0],
                coords[..., 1],
                ce,
                lengths,
            )
        )
        # Adding an explicit zero array broadcasts scalar kernel results to
        # the full (node, edge, cell) index shape.
        return vals + numpy.zeros(nefc.shape)
class VertexKernel(object):
    """Evaluates a vertex-based (control-volume) kernel."""

    def __init__(self, val):
        # Callable of (u, control_volumes, X).
        self.val = val
        # Single catch-all subdomain, matched by the assembly loops.
        self.subdomains = [None]

    def eval(self, u, mesh, vertex_ids):
        """Apply the kernel at the vertices in `vertex_ids`."""
        volumes = mesh.control_volumes[vertex_ids]
        coords = mesh.node_coords[vertex_ids].T
        # Adding zeros broadcasts scalar results to one value per vertex.
        return self.val(u, volumes, coords) + numpy.zeros(len(volumes))
class FaceKernel(object):
    """Evaluates a face-based (cell-surface) kernel.

    Parameters
    ----------
    val : callable
        Callable of (u, face_areas, X) returning the kernel values.
    subdomain :
        Optional subdomain marker; ``None`` (the default) means "everywhere".
        A default is required because ``discretize`` constructs face kernels
        as ``FaceKernel(val)`` with no subdomain argument, which previously
        raised a TypeError.
    """

    def __init__(self, val, subdomain=None):
        self.val = val
        self.subdomain = subdomain

    def eval(self, u, mesh, cell_face_nodes):
        """Apply the kernel on the faces given by `cell_face_nodes`."""
        face_areas = mesh.get_face_areas(cell_face_nodes)
        X = mesh.node_coords[cell_face_nodes].T
        # Adding zeros broadcasts scalar results to one value per face.
        zero = numpy.zeros(len(cell_face_nodes))
        return self.val(u, face_areas, X) + zero
class DirichletKernel(object):
    """Evaluates a Dirichlet boundary condition on masked vertices."""

    def __init__(self, val, subdomain):
        # Callable of (u, X) giving the boundary residual values.
        self.val = val
        self.subdomain = subdomain

    def eval(self, u, mesh, vertex_mask):
        """Apply the condition at every vertex selected by `vertex_mask`."""
        num_masked = sum(vertex_mask)
        assert len(u) == num_masked
        coords = mesh.node_coords[vertex_mask].T
        # Adding zeros broadcasts scalar results to one value per vertex.
        return self.val(u, coords) + numpy.zeros(num_masked)
def discretize(obj, mesh):
    """Discretize the weak formulation `obj` over `mesh`.

    Builds a symbolic expression for each integral in
    ``obj.apply(u[, lmbda])``, lambdifies it into a numpy kernel, and
    symbolically differentiates it for the Jacobian.

    Returns the pair ``(residual, jac)`` where ``residual`` is an
    ``FvmProblem`` and ``jac`` a ``Jacobian`` operator.
    """
    u = sympy.Function("u")
    lmbda = sympy.Function("lambda")
    # Some problem classes take an extra parameter function; fall back to
    # the single-argument form otherwise.
    try:
        res = obj.apply(u, lmbda)
    except TypeError:
        res = obj.apply(u)

    # See <http://docs.sympy.org/dev/modules/utilities/lambdify.html>.
    a2a = [{"ImmutableMatrix": numpy.array}, "numpy"]

    edge_kernels = set()
    vertex_kernels = set()
    face_kernels = set()
    edge_matrix_kernels = set()
    jacobian_edge_kernels = set()
    jacobian_vertex_kernels = set()
    jacobian_face_kernels = set()
    for integral in res.integrals:
        if isinstance(integral.measure, form_language.ControlVolumeSurface):
            # Integral over the control-volume surface (edge-based).
            x0 = sympy.Symbol("x0")
            x1 = sympy.Symbol("x1")
            el = sympy.Symbol("edge_length")
            er = sympy.Symbol("edge_ce_ratio")
            expr, index_vars = _discretize_edge_integral(
                integral.integrand, x0, x1, el, er, [u]
            )
            expr = sympy.simplify(expr)
            # "Turn the edge around": the same expression as seen from the
            # opposite endpoint.
            uk0 = index_vars[0][0]
            uk1 = index_vars[0][1]
            expr_turned = expr.subs(
                {uk0: uk1, uk1: uk0, x0: x1, x1: x0}, simultaneous=True
            )
            val = sympy.lambdify(
                (uk0, uk1, x0, x1, er, el), [expr, expr_turned], modules=a2a
            )
            edge_kernels.add(EdgeKernel(val))
            # Linearization w.r.t. both endpoint values, for the Jacobian.
            expr_lin0 = [sympy.diff(expr, var) for var in [uk0, uk1]]
            expr_lin1 = [sympy.diff(expr_turned, var) for var in [uk0, uk1]]
            val_lin = sympy.lambdify(
                (uk0, uk1, x0, x1, er, el), [expr_lin0, expr_lin1], modules=a2a
            )
            jacobian_edge_kernels.add(EdgeKernel(val_lin))
        elif isinstance(integral.measure, form_language.ControlVolume):
            # Integral over the control volume itself (vertex-based).
            x = sympy.DeferredVector("x")
            fx = integral.integrand(x)
            uk0 = sympy.Symbol("uk0")
            try:
                expr = fx.subs(u(x), uk0)
            except AttributeError:  # 'float' object has no attribute 'subs'
                expr = fx
            control_volume = sympy.Symbol("control_volume")
            expr *= control_volume
            val = sympy.lambdify((uk0, control_volume, x), expr, modules=a2a)
            vertex_kernels.add(VertexKernel(val))
            # Linearization.
            expr_lin = sympy.diff(expr, uk0)
            val_lin = sympy.lambdify((uk0, control_volume, x), expr_lin, modules=a2a)
            jacobian_vertex_kernels.add(VertexKernel(val_lin))
        else:
            assert isinstance(integral.measure, form_language.CellSurface)
            # Integral over the cell surface (boundary faces).
            x = sympy.DeferredVector("x")
            fx = integral.integrand(x)
            uk0 = sympy.Symbol("uk0")
            try:
                expr = fx.subs(u(x), uk0)
            except AttributeError:  # 'float' object has no attribute 'subs'
                expr = fx
            face_area = sympy.Symbol("face_area")
            expr *= face_area
            val = sympy.lambdify((uk0, face_area, x), expr, modules=a2a)
            face_kernels.add(FaceKernel(val))
            # Linearization.
            expr_lin = sympy.diff(expr, uk0)
            val_lin = sympy.lambdify((uk0, face_area, x), expr_lin, modules=a2a)
            jacobian_face_kernels.add(FaceKernel(val_lin))

    dirichlet_kernels = set()
    jacobian_dirichlet_kernels = set()
    dirichlet = getattr(obj, "dirichlet", None)
    if callable(dirichlet):
        u = sympy.Function("u")
        x = sympy.DeferredVector("x")
        for f, subdomain in dirichlet(u):
            uk0 = sympy.Symbol("uk0")
            try:
                expr = f(x).subs(u(x), uk0)
            except AttributeError:  # 'float' object has no attribute 'subs'
                # BUG FIX: previously this fell back to `fx`, a stale (or
                # undefined) variable from the integrals loop above.  The
                # constant boundary value is this function's own result.
                expr = f(x)
            val = sympy.lambdify((uk0, x), expr, modules=a2a)
            dirichlet_kernels.add(DirichletKernel(val, subdomain))
            # Linearization.
            expr_lin = sympy.diff(expr, uk0)
            val_lin = sympy.lambdify((uk0, x), expr_lin, modules=a2a)
            jacobian_dirichlet_kernels.add(DirichletKernel(val_lin, subdomain))

    residual = fvm_problem.FvmProblem(
        mesh,
        edge_kernels,
        vertex_kernels,
        face_kernels,
        dirichlet_kernels,
        edge_matrix_kernels,
        [],
        [],
    )
    jac = jacobian.Jacobian(
        mesh,
        jacobian_edge_kernels,
        jacobian_vertex_kernels,
        jacobian_face_kernels,
        jacobian_dirichlet_kernels,
    )
    return residual, jac
|
{"/examples/bratu_example_test.py": ["/pyfvm/__init__.py", "/pyfvm/form_language.py"], "/examples/complex_energy_test.py": ["/pyfvm/__init__.py"], "/examples/neumann_example_test.py": ["/pyfvm/__init__.py", "/pyfvm/form_language.py"], "/pyfvm/__init__.py": ["/pyfvm/discretize.py", "/pyfvm/nonlinear_methods.py", "/pyfvm/fvm_matrix.py"], "/pyfvm/fvm_problem.py": ["/pyfvm/__init__.py"], "/pyfvm/discretize.py": ["/pyfvm/__init__.py"]}
|
36,822
|
egalistmir/pyfvm
|
refs/heads/master
|
/pyfvm/fvm_matrix.py
|
# -*- coding: utf-8 -*-
#
import numpy
from scipy import sparse
def get_fvm_matrix(
    mesh, edge_kernels=None, vertex_kernels=None, face_kernels=None, dirichlets=None
):
    """Assemble the sparse FVM matrix for `mesh` from the given kernels.

    Dirichlet conditions are applied afterwards: each Dirichlet row is
    zeroed out and the Dirichlet value is written onto its diagonal entry.
    Returns a CSR matrix with one row/column per mesh vertex.
    """
    V, I, J = _get_VIJ(
        mesh,
        [] if edge_kernels is None else edge_kernels,
        [] if vertex_kernels is None else vertex_kernels,
        [] if face_kernels is None else face_kernels,
    )
    # One unknown per mesh vertex.
    num_nodes = len(mesh.node_coords)
    # Assemble in COO, then convert to CSR for efficient row access.
    matrix = sparse.coo_matrix((V, (I, J)), shape=(num_nodes, num_nodes)).tocsr()

    diag = matrix.diagonal()
    for dirichlet in ([] if dirichlets is None else dirichlets):
        verts = mesh.get_vertices(dirichlet.subdomain)
        # Zero every Dirichlet row in place via CSR row slices.
        for i in verts:
            matrix.data[matrix.indptr[i] : matrix.indptr[i + 1]] = 0.0
        # The Dirichlet value goes on the diagonal.
        diag[verts] = dirichlet.eval(mesh, verts)
    matrix.setdiag(diag)
    return matrix
def _get_VIJ(mesh, edge_kernels, vertex_kernels, face_kernels):
V = []
I_ = []
J = []
for edge_kernel in edge_kernels:
for subdomain in edge_kernel.subdomains:
cell_mask = mesh.get_cell_mask(subdomain)
v_matrix = edge_kernel.eval(mesh, cell_mask)
V.append(v_matrix[0, 0].flatten())
V.append(v_matrix[0, 1].flatten())
V.append(v_matrix[1, 0].flatten())
V.append(v_matrix[1, 1].flatten())
I_.append(mesh.idx_hierarchy[0].flatten())
I_.append(mesh.idx_hierarchy[0].flatten())
I_.append(mesh.idx_hierarchy[1].flatten())
I_.append(mesh.idx_hierarchy[1].flatten())
J.append(mesh.idx_hierarchy[0].flatten())
J.append(mesh.idx_hierarchy[1].flatten())
J.append(mesh.idx_hierarchy[0].flatten())
J.append(mesh.idx_hierarchy[1].flatten())
# TODO
# for vertex_kernel in vertex_kernels:
# for subdomain in vertex_kernel.subdomains:
# vertex_mask = mesh.get_vertex_mask(subdomain)
# vals_matrix = vertex_kernel.eval(mesh, vertex_mask)
# V.append(vals_matrix)
# I_.append(verts)
# J.append(verts)
for face_kernel in face_kernels:
for subdomain in face_kernel.subdomains:
face_mask = mesh.get_face_mask(subdomain)
vals_matrix = face_kernel.eval(mesh, face_mask)
ids = mesh.idx_hierarchy[..., face_mask]
V.append(vals_matrix)
I_.append(ids)
J.append(ids)
# Finally, make V, I, J into 1D-arrays.
V = numpy.concatenate(V)
I_ = numpy.concatenate(I_)
J = numpy.concatenate(J)
return V, I_, J
|
{"/examples/bratu_example_test.py": ["/pyfvm/__init__.py", "/pyfvm/form_language.py"], "/examples/complex_energy_test.py": ["/pyfvm/__init__.py"], "/examples/neumann_example_test.py": ["/pyfvm/__init__.py", "/pyfvm/form_language.py"], "/pyfvm/__init__.py": ["/pyfvm/discretize.py", "/pyfvm/nonlinear_methods.py", "/pyfvm/fvm_matrix.py"], "/pyfvm/fvm_problem.py": ["/pyfvm/__init__.py"], "/pyfvm/discretize.py": ["/pyfvm/__init__.py"]}
|
36,832
|
1007047/Django-MPTT-With-Checkbox-and-Radio-button
|
refs/heads/master
|
/createwidget/admin.py
|
from django.contrib import admin
from django.contrib import admin
from .models import *
from mptt.admin import MPTTModelAdmin
# Register your models here.
# Properties is an MPTT tree model; MPTTModelAdmin renders it as an
# indented tree in the admin change list.
admin.site.register(Properties, MPTTModelAdmin)
# Address uses the default ModelAdmin.
admin.site.register(Address)
# Register your models here.
|
{"/createwidget/admin.py": ["/createwidget/models.py"], "/createwidget/views.py": ["/createwidget/forms.py", "/createwidget/models.py"], "/createwidget/forms.py": ["/createwidget/models.py", "/createwidget/widget.py"]}
|
36,833
|
1007047/Django-MPTT-With-Checkbox-and-Radio-button
|
refs/heads/master
|
/createwidget/views.py
|
from django.shortcuts import render
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.messages import constants as messages
from django.contrib import messages
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse, Http404, HttpResponseNotAllowed
from .forms import *
from .models import *
import json
from .models import *
# Create your views here.
def createRadio(request):
    """Edit the selection stored on the Address row with pk=1.

    On a valid POST the form is saved and a fresh unbound form is rendered;
    on an invalid POST the bound form is kept so the template can display
    ``form.errors``.  On GET an unbound form for the instance is rendered.
    Raises Http404 if Address pk=1 does not exist.
    """
    addressentry = get_object_or_404(Address, pk=1)
    if request.method == "POST":
        form = AddressForm(request.POST, instance=addressentry)
        if form.is_valid():
            form.save()
            # Successful save: show a clean form again.
            form = AddressForm(instance=addressentry)
        else:
            # BUG FIX: previously the bound form was unconditionally replaced
            # by an unbound one, so validation errors never reached the
            # template.  Keep the bound form with its errors.
            print(form.errors)
    else:
        form = AddressForm(instance=addressentry)
    context = {
        'form': form,
    }
    return render(request, 'createwidget/new.html', context)
def nestedwidget(request):
    """Render the nested checkbox widget page.

    Supplies the full property tree, an empty PropertyForm, and the decoded
    JSON checkbox state of every stored Address (newest first).
    """
    properties = Properties.objects.all()
    addressList = Address.objects.all().order_by('-id')
    # Decode each address's JSON-encoded checkbox state, keyed by its
    # position in the (newest-first) list.
    checbox_input_list = [
        {position: json.loads(entry.checkbox_input)}
        for position, entry in enumerate(addressList)
    ]
    context = {
        'properties': properties,
        'p_form': PropertyForm(),
        'checbox_input_list': checbox_input_list,
    }
    return render(request, 'createwidget/nestedwidget.html', context)
def saveData(request):
    """Persist the posted checkbox selection as a new Address row.

    Reads the raw JSON string from POST field "data" and stores it verbatim
    in ``Address.checkbox_input``.  Returns HTTP 200 on success and HTTP 500
    if the save fails.
    """
    payload = request.POST.get("data", {})
    record = Address()
    record.checkbox_input = payload
    try:
        record.save()
    except Exception as e:
        print("e "+str(e))
        return HttpResponse(status=500)
    return HttpResponse(status=200)
|
{"/createwidget/admin.py": ["/createwidget/models.py"], "/createwidget/views.py": ["/createwidget/forms.py", "/createwidget/models.py"], "/createwidget/forms.py": ["/createwidget/models.py", "/createwidget/widget.py"]}
|
36,834
|
1007047/Django-MPTT-With-Checkbox-and-Radio-button
|
refs/heads/master
|
/createwidget/models.py
|
from django.db import models
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
from django.db.models.signals import post_save
# Create your models here.
# import logging
# from django.views.generic.detail import *
from django.views.generic.detail import BaseDetailView
class Properties(MPTTModel):
    """A node in the property tree (rendered as nested checkboxes)."""
    name = models.CharField(max_length=32)
    # Self-referential FK giving the tree structure; root nodes have
    # parent=None.  Deleting a node cascades to its children.
    parent = TreeForeignKey('self', on_delete=models.CASCADE,
                            null=True, blank=True, related_name='children')
    def __str__(self):
        return self.name
    class MPTTMeta:
        # Keep siblings sorted alphabetically on insertion.
        order_insertion_by = ['name']
class Address(models.Model):
    """Stores one saved checkbox selection."""
    id = models.AutoField(primary_key=True)
    # Properties ticked for this address; optional.
    properties_link = models.ManyToManyField(Properties, blank=True, default=None )
    # JSON-encoded checkbox state as posted by the widget
    # (see views.saveData / views.nestedwidget).
    checkbox_input = models.CharField(max_length=5000, null=False, blank=False)
|
{"/createwidget/admin.py": ["/createwidget/models.py"], "/createwidget/views.py": ["/createwidget/forms.py", "/createwidget/models.py"], "/createwidget/forms.py": ["/createwidget/models.py", "/createwidget/widget.py"]}
|
36,835
|
1007047/Django-MPTT-With-Checkbox-and-Radio-button
|
refs/heads/master
|
/createwidget/apps.py
|
from django.apps import AppConfig
class CreatewidgetConfig(AppConfig):
    """Django application configuration for the createwidget app."""
    name = 'createwidget'
|
{"/createwidget/admin.py": ["/createwidget/models.py"], "/createwidget/views.py": ["/createwidget/forms.py", "/createwidget/models.py"], "/createwidget/forms.py": ["/createwidget/models.py", "/createwidget/widget.py"]}
|
36,836
|
1007047/Django-MPTT-With-Checkbox-and-Radio-button
|
refs/heads/master
|
/createwidget/widget.py
|
from itertools import chain
from django import forms
from django.conf import settings
from django.forms.widgets import Widget,ChoiceWidget
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
# from django.utils.datastructures import MultiValueDict, MergeDict
# from mptt.templatetags.mptt_tags import cache_tree_children
try:
import simplejson as json
except ImportError:
import json
class CheckboxSelectMultiple(ChoiceWidget):
    """Multi-select widget rendered as a list of checkboxes."""
    allow_multiple_selected = True
    input_type = 'checkbox'
    #template_name = 'nestedwidget.html'
    #option_template_name = 'input_option.html'
    template_name = 'django/forms/widgets/checkbox_select.html'
    option_template_name = 'django/forms/widgets/checkbox_option.html'
    def use_required_attribute(self, initial):
        # Don't use the 'required' attribute because browser validation would
        # require all checkboxes to be checked instead of at least one.
        return False
    def value_omitted_from_data(self, data, files, name):
        # HTML checkboxes don't appear in POST data if not checked, so it's
        # never known if the value is actually omitted.
        return False
    def id_for_label(self, id_, index=None):
        """
        Don't include for="field_0" in <label> because clicking such a label
        would toggle the first checkbox.
        """
        if index is None:
            return ''
        return super().id_for_label(id_, index)
def get_doc(node, values):
    """Build a fancytree-style dict describing `node`.

    If the node provides its own ``get_doc`` it is delegated to as-is.
    Otherwise the title falls back to ``node.name`` or ``str(node)``.
    Nodes whose pk appears in `values` (compared as strings) are marked
    selected and expanded.
    """
    if hasattr(node, "get_doc"):
        return node.get_doc(values)
    if hasattr(node, "name"):
        name = node.name
    else:
        # BUG FIX: `unicode` does not exist on Python 3 (NameError); str()
        # is the Python 3 equivalent.
        name = str(node)
    doc = {"title": name, "key": node.pk}
    if str(node.pk) in values:
        doc['selected'] = True
        doc['expand'] = True
    return doc
|
{"/createwidget/admin.py": ["/createwidget/models.py"], "/createwidget/views.py": ["/createwidget/forms.py", "/createwidget/models.py"], "/createwidget/forms.py": ["/createwidget/models.py", "/createwidget/widget.py"]}
|
36,837
|
1007047/Django-MPTT-With-Checkbox-and-Radio-button
|
refs/heads/master
|
/createwidget/urls.py
|
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
# URL routes for the createwidget app.
urlpatterns = [
    # Edit the radio/checkbox form for Address pk=1.
    path('', views.createRadio, name='createradio'),
    # Nested checkbox-tree page.
    path('nestedwidget', views.nestedwidget, name='nestedwidget'),
    # AJAX endpoint that stores the posted checkbox state.
    path('datasave', views.saveData, name="datasave"),
]
|
{"/createwidget/admin.py": ["/createwidget/models.py"], "/createwidget/views.py": ["/createwidget/forms.py", "/createwidget/models.py"], "/createwidget/forms.py": ["/createwidget/models.py", "/createwidget/widget.py"]}
|
36,838
|
1007047/Django-MPTT-With-Checkbox-and-Radio-button
|
refs/heads/master
|
/createwidget/forms.py
|
from django import forms
# from django.contrib.auth.models import User
# from django.contrib.auth.forms import UserCreationForm
from .models import *
from mptt.forms import TreeNodeMultipleChoiceField, TreeNodeChoiceField
from .widget import CheckboxSelectMultiple
class PropertyForm(forms.ModelForm):
parent = TreeNodeChoiceField(queryset=Properties.objects.all())
class Meta:
model = Properties
fields = ['name', 'parent']
class AddressForm(forms.ModelForm):
#
class Meta:
model = Address
fields = ['properties_link']
#widgets = {"properties_link": FancyTreeWidget(queryset=Properties.objects.order_by('tree_id', 'lft'),model=Properties)}
widgets = {"properties_link": CheckboxSelectMultiple()}
|
{"/createwidget/admin.py": ["/createwidget/models.py"], "/createwidget/views.py": ["/createwidget/forms.py", "/createwidget/models.py"], "/createwidget/forms.py": ["/createwidget/models.py", "/createwidget/widget.py"]}
|
36,848
|
markxue-0107/Toxic_Comments
|
refs/heads/main
|
/module/predictor.py
|
import csv
class Predictor:
    """Runs a trained model on test data and writes submission-style CSV."""

    def __init__(self, config, model, logger):
        self.logger = logger
        self.config = config
        self.model = model

    def predict(self, test_data):
        """Return hard class predictions for `test_data`."""
        return self.model.predict(test_data)

    def predict_prob(self, test_data):
        """Return per-class probabilities for `test_data`."""
        return self.model.predict_prob(test_data)

    def save_to_csv(self, id, probs):
        """Write one CSV row per sample — its id followed by the six
        per-class probabilities — to config['output_path']."""
        header = ['id', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
        self.logger.info("Saving prediction result to a csv file...")
        rows = ([sample_id] + prob_row for sample_id, prob_row in zip(id, probs.tolist()))
        with open(self.config['output_path'], 'w', newline='') as output_file:
            writer = csv.writer(output_file)
            writer.writerow(header)
            writer.writerows(rows)
        self.logger.info("Done. Prediction completed!")
|
{"/module/trainer.py": ["/module/models/textCNN.py", "/module/models/transformer.py", "/module/models/bidirectGRU.py"], "/main.py": ["/module/preprocessor.py", "/module/trainer.py", "/module/predictor.py", "/metrics/metrics.py"], "/module/models/bidirectGRU.py": ["/module/models/model_utils/callbacks.py", "/utils.py"], "/module/preprocessor.py": ["/utils.py"]}
|
36,849
|
markxue-0107/Toxic_Comments
|
refs/heads/main
|
/module/models/textCNN.py
|
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Conv1D, Dense, Dropout, Flatten, MaxPooling1D
import warnings
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
class TextCNN:
    """1-D CNN text classifier for multi-label toxic-comment prediction."""
    def __init__(self, config, classes, vocab_size, logger, embedding_matrix):
        self.models = {}
        self.logger = logger
        self.vocab_size = vocab_size
        self.config = config
        self.classes = classes
        self.n_of_classes = len(classes)
        # Pretrained embedding matrix (vocab_size x embedding_col) or None.
        self.pretrained_embedding = embedding_matrix
        self.model = self._build()
    def _show_training_config_para(self):
        # Log the training hyper-parameters for reproducibility.
        title = "Training config parameters"
        self.logger.info(title.center(40, '-'))
        self.logger.info("---model_name = {}".format(self.config['model_name']))
        self.logger.info("---max_input_len = {}".format(self.config['max_len']))
        self.logger.info("---batch_size = {}".format(self.config['batch_size']))
        self.logger.info("---dropout = {}".format(self.config['dropout']))
        self.logger.info("---epochs = {}".format(self.config['epochs']))
        self.logger.info("---num_of_classes = {}".format(self.n_of_classes))
    def _build(self):
        """Build and compile the sequential Keras model."""
        self._show_training_config_para()
        model = Sequential()
        if self.pretrained_embedding is not None:
            # Frozen embeddings when a pretrained matrix was supplied.
            self.logger.info("Found embedding matrix, setting trainable=False")
            model.add(Embedding(self.vocab_size,
                                self.config['embedding_col'],
                                weights=[self.pretrained_embedding],
                                input_length=self.config['max_len'],
                                trainable=False))
        else:
            self.logger.info("Not found embedding matrix, skip using pretrained model, setting trainable=true")
            model.add(Embedding(self.vocab_size,
                                self.config['embedding_col'],
                                embeddings_initializer='uniform',
                                input_length=self.config['max_len'],
                                trainable=True))
        # Three conv/pool stages with widening filters and narrowing kernels.
        model.add(Conv1D(128, 7, activation='relu', padding='same'))
        model.add(MaxPooling1D())
        model.add(Conv1D(256, 5, activation='relu', padding='same'))
        model.add(MaxPooling1D())
        model.add(Conv1D(512, 3, activation='relu', padding='same'))
        model.add(MaxPooling1D())
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(self.config['dropout']))
        # NOTE(review): two stacked output Dense layers — a linear one
        # immediately followed by a sigmoid one of the same width.  Looks
        # like a leftover; confirm whether a single sigmoid Dense was
        # intended.
        model.add(Dense(self.n_of_classes, activation=None))
        model.add(Dense(self.n_of_classes, activation='sigmoid'))
        model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
        model.summary()
        return model
    def fit_and_validate(self, train_x, train_y, validate_x, validate_y):
        """Train on the training split; return (hard predictions on the
        validation split, the Keras History object)."""
        fitted = self.model.fit(train_x,
                                train_y,
                                epochs=self.config['epochs'],
                                verbose=True,
                                validation_data=(validate_x, validate_y),
                                batch_size=self.config['batch_size'])
        predictions = self.predict(validate_x)
        return predictions, fitted
    def predict(self, validate_x):
        """Return boolean predictions thresholded at 0.5."""
        probs = self.model.predict(validate_x)
        return probs >= 0.5
    def predict_prob(self, validate_x):
        """Return raw sigmoid probabilities."""
        return self.model.predict(validate_x)
|
{"/module/trainer.py": ["/module/models/textCNN.py", "/module/models/transformer.py", "/module/models/bidirectGRU.py"], "/main.py": ["/module/preprocessor.py", "/module/trainer.py", "/module/predictor.py", "/metrics/metrics.py"], "/module/models/bidirectGRU.py": ["/module/models/model_utils/callbacks.py", "/utils.py"], "/module/preprocessor.py": ["/utils.py"]}
|
36,850
|
markxue-0107/Toxic_Comments
|
refs/heads/main
|
/module/trainer.py
|
from module.models.textCNN import TextCNN
from module.models.transformer import Transformer
from module.models.bidirectGRU import BidirectionalGRU
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
class Trainer:
    """Creates the configured model and drives training and validation."""

    def __init__(self, config, classes, logger, vocab_size, embedding_matrix):
        self.config = config
        self.classes = classes
        self.logger = logger
        self.vocab_size = vocab_size
        self.model = None
        self.pretrained_embedding = embedding_matrix
        self._create_model(classes)

    def _create_model(self, classes):
        # Dispatch on config['model_name']; an unknown name leaves
        # self.model as None (a warning is logged).
        if self.config['model_name'] == 'text_cnn':
            self.logger.info("Creating textCNN model...")
            self.model = TextCNN(self.config, classes, self.vocab_size, self.logger, self.pretrained_embedding)
        elif self.config['model_name'] == 'transformer':
            self.logger.info("Creating transformer...")
            self.model = Transformer(self.config, classes, self.vocab_size, self.logger, self.pretrained_embedding)
        elif self.config['model_name'] == 'bidirectGRU':
            self.logger.info("Creating bidirectional GRU model...")
            self.model = BidirectionalGRU(self.config, classes, self.vocab_size, self.logger, self.pretrained_embedding)
        else:
            self.logger.warning("Currently model {} is not be supported".format(self.config['model_name']))

    def fit(self, train_x, train_y):
        """Fit the underlying model and return it."""
        self.model.fit(train_x, train_y)
        return self.model

    def metrics(self, predictions, labels):
        """Return (accuracy, sklearn classification report) for predictions."""
        accuracy = accuracy_score(labels, predictions)
        cls_report = classification_report(labels, predictions, zero_division=1)
        return accuracy, cls_report

    def validate(self, validate_x, validate_y):
        """Predict on the validation split and score against validate_y."""
        # BUG FIX: the model classes define predict(validate_x) with a single
        # argument; the labels were mistakenly passed as a second positional
        # argument, which raised a TypeError.
        predictions = self.model.predict(validate_x)
        return self.metrics(predictions, validate_y)

    def fit_and_validate(self, train_x, train_y, validate_x, validate_y):
        """Train, then return (model, accuracy, report, history)."""
        predictions, fitted = self.model.fit_and_validate(train_x, train_y, validate_x, validate_y)
        accuracy, report = self.metrics(predictions, validate_y)
        return self.model, accuracy, report, fitted
|
{"/module/trainer.py": ["/module/models/textCNN.py", "/module/models/transformer.py", "/module/models/bidirectGRU.py"], "/main.py": ["/module/preprocessor.py", "/module/trainer.py", "/module/predictor.py", "/metrics/metrics.py"], "/module/models/bidirectGRU.py": ["/module/models/model_utils/callbacks.py", "/utils.py"], "/module/preprocessor.py": ["/utils.py"]}
|
36,851
|
markxue-0107/Toxic_Comments
|
refs/heads/main
|
/main.py
|
import yaml
import argparse
import logging
from module.preprocessor import Preprocessor
from module.trainer import Trainer
from module.predictor import Predictor
from metrics.metrics import Metrics
import os
if __name__ == "__main__":
    # Let TensorFlow grow GPU memory on demand instead of grabbing it all.
    os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
    parser = argparse.ArgumentParser(description='Processing command line')
    parser.add_argument('--config', type=str, required=True)
    parser.add_argument('--loglevel', type=str, default="INFO")
    args = parser.parse_args()
    FORMAT = '%(asctime)-15s %(message)s'
    logging.basicConfig(format=FORMAT, level=args.loglevel)
    logger = logging.getLogger('global_logger')
    # The six Kaggle toxic-comment labels, in output-column order.
    classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
    with open(args.config) as cfg:
        try:
            config = yaml.safe_load(cfg)
            # Preprocess: load/split data and build vocabulary + embeddings.
            preprocessor = Preprocessor(config['preprocessing'], classes, logger)
            train_data, train_labels, train_x, validate_x, train_y, validate_y, test_data = preprocessor.process()
            vocab_size = preprocessor.vocab_size
            pretrained_embedding = preprocessor.embedding_matrix
            # Train on the training split and score the held-out split.
            trainer = Trainer(config['training'], classes, logger, vocab_size, pretrained_embedding)
            model, accuracy, cls_report, history = trainer.fit_and_validate(train_x, train_y, validate_x, validate_y)
            logger.info("Accuracy : {}".format(accuracy))
            logger.info("\n{}\n".format(cls_report))
            # Plot the training curves.
            metric = Metrics(config['training']['model_name'], history)
            metric.history_plot()
            # Predict on the test set and write the submission CSV.
            predictor = Predictor(config['predict'], model, logger)
            probs = predictor.predict_prob(test_data)
            predictor.save_to_csv(preprocessor.test_id, probs)
        except yaml.YAMLError as err:
            # NOTE(review): only YAML parse errors are caught here; any
            # training/prediction failure propagates and aborts the run.
            print("config file error : {}".format(err))
|
{"/module/trainer.py": ["/module/models/textCNN.py", "/module/models/transformer.py", "/module/models/bidirectGRU.py"], "/main.py": ["/module/preprocessor.py", "/module/trainer.py", "/module/predictor.py", "/metrics/metrics.py"], "/module/models/bidirectGRU.py": ["/module/models/model_utils/callbacks.py", "/utils.py"], "/module/preprocessor.py": ["/utils.py"]}
|
36,852
|
markxue-0107/Toxic_Comments
|
refs/heads/main
|
/module/models/bidirectGRU.py
|
from tensorflow.keras.layers import Dense, Input, Bidirectional, Conv1D, GRU, concatenate, LSTM
from tensorflow.keras.layers import Embedding, SpatialDropout1D, GlobalMaxPooling1D, GlobalAveragePooling1D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from module.models.model_utils.callbacks import generate_callbacks
from utils import show_parameters
class BidirectionalGRU:
    """Bi-LSTM + Bi-GRU text classifier with avg/max pooling heads."""
    def __init__(self, config, classes, vocab_size, logger, embedding_matrix):
        self.models = {}
        self.logger = logger
        self.vocab_size = vocab_size
        self.config = config
        self.classes = classes
        self.nums_class = len(classes)
        # Pretrained embedding matrix (vocab_size x embedding_col) or None.
        self.pretrained_embedding = embedding_matrix
        self.model = self._build()
        # Best weights (by validation metric) are checkpointed here.
        self.checkpoint_best_model = 'model/CommentsClassifier_BiRNN.hdf5'
        self.callback_list = generate_callbacks(self.checkpoint_best_model)
    def _show_training_config_para(self):
        # Log the hyper-parameters via the shared utils helper.
        show_parameters(self.logger, self.config, 'Training')
    def _build(self):
        """Build and compile the functional Keras model."""
        self._show_training_config_para()
        inputs = Input(shape=(self.config['max_len'], ))
        if self.pretrained_embedding is not None:
            # Frozen embeddings when a pretrained matrix was supplied.
            self.logger.info("Found embedding matrix, setting trainable=false")
            embedding = Embedding(self.vocab_size,
                                  self.config['embedding_col'],
                                  weights=[self.pretrained_embedding],
                                  input_length=self.config['max_len'],
                                  trainable=False)
        else:
            self.logger.info("Not found embedding matrix, skip using pretrained model, setting trainable=true")
            embedding = Embedding(self.vocab_size,
                                  self.config['embedding_col'],
                                  embeddings_initializer='uniform',
                                  input_length=self.config['max_len'],
                                  trainable=True)
        x = embedding(inputs)
        x = SpatialDropout1D(0.2)(x)
        x = Bidirectional(LSTM(32, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(x)
        x = Bidirectional(GRU(32, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(x)
        # Concatenate average- and max-pooled sequence features.
        avg_pooling = GlobalAveragePooling1D()(x)
        max_pooling = GlobalMaxPooling1D()(x)
        x = concatenate([avg_pooling, max_pooling])
        # NOTE(review): output width is hard-coded to 6 rather than
        # self.nums_class — confirm they always agree.
        outputs = Dense(6, activation='sigmoid')(x)
        model = Model(inputs=inputs, outputs=outputs)
        # NOTE(review): `lr` is the legacy Keras alias; newer versions
        # expect `learning_rate`.
        model.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-3), metrics=['accuracy'])
        model.summary()
        return model
    def fit_and_validate(self, train_x, train_y, validate_x, validate_y):
        """Train with callbacks; return (validation predictions, History)."""
        history = self.model.fit(train_x,
                                 train_y,
                                 epochs=self.config['epochs'],
                                 verbose=True,
                                 validation_data=(validate_x, validate_y),
                                 batch_size=self.config['batch_size'],
                                 callbacks=self.callback_list)
        predictions = self.predict(validate_x)
        return predictions, history
    def predict_prob(self, validate_x):
        """Return raw sigmoid probabilities."""
        return self.model.predict(validate_x)
    def predict(self, validate_x):
        """Return boolean predictions thresholded at 0.5."""
        probs = self.model.predict(validate_x)
        return probs >= 0.5
|
{"/module/trainer.py": ["/module/models/textCNN.py", "/module/models/transformer.py", "/module/models/bidirectGRU.py"], "/main.py": ["/module/preprocessor.py", "/module/trainer.py", "/module/predictor.py", "/metrics/metrics.py"], "/module/models/bidirectGRU.py": ["/module/models/model_utils/callbacks.py", "/utils.py"], "/module/preprocessor.py": ["/utils.py"]}
|
36,853
|
markxue-0107/Toxic_Comments
|
refs/heads/main
|
/module/preprocessor.py
|
import pandas as pd
import numpy as np
import io
from tensorflow import keras
from sklearn.model_selection import train_test_split
from utils import remove_punctuation, show_parameters
class Preprocessor:
    def __init__(self, config, classes, logger):
        """Load the train/test CSVs and split them according to `config`.

        config: the 'preprocessing' section of the YAML configuration.
        classes: label column names, in output order.
        """
        self.config = config
        self.logger = logger
        self.classes = classes
        self.train_data, self.train_labels = None, None
        self.train_x, self.validate_x, self.train_y, self.validate_y = None, None, None, None
        # Filled in later by nn_vecterization() when a pretrained embedding
        # is configured.
        self.embedding_matrix = None
        self.vocab_size = 0
        self._load_data()
        # word <-> index vocabulary maps, built during vectorization.
        self.word2ind = {}
        self.ind2word = {}
    def _show_config_parameters(self):
        # Delegate parameter logging to the shared utils helper.
        show_parameters(self.logger, self.config, 'Preprocessing')
    def _load_data(self):
        """Read the train/test CSVs, fill missing text, and split
        train/validate according to the configured ratio and seed."""
        self._show_config_parameters()
        self.logger.info("Loading training and test data...")
        orig_data = pd.read_csv(self.config['input_data_path'])
        test_data = pd.read_csv(self.config['test_data_path'])
        # Empty comment cells become the literal token "unknown".
        orig_data[self.config['input_text']].fillna("unknown", inplace=True)
        test_data[self.config['input_text']].fillna("unknown", inplace=True)
        self.train_data, self.train_labels = self._parse_trained_data(orig_data)
        self.logger.info("Spliting datasets, the split ratio is {}, random state is {}".format(self.config['split_ratio'],
                                                                                               self.config['random_state']))
        self.train_x, self.validate_x, self.train_y, self.validate_y = train_test_split(
            self.train_data,
            self.train_labels,
            test_size=self.config['split_ratio'],
            random_state=self.config['random_state'])
        self.test_data, self.test_id = self._parse_test_data(test_data)
        self.logger.info("Loading done.")
    def _parse_trained_data(self, orig_data):
        """Return (punctuation-stripped text array, label matrix) extracted
        from the raw training frame."""
        self.logger.info("Parsing input text...")
        text = orig_data[self.config['input_text']]
        labels = orig_data[self.classes].values
        train_data = remove_punctuation(text).values
        return train_data, labels
    def _parse_test_data(self, test_data):
        """Return (punctuation-stripped test text array, id array)."""
        self.logger.info("Parsing test dataset...")
        text = remove_punctuation(test_data[self.config['input_text']]).values
        ids = test_data.id.values
        return text, ids
    def process(self):
        """Return the full preprocessed bundle, vectorizing for neural
        network training when configured to do so."""
        text_converter = self.config['text_converter']
        train_data, train_labels, train_x, validate_x, train_y, validate_y, test_data = \
            self.train_data, self.train_labels, self.train_x, self.validate_x, self.train_y, self.validate_y, self.test_data
        # Note: the config value is spelled 'neural_network_vecterization' (sic).
        if text_converter == 'neural_network_vecterization':
            train_x, validate_x, test_data = self.nn_vecterization(train_x, validate_x, test_data)
        return train_data, train_labels, train_x, validate_x, train_y, validate_y, test_data
def nn_vecterization(self, train_x, validate_x, test_data):
    """Convert tokenized sentences to fixed-length id sequences for NN training.

    Builds the vocabulary (from a pretrained embedding file when
    config['pretrained_embedding'] is set, otherwise from the training
    corpus), then maps every sentence to word ids and pads/truncates each
    sequence to config['max_len'].

    Side effects: populates self.word2ind, self.ind2word, self.vocab_size
    and, in the pretrained case, self.embedding_matrix.

    Returns (train_ids, validate_ids, test_ids) as padded int arrays.
    """
    self.logger.info("Vecterizing data for neural network training...")
    specialchars = ['<pad>', '<unk>']
    pretrained_embedding = self.config.get('pretrained_embedding', None)
    if pretrained_embedding is not None:
        self.logger.info("Detected pretrained embedding.")
        self.logger.info("Loading pretrained embeddings {}".format(pretrained_embedding))
        embedding = Preprocessor.load_word_embedding(pretrained_embedding)
        self.logger.info("Loading done")
        self.logger.info("Creating vocabulary...")
        vocabs = specialchars + list(embedding.keys())
        self.vocab_size = len(vocabs)
        self.embedding_matrix = np.zeros((self.vocab_size, self.config['embedding_col']))
        # Special tokens are absent from the pretrained file; give them
        # random vectors so every vocab entry has an embedding row.
        for token in specialchars:
            embedding[token] = np.random.uniform(low=-1, high=1, size=(self.config['embedding_col']))
        for index, word in enumerate(vocabs):
            self.word2ind[word] = index
            self.ind2word[index] = word
            self.embedding_matrix[index] = embedding[word]
    else:
        def add_word(word2ind, ind2word, word):
            # Assign the next free id to unseen words only.
            if word in word2ind:
                return
            ind2word[len(word2ind)] = word
            word2ind[word] = len(word2ind)
        for char in specialchars:
            add_word(self.word2ind, self.ind2word, char)
        self.logger.info("Creating vocabulary....")
        for sentence in train_x:
            for word in sentence:
                add_word(self.word2ind, self.ind2word, word)
        self.vocab_size = len(self.word2ind)
        self.logger.info("Done. Got {} words".format(len(self.word2ind.keys())))
    self.logger.info("Preparing data for training...")
    # The conversion/padding pipeline was triplicated; factored into helpers.
    train_x_in = self._pad_sequences(self._sentences_to_ids(train_x))
    validate_x_in = self._pad_sequences(self._sentences_to_ids(validate_x))
    test_data_in = self._pad_sequences(self._sentences_to_ids(test_data))
    return train_x_in, validate_x_in, test_data_in

def _sentences_to_ids(self, sentences):
    """Map each tokenized sentence to word ids; unknown words -> '<unk>' id.

    Returns a plain list of id lists. (Fix: the old code built a ragged
    np.array here, which raises on NumPy >= 1.24; pad_sequences accepts
    plain lists, so no array is needed before padding.)
    """
    unk = self.word2ind['<unk>']
    return [[self.word2ind.get(word, unk) for word in sentence]
            for sentence in sentences]

def _pad_sequences(self, sequences):
    """Pad/truncate id sequences to config['max_len'] with the '<pad>' id."""
    return keras.preprocessing.sequence.pad_sequences(sequences,
                                                      maxlen=self.config['max_len'],
                                                      padding='post',
                                                      value=self.word2ind['<pad>'])
@staticmethod
def load_word_embedding(filename):
file_in = io.open(filename, 'r', encoding='utf-8', newline='\n', errors='ignore')
data = {}
for line in file_in:
tokens = line.rstrip().split(' ')
data[tokens[0]] = np.array(list(map(float, tokens[1:])))
return data
|
{"/module/trainer.py": ["/module/models/textCNN.py", "/module/models/transformer.py", "/module/models/bidirectGRU.py"], "/main.py": ["/module/preprocessor.py", "/module/trainer.py", "/module/predictor.py", "/metrics/metrics.py"], "/module/models/bidirectGRU.py": ["/module/models/model_utils/callbacks.py", "/utils.py"], "/module/preprocessor.py": ["/utils.py"]}
|
36,854
|
markxue-0107/Toxic_Comments
|
refs/heads/main
|
/metrics/metrics.py
|
import matplotlib.pyplot as plt
class Metrics:
    """Plots training-history curves (loss and accuracy) for a fitted model."""

    def __init__(self, model_name, history):
        # `history` is expected to expose a `.history` dict of per-epoch
        # metric lists (Keras History-like) -- TODO confirm against callers.
        self.history = history
        self.model_name = model_name

    def history_plot(self):
        """Render a two-row figure: cross-entropy loss on top, accuracy below."""
        curves = self.history.history
        panels = [
            (1, 'Cross Entropy Loss - ' + self.model_name, 12, 'loss', 'val_loss', "Loss", 'upper right'),
            (2, 'Classification Accuracy ' + self.model_name, 10, 'accuracy', 'val_accuracy', "Accuracy", 'lower right'),
        ]
        plt.figure(figsize=(8, 10))
        for position, title, title_size, train_key, val_key, y_label, legend_loc in panels:
            plt.subplot(2, 1, position)
            plt.title(title, fontsize=title_size)
            plt.plot(curves[train_key], color='blue', label='train')
            plt.plot(curves[val_key], color='orange', label='val')
            plt.xlabel("Epochs", fontsize=12)
            plt.ylabel(y_label, fontsize=12)
            plt.legend(loc=legend_loc)
        plt.show()
|
{"/module/trainer.py": ["/module/models/textCNN.py", "/module/models/transformer.py", "/module/models/bidirectGRU.py"], "/main.py": ["/module/preprocessor.py", "/module/trainer.py", "/module/predictor.py", "/metrics/metrics.py"], "/module/models/bidirectGRU.py": ["/module/models/model_utils/callbacks.py", "/utils.py"], "/module/preprocessor.py": ["/utils.py"]}
|
36,855
|
markxue-0107/Toxic_Comments
|
refs/heads/main
|
/module/models/model_utils/callbacks.py
|
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.models import load_model
def generate_callbacks(model_path):
    """Standard callback pair: early-stop on val_loss, checkpoint best val_accuracy.

    Returns [EarlyStopping, ModelCheckpoint] ready to pass to model.fit().
    """
    stopper = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=2)
    saver = ModelCheckpoint(model_path,
                            monitor='val_accuracy',
                            mode='max',
                            verbose=1,
                            save_best_only=True)
    return [stopper, saver]
def load_trained_model(logger, model_path):
    """Log and return a previously saved Keras model loaded from *model_path*."""
    logger.info("Loading saved model {}...".format(model_path))
    return load_model(model_path)
|
{"/module/trainer.py": ["/module/models/textCNN.py", "/module/models/transformer.py", "/module/models/bidirectGRU.py"], "/main.py": ["/module/preprocessor.py", "/module/trainer.py", "/module/predictor.py", "/metrics/metrics.py"], "/module/models/bidirectGRU.py": ["/module/models/model_utils/callbacks.py", "/utils.py"], "/module/preprocessor.py": ["/utils.py"]}
|
36,856
|
markxue-0107/Toxic_Comments
|
refs/heads/main
|
/module/models/transformer.py
|
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.optimizers import Adam
class MultiHeadAttention(layers.Layer):
    """Multi-head scaled dot-product self-attention Keras layer."""

    def __init__(self, d_model, num_heads=8):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.embed_dim = d_model
        assert self.embed_dim % self.num_heads == 0, "Embedding dimension should be divisible by number of heads"
        # Per-head feature size.
        self.depth = self.embed_dim // self.num_heads
        # Learned Q/K/V projections plus the final output projection.
        self.query_dense = layers.Dense(self.embed_dim)
        self.value_dense = layers.Dense(self.embed_dim)
        self.key_dense = layers.Dense(self.embed_dim)
        self.dense_layer = layers.Dense(self.embed_dim)

    def scaled_dot_product_attention(self, query, key, value):
        """Compute softmax(QK^T / sqrt(d_k)) V; returns (output, attention_weights)."""
        matmul_qk = tf.matmul(query, key, transpose_b=True)
        # scale
        dk = tf.cast(tf.shape(key)[-1], tf.float32)
        scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
        attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
        output = tf.matmul(attention_weights, value)
        return output, attention_weights

    def split_heads(self, x, batch_size):
        """Reshape (batch, seq, embed) -> (batch, num_heads, seq, depth)."""
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, inputs):
        """Self-attention over *inputs*; no padding/look-ahead mask is applied."""
        batch_size = tf.shape(inputs)[0]
        query = self.query_dense(inputs)
        key = self.key_dense(inputs)
        value = self.value_dense(inputs)
        query = self.split_heads(query, batch_size)
        key = self.split_heads(key, batch_size)
        value = self.split_heads(value, batch_size)
        # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
        # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
        attention, weights = self.scaled_dot_product_attention(query, key, value)
        # Merge the heads back into a single (batch, seq, embed) tensor.
        attention = tf.transpose(attention, perm=[0, 2, 1, 3])
        concat_attention = tf.reshape(attention, (batch_size, -1, self.embed_dim))
        output = self.dense_layer(concat_attention)
        return output
class EncoderLayer(layers.Layer):
    """One Transformer encoder block: self-attention then a position-wise
    feed-forward network, each followed by dropout and a residual + LayerNorm."""

    def __init__(self, embed_dim, nums_head, dff, rate=0.1):
        super(EncoderLayer, self).__init__()
        self.mha = MultiHeadAttention(embed_dim, nums_head)
        # Feed-forward: expand to dff units, project back to embed_dim.
        self.ffn = keras.Sequential([
            layers.Dense(dff, activation='relu'),
            layers.Dense(embed_dim)])
        self.layernorm1 = keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = keras.layers.Dropout(rate)
        self.dropout2 = keras.layers.Dropout(rate)

    def call(self, inputs, training):
        attention_output = self.mha(inputs)
        attention_output = self.dropout1(attention_output, training=training)
        # Residual connection around the attention sub-layer.
        out1 = self.layernorm1(inputs + attention_output)
        ffn_out = self.ffn(out1)
        ffn_out = self.dropout2(ffn_out, training=training)
        # Residual connection around the feed-forward sub-layer.
        return self.layernorm2(out1 + ffn_out)
class TokenAndPositionEmbedding(layers.Layer):
    """Sum of token embeddings and learned absolute position embeddings."""

    def __init__(self, max_len, vocab_size, embed_dim, pretrained_embedding=None):
        super(TokenAndPositionEmbedding, self).__init__()
        self.max_len = max_len
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.pretrained_embedding = pretrained_embedding
        if pretrained_embedding is not None:
            # Frozen token embedding initialized from the pretrained matrix.
            self.token_embed = layers.Embedding(input_dim=vocab_size,
                                                output_dim=embed_dim,
                                                weights=[pretrained_embedding],
                                                input_length=max_len,
                                                trainable=False)
        else:
            # Token embedding learned from scratch.
            self.token_embed = layers.Embedding(input_dim=vocab_size,
                                                output_dim=embed_dim,
                                                input_length=max_len,
                                                embeddings_initializer='uniform',
                                                trainable=True)
        # Positions 0..max_len-1 each get a learned embedding vector.
        self.pos_embed = layers.Embedding(input_dim=max_len, output_dim=embed_dim)

    def call(self, x):
        # Uses the runtime sequence length so shorter batches also work.
        max_len = tf.shape(x)[-1]
        positions = tf.range(start=0, limit=max_len, delta=1)
        positions = self.pos_embed(positions)
        x = self.token_embed(x)
        return x + positions
class Transformer(object):
    """Single-encoder-layer Transformer classifier for multi-label comments."""

    def __init__(self, config, classes, vocab_size, logger, embedding_matrix):
        self.models = {}
        self.logger = logger
        self.vocab_size = vocab_size
        self.classes = classes
        self.config = config
        # May be None; when set, the token embedding is frozen to it.
        self.pretrained_embedding = embedding_matrix
        self.nums_classes = len(classes)
        self.model = self._build()
        self.checkpoint_best_model = 'model/CommentsClassifier_Transformer.hdf5'

    def _show_training_config_para(self):
        """Log the hyper-parameters used for this training run."""
        title = "Training config parameters"
        self.logger.info(title.center(40, '-'))
        self.logger.info("---model_name = {}".format(self.config['model_name']))
        self.logger.info("---max_input_len = {}".format(self.config['max_len']))
        self.logger.info("---batch_size = {}".format(self.config['batch_size']))
        self.logger.info("---dropout = {}".format(self.config['dropout']))
        self.logger.info("---epochs = {}".format(self.config['epochs']))
        self.logger.info("---num_of_classes = {}".format(self.nums_classes))
        self.logger.info("---dff = {}".format(self.config['dff']))
        self.logger.info("---nums_head = {}".format(self.config['nums_head']))
        self.logger.info("---nums_layer = {}".format(self.config['nums_layer']))

    def _build(self):
        """Assemble and compile the Keras model; returns the compiled model."""
        self._show_training_config_para()
        embedding_dim = self.config['embedding_col']
        max_len = self.config['max_len']
        dff = self.config['dff']
        nums_head = self.config['nums_head']
        vocab_size = self.vocab_size
        drop_out_rate = self.config['dropout']
        #nums_layer = self.config['nums_layer']
        inputs = layers.Input(shape=(max_len,))
        embedding_layer = TokenAndPositionEmbedding(max_len, vocab_size, embedding_dim, self.pretrained_embedding)
        x = embedding_layer(inputs)
        # Only a single encoder layer is stacked despite config['nums_layer'].
        enc_layer = EncoderLayer(embedding_dim, nums_head, dff)
        x = enc_layer(x)
        # Pool over the sequence dimension, then classify.
        x = layers.GlobalAveragePooling1D()(x)
        x = layers.Dropout(drop_out_rate)(x)
        x = layers.Dense(64, activation='relu')(x)
        x = layers.Dropout(drop_out_rate)(x)
        # NOTE(review): two stacked Dense(nums_classes) output layers (linear,
        # then sigmoid) look unintentional -- confirm whether the linear one
        # should be removed.
        x = layers.Dense(self.nums_classes, activation=None)(x)
        outputs = layers.Dense(self.nums_classes, activation='sigmoid')(x)
        model = keras.Model(inputs=inputs, outputs=outputs)
        # Multi-label setup: per-class sigmoid + binary cross-entropy.
        model.compile(optimizer=Adam(1e-3), loss='binary_crossentropy', metrics=['accuracy'])
        model.summary()
        return model

    def fit_and_validate(self, train_x, train_y, validate_x, validate_y):
        """Train the model; returns (validation predictions, Keras History)."""
        history = self.model.fit(train_x,
                                 train_y,
                                 epochs=self.config['epochs'],
                                 verbose=True,
                                 validation_data=(validate_x, validate_y),
                                 batch_size=self.config['batch_size'])
        predictions = self.predict(validate_x)
        return predictions, history

    def predict_prob(self, validate_x):
        """Per-class sigmoid probabilities."""
        return self.model.predict(validate_x)

    def predict(self, validate_x):
        """Boolean per-class predictions thresholded at 0.5."""
        probs = self.model.predict(validate_x)
        return probs >= 0.5
|
{"/module/trainer.py": ["/module/models/textCNN.py", "/module/models/transformer.py", "/module/models/bidirectGRU.py"], "/main.py": ["/module/preprocessor.py", "/module/trainer.py", "/module/predictor.py", "/metrics/metrics.py"], "/module/models/bidirectGRU.py": ["/module/models/model_utils/callbacks.py", "/utils.py"], "/module/preprocessor.py": ["/utils.py"]}
|
36,857
|
markxue-0107/Toxic_Comments
|
refs/heads/main
|
/utils.py
|
import re
import string
def remove_punctuation(orig_data):
    """Lower-case, tokenize, and strip punctuation from a Series of strings.

    orig_data: pandas Series (or similar index-addressable container with
    .copy()) of raw strings.

    Returns a same-index copy whose entries are lists of cleaned tokens;
    tokens that become empty after punctuation removal are dropped.
    """
    data_copy = orig_data.copy()
    # The translation table is loop-invariant; build it once, not per row.
    filter_table = str.maketrans('', '', string.punctuation)
    for index in orig_data.index:
        line = orig_data[index].strip().lower().replace('\n', '')
        words = re.split(r'\W+', line)
        # Translate each word once (the old code translated twice per word).
        cleaned = (w.translate(filter_table) for w in words)
        data_copy[index] = [w for w in cleaned if w]
    return data_copy
def show_parameters(logger, config, phase):
    """Log every key/value in *config* under a '<phase> config parameters' banner."""
    banner = (phase + " config parameters").center(40, '-')
    logger.info(banner)
    for key, value in config.items():
        logger.info("---{} = {}".format(key, value))
    return
|
{"/module/trainer.py": ["/module/models/textCNN.py", "/module/models/transformer.py", "/module/models/bidirectGRU.py"], "/main.py": ["/module/preprocessor.py", "/module/trainer.py", "/module/predictor.py", "/metrics/metrics.py"], "/module/models/bidirectGRU.py": ["/module/models/model_utils/callbacks.py", "/utils.py"], "/module/preprocessor.py": ["/utils.py"]}
|
36,862
|
V1nceZhang/spider_sklearn
|
refs/heads/master
|
/spider_sklearn/items.py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SklearnFuncItem(scrapy.Item):
    """Scraped scikit-learn function/class record."""
    # Name of the scraped function or class.
    func_name = scrapy.Field()
class SklearnArgItem(scrapy.Item):
    """Scraped record for a single argument of a scikit-learn function."""
    # Argument name as shown in the docs.
    arg_name = scrapy.Field()
    # Foreign key to the owning function's DB row.
    sklearn_func_id = scrapy.Field()
    # Type/default description string (e.g. "float, optional, default: 0.5").
    arg_value = scrapy.Field()
    # Free-text documentation paragraph for the argument.
    arg_doc = scrapy.Field()
|
{"/spider_sklearn/spiders/sklearn_spider.py": ["/spider_sklearn/mysql_util.py"]}
|
36,863
|
V1nceZhang/spider_sklearn
|
refs/heads/master
|
/spider_sklearn/mysql_util.py
|
import pymysql
class MysqlUtil:
    """Thin pymysql wrapper for inserting/updating scikit-learn config rows."""

    # Database connection (replaced with a real connection in __init__).
    conn = ''

    def __init__(self, host, user, password, DB):
        # Open the database connection.
        self.conn = pymysql.connect(host, user, password, DB)

    def __del__(self):
        # Close the database connection when the wrapper is garbage-collected.
        self.conn.close()

    def insert_func(self, func_name, func_package):
        """Insert one function row; returns the new row id, or 'error' on failure."""
        # Get an operation cursor.
        cursor = self.conn.cursor()
        # SQL insert statement (parameterized -- safe against injection).
        # NOTE(review): column name `is_regresss` looks misspelled but must
        # match the existing DB schema; do not change without a migration.
        sql = "INSERT INTO config_scikitlearn_func (func_name, func_package, is_regresss, is_show, node_id, n_input, n_output) VALUES (%s, %s, 1, 1, 30, 1, 1)"
        try:
            # Execute the SQL statement.
            cursor.execute(sql, (func_name, func_package))
            # Commit the transaction.
            self.conn.commit()
            return cursor.lastrowid
        except Exception as e:
            print(e)
            # Roll back on any error.
            self.conn.rollback()
            return 'error'

    def insert_arg(self, arg_name, sklearn_func_id, arg_value, is_optional, arg_doc):
        """Insert one argument row; returns the new row id, or 'error' on failure."""
        # Get an operation cursor.
        cursor = self.conn.cursor()
        # SQL insert statement (parameterized).
        sql = "INSERT INTO config_scikitlearn_arg (arg_name, arg_value, is_show, scikitlearn_func_id, is_optional, arg_doc) VALUES (%s, %s, 1, %s, %s, %s)"
        try:
            # Execute the SQL statement.
            cursor.execute(sql, (arg_name, arg_value, sklearn_func_id, is_optional, arg_doc))
            # Commit the transaction.
            self.conn.commit()
            return cursor.lastrowid
        except Exception as e:
            print(e)
            # Roll back on any error.
            self.conn.rollback()
            return 'error'

    def format_arg(self, arg_type):
        """Backfill empty arg_type cells; returns lastrowid or 'error'.

        NOTE(review): the `arg_type` parameter is effectively unused -- the
        live statement hard-codes 'dict' for rows with an empty arg_type;
        the commented statements are earlier parameterized variants.
        """
        args = '%' + arg_type + '%'
        arg_type = arg_type + ','
        cursor = self.conn.cursor()
        # sql = "UPDATE config_scikitlearn_arg SET arg_type = ''"
        # sql = "UPDATE config_scikitlearn_arg SET arg_type = CONCAT(arg_type, %s) WHERE arg_value LIKE %s"
        sql = "UPDATE config_scikitlearn_arg SET arg_type = CONCAT(arg_type, 'dict') WHERE arg_type=''"
        try:
            # Execute the SQL statement.
            cursor.execute(sql)
            # cursor.execute(sql, (arg_type, args))
            # Commit the transaction.
            self.conn.commit()
            return cursor.lastrowid
        except Exception as e:
            print(e)
            # Roll back on any error.
            self.conn.rollback()
            return 'error'
# One-off maintenance script: backfill arg_type for each known type label.
# SECURITY NOTE(review): production credentials are hardcoded in source
# control -- move them to environment variables or a secrets store.
# mysql_util = MysqlUtil('localhost', 'root', '1234', 'AI_config')
mysql_util = MysqlUtil('10.28.0.196', 'AI_admin', '#O2hs7lSjug5ePEY', 'AI_config')
types = ['object', 'func', 'float', 'dict', 'bool', 'int', 'str', 'array-like', 'shape', 'size', 'RandomState', 'callable', 'None']
# Fix: the loop variable previously shadowed the builtin `type`.
for arg_type in types:
    mysql_util.format_arg(arg_type)
# print(mysql_util.insert_func('hello'))
# print(mysql_util.insert_arg('zzz', 1, 'www', 'xxx'))
|
{"/spider_sklearn/spiders/sklearn_spider.py": ["/spider_sklearn/mysql_util.py"]}
|
36,864
|
V1nceZhang/spider_sklearn
|
refs/heads/master
|
/spider_sklearn/spiders/sklearn_spider.py
|
import scrapy
import re
from spider_sklearn.mysql_util import MysqlUtil
class SklearnSpider(scrapy.Spider):
    """Crawl the scikit-learn API index and persist functions/args to MySQL.

    Run with the debug launcher using arguments: crawl sklearn
    """
    name = "sklearn"
    allowed_domains = ["scikit-learn.org"]  # crawl domain
    start_urls = [
        "https://scikit-learn.org/stable/modules/classes.html"  # API index page
    ]
    # Database connection.
    # SECURITY NOTE(review): credentials hardcoded in source -- move to
    # project settings / environment variables.
    mysql_util = MysqlUtil('10.28.0.196', 'AI_admin', '#O2hs7lSjug5ePEY', 'AI_config')
    # Package names whose classes/functions should be scraped.
    sklearn_names = ['cluster', 'isotonic', 'calibration', 'cluster.bicluster', 'compose', 'covariance',
                     'cross_decomposition', 'datasets', 'decomposition', 'discriminant_analysis', 'dummy', 'ensemble',
                     'exceptions', 'feature_extraction', 'feature_selection', 'gaussian_process', 'impute',
                     'kernel_approximation', 'kernel_ridge', 'linear_model', 'manifold', 'metrics', 'mixture',
                     'model_selection', 'multiclass', 'multioutput', 'naive_bayes', 'neighbors', 'neural_network',
                     'pipeline', 'preprocessing', 'random_projection', 'semi_supervised', 'svm', 'tree',
                     'utils']

    def parse(self, response):
        """Extract per-package API links, store names, and queue detail pages."""
        for sklearn_name in self.sklearn_names:
            sklearn = response.xpath('//td/a[@class="reference internal"]')  # link elements
            urls = sklearn.xpath('@href').re('^generated/sklearn.' + sklearn_name + '.[A-Z][A-Za-z.]+')  # detail urls
            func_names = sklearn.xpath('code/span/text()').re('^' + sklearn_name + '.[A-Z][A-Za-z.]+')  # dotted names
            if len(urls) == len(func_names):  # equal lengths => rows line up
                for i in range(len(func_names)):
                    func_name = re.search(r'[A-Z][A-Za-z.]+$', func_names[i]).group()  # trailing class name
                    sklearn_func_id = self.mysql_util.insert_func(func_name, sklearn_name)  # persist; get row id
                    request = scrapy.Request('https://scikit-learn.org/stable/modules/' + urls[i],
                                             meta={'sklearn_func_id': sklearn_func_id}, callback=self.parse_arg)
                    yield request

    def parse_arg(self, response):
        """Extract the Parameters table of a detail page and persist each argument."""
        sklearn_func_id = response.meta['sklearn_func_id']  # DB id of the owning function
        sklearn_arg = response.xpath('//td[@class="field-body"]/dl')[0]  # parameters block
        arg_names = sklearn_arg.xpath('dt/strong/text()').extract()  # argument names
        arg_values = sklearn_arg.xpath('dt/span[@class="classifier"]/text()').extract()  # type/default strings
        arg_docs = sklearn_arg.xpath('dd/p').extract()  # documentation paragraphs
        # BUG FIX: the original used bitwise `&`, which -- due to operator
        # precedence -- parsed as a chained comparison against
        # `len(arg_values) & len(arg_docs)` rather than two equality checks
        # joined by a logical `and`.
        if len(arg_names) == len(arg_values) and len(arg_docs) == len(arg_values):
            for i in range(len(arg_names)):
                # 'optional' in the classifier string marks optional arguments.
                if re.search(r'optional', arg_values[i]):
                    self.mysql_util.insert_arg(arg_names[i], sklearn_func_id, arg_values[i], 1, arg_docs[i])
                else:
                    self.mysql_util.insert_arg(arg_names[i], sklearn_func_id, arg_values[i], 0, arg_docs[i])
# if re.search(r'optional', 'float, optional, default: 0.5'):
# print('yes')
# else:
# print('no')
|
{"/spider_sklearn/spiders/sklearn_spider.py": ["/spider_sklearn/mysql_util.py"]}
|
36,873
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day1/helloworld/views.py
|
from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
def hello(request, message= 'HeXA'):
    """Plain-text greeting that echoes the captured URL parameter."""
    greeting = 'Hello, World! Your number is ' + message
    return HttpResponse(greeting)
def me(request, name, group, role):
    """Render index.html with the three captured URL parameters.

    NOTE(review): the `mimetype` kwarg only exists in Django < 1.7; newer
    versions renamed it to `content_type` -- confirm the target Django.
    """
    t = get_template('index.html')
    context = Context({'first': name,
                       'second': group,
                       'third': role,})
    html = t.render(context)
    return HttpResponse(html, mimetype = 'text/html;charset=UTF-8')
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,874
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day4/accounts/models.py
|
from django.db import models
from django.contrib.auth.models import User
import hashlib
# Create your models here.
class Account(models.Model):
    """Per-user profile, one-to-one with the auth User, with a gravatar helper."""
    user = models.OneToOneField(User)

    def gravatar_url(self):
        """50px gravatar URL derived from the MD5 of the user's e-mail.

        NOTE(review): for Python 3 (and canonical Gravatar hashing) the
        e-mail should be lower-cased and encoded to bytes before hashing --
        confirm the target runtime (this codebase is Python 2 style).
        """
        return "http://www.gravatar.com/avatar/%s?s=50" % hashlib.md5(self.user.email).hexdigest()

    def __unicode__(self):
        # BUG FIX: __unicode__ must return a string; the original returned
        # the related User *instance*.
        return self.user.username
# Lazily create/fetch the profile for a user via `user.profile`.
# BUG FIX: `UserProfile` is not defined anywhere in this app; the profile
# model declared above is `Account`.
User.profile = property(lambda u: Account.objects.get_or_create(user=u)[0])
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,875
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day5/webtoon/admin.py
|
from django.contrib import admin
from webtoon.models import *
# Register your models here.
class AuthorAdmin(admin.ModelAdmin):
    # Columns shown on the Author changelist page.
    list_display = ('name', 'desc')

admin.site.register(Author, AuthorAdmin)
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,876
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day4/articles/models.py
|
from django.db import models
from django.utils import timezone
from django.utils.encoding import smart_unicode
import datetime
# Create your models here.
class Article(models.Model):
    """Blog article, ordered oldest-first by publication date."""
    title = models.CharField(max_length=200, blank=False)
    body = models.TextField()
    pub_date = models.DateTimeField('date published')
    likes = models.IntegerField()

    class Meta:
        ordering = ["pub_date"]

    def __unicode__(self):
        return self.title

    def was_published_recently(self):
        """True when pub_date falls within the last 24 hours (and not in the future)."""
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date < now
    # Admin changelist presentation for the method column.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,877
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day6/snippets/serializers.py
|
from django.forms import widgets
# BUG FIX: the package name is lower-case `rest_framework` (was `rest_Framework`).
from rest_framework import serializers
from .models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES


class SnippetSerializer(serializers.Serializer):
    """Old-style (DRF 2.x) explicit serializer for Snippet objects."""
    pk = serializers.Field()
    title = serializers.CharField(required=False, max_length=100)
    # BUG FIX: the original line was a syntax error -- misspelled widget name
    # ("Textaream") and a missing comma before max_length.
    code = serializers.CharField(widget=widgets.Textarea, max_length=100000)
    linenos = serializers.BooleanField(required=False)
    language = serializers.ChoiceField(choices=LANGUAGE_CHOICES, default='python')
    style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly')

    def restore_object(self, attrs, instance=None):
        # NOTE(review): this method looks truncated -- it updates `title`
        # only and never returns the instance or creates a new Snippet;
        # confirm against the DRF 2.x tutorial it was copied from.
        if instance:
            instance.title = attrs.get('title', instance.title)
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,878
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day5/blog/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
def me(request):
    """Minimal placeholder view returning a static greeting."""
    body = 'Hi :)'
    return HttpResponse(body)
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,879
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day1/post/views.py
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def write_post(request):
    """Placeholder view returning a static Korean message (roughly: "Django is not hard")."""
    return HttpResponse("장고 어렵지 아니해")
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,880
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day4/articles/urls.py
|
from django.conf.urls import patterns, include, url
from articles.views import HelloTemplate
# Old-style (Django < 1.8) URLconf using dotted string view paths.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'day4.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', 'articles.views.articles'),
    url(r'^all/$', 'articles.views.articles', name='all'),
    url(r'^get/(?P<article_id>\d+)/$', 'articles.views.article', name='get'),
    url(r'^language/(?P<language>[a-z\-]+)/$', 'articles.views.language', name='language'),
    url(r'^hello/', 'articles.views.hello'),
    # Class-based view mapped directly.
    url(r'^hello_class/', HelloTemplate.as_view()),
)
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,881
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day2/day2/urls.py
|
from django.conf.urls import patterns, include, url
import os
from django.contrib import admin
admin.autodiscover()
# Old-style (Django < 1.8) URLconf for the webtoon app plus dev media serving.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'day2.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # NOTE(review): the name 'webootn' looks like a typo for 'webtoon';
    # left unchanged because reverse() lookups may rely on it.
    url(r'^webtoon/$', 'webtoon.views.home', name='webootn'),
    url(r'^webtoon/author/(?P<author_id>\d+)/$', 'webtoon.views.author', name='author'),
    url(r'^webtoon/(?P<comic_id>\d+)/$', 'webtoon.views.comic', name='comic'),
    url(r'^webtoon/(?P<comic_id>\d+)/(?P<episode_id>\d+)/$', 'webtoon.views.episode', name='episode'),
    # Bare (pattern, view, kwargs) tuple form -- serves ./media/ in development.
    ('^media/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root':
            os.path.join(os.path.dirname(__file__),'media')}
    ),
    url(r'^admin/', include(admin.site.urls)),
)
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,882
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day3/signups/views.py
|
from django.shortcuts import render, render_to_response, RequestContext, HttpResponseRedirect
from django.contrib import messages
from .forms import SignUpForm
# Create your views here.
def home(request):
    """Sign-up landing page: validate and save the form, redirect on success.

    NOTE(review): `locals()` is passed to the template, so local names here
    double as template variables -- renaming any local changes the template
    context.
    """
    form = SignUpForm(request.POST or None)
    context = RequestContext(request)
    if form.is_valid():
        # `commit=False`: before save it to database, just keep it in memory
        save_it = form.save(commit=False)
        save_it.save()
        messages.success(request, 'Thank you for joining')
        return HttpResponseRedirect('/thank-you/')
    return render_to_response("signup.html", locals(), context_instance=context)
def thankyou(request):
    """Render the post-signup thank-you page (locals() feed the template context)."""
    context = RequestContext(request)
    return render_to_response("thankyou.html", locals(), context_instance=context)
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,883
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day4/accounts/forms.py
|
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
from django import forms
from django.utils.html import strip_tags
class AccountForm(UserCreationForm):
    """Registration form with placeholder widgets and error CSS injection."""
    username = forms.CharField(widget=forms.widgets.TextInput(attrs={'placeholder': 'Username'}))
    email = forms.EmailField(required=True, widget=forms.widgets.TextInput(attrs={'placeholder': 'Email'}))
    password1 = forms.CharField(widget=forms.widgets.PasswordInput(attrs={'text': '123', 'placeholder': 'Password'}))
    password2 = forms.CharField(widget=forms.widgets.PasswordInput(attrs={'placeholder': 'Password Confirmation'}))

    class Meta:
        model = User
        fields = ('username', 'password1', 'password2', 'email',)

    def is_valid(self):
        """Validate, tagging each errored field's widget with an 'error' class."""
        form = super(AccountForm, self).is_valid()
        for f, error in self.errors.iteritems():
            # BUG FIX: the non-field-errors key is '__all__' (the original
            # compared against '__all_', so form-wide errors fell through to
            # self.fields[f] and raised KeyError). Now matches
            # AccountAuthForm below.
            if f != '__all__':
                self.fields[f].widget.attrs.update({'class': 'error', 'value': strip_tags(error)})
        return form
class AccountAuthForm(AuthenticationForm):
    """Login form with placeholder widgets and error CSS injection."""
    username = forms.CharField(widget=forms.widgets.TextInput(attrs={'placeholder': 'Username'}))
    password = forms.CharField(widget=forms.widgets.PasswordInput(attrs={'placeholder': 'Password'}))

    def is_valid(self):
        """Validate, tagging each errored field's widget with an 'error' class."""
        form = super(AccountAuthForm, self).is_valid()
        for f, error in self.errors.iteritems():
            # '__all__' holds form-wide errors that have no matching field.
            if f != '__all__':
                self.fields[f].widget.attrs.update({'class': 'error', 'value': strip_tags(error)})
        return form
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,884
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day4/accounts/views.py
|
from django.contrib import messages
from django.contrib.auth import login, authenticate, logout
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render, RequestContext, render_to_response
from .forms import AccountForm, AccountAuthForm
# Create your views here.
def login_view(request):
    """Authenticate a posted login form; send the user home on success.

    BUG FIXES: the form class is `AccountAuthForm` (the imported name;
    `AuthenticateForm` was undefined) and redirects use the imported
    `HttpResponseRedirect` (`redirect` was never imported in this module).
    """
    if request.method == 'POST':
        form = AccountAuthForm(data=request.POST)
        if form.is_valid():
            login(request, form.get_user())
            # Success
            return HttpResponseRedirect('/')
        else:
            # Failure
            # NOTE(review): `index` is not defined in this module -- this
            # branch will raise NameError; confirm the intended fallback view.
            return index(request, auth_form=form)
    return HttpResponseRedirect('/')
def logout_view(request):
    """Log the current user out and redirect to the home page.

    BUG FIX: `redirect` was never imported in this module; use the imported
    `HttpResponseRedirect` instead.
    """
    logout(request)
    return HttpResponseRedirect('/')
def join(request):
    """Registration view: validate AccountForm, save the user, re-render.

    `locals()` feeds the template context, so local names double as
    template variables.
    """
    form = AccountForm(request.POST or None)
    context = RequestContext(request)
    if form.is_valid():
        # `commit=False`: before save it to database, just keep it in memory
        save_it = form.save(commit=False)
        save_it.save()
        # Requires the module-level `messages` import (was missing, which
        # made this line raise NameError on every successful signup).
        messages.success(request, 'Thank you for joining')
        #return HttpResponseRedirect(reverse('articles:all'))
        return render_to_response("join.html", locals(), context_instance=context)
    return render_to_response("join.html", locals(), context_instance=context)
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,885
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day4/articles/admin.py
|
from articles.models import *
from django.contrib import admin
#class ArticleInline(admin.TabularInline):
# model = Article
# extra = 3
class ArticleAdmin(admin.ModelAdmin):
    # Changelist columns, filter sidebar, and search box configuration.
    list_display = ('title', 'body', 'pub_date', 'likes', 'was_published_recently')
    list_filter = ['pub_date']
    search_fields = ['title']
    # Edit-form layout: the date field lives in a collapsible section.
    fieldsets = [
        (None, {'fields': ['title', 'body']}),
        ('Date information', {'fields': ['pub_date'], 'classes':['collapse']}),
    ]
    #inlines = [ArticleInline]

admin.site.register(Article, ArticleAdmin)
# Register your models here.
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,886
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day2/webtoon/models.py
|
from django.db import models
from django.contrib.auth.models import User
class Author(models.Model):
    """A webtoon author."""
    name = models.CharField(max_length=200)
    desc = models.TextField()

    def __unicode__(self):
        return self.name
class Comic(models.Model):
    """A webtoon series belonging to one Author."""
    title = models.CharField(max_length=200)
    author = models.ForeignKey(Author)
    desc = models.TextField()

    def __unicode__(self):
        return self.title
class Episode(models.Model):
    """One episode of a Comic, with its uploaded image file."""
    title = models.CharField(max_length=200)
    comic = models.ForeignKey(Comic)
    # Uploaded under MEDIA_ROOT/comics/.
    img_file = models.FileField(upload_to='comics')
    pub_date = models.DateTimeField()

    def __unicode__(self):
        return self.title
class Comment(models.Model):
    """A user comment attached to an Episode."""
    user = models.ForeignKey(User)
    msg = models.CharField(max_length=200)
    episode = models.ForeignKey(Episode)
    written_date = models.DateTimeField()

    def __unicode__(self):
        return "%s : %s" % ( self.user.username, self.msg )
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,887
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day3/signups/models.py
|
from django.db import models
from django.utils import timezone
from django.utils.encoding import smart_unicode
import datetime
class SignUp(models.Model):
    """Newsletter/sign-up record keyed by e-mail, ordered by creation time."""
    first_name = models.CharField(max_length=120, null=True, blank=True)
    last_name = models.CharField(max_length=120, null=True, blank=True)
    email = models.EmailField()
    # Set once at creation.
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    # Refreshed on every save.
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)

    class Meta:
        ordering = ["timestamp"]

    def __unicode__(self):
        return smart_unicode(self.email)

    def was_updated_recently(self):
        """True when `updated` falls within the last 24 hours (and not in the future)."""
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.updated < now
    # Admin changelist presentation for the method column.
    was_updated_recently.admin_order_field = 'updated'
    was_updated_recently.boolean = True
    # NOTE(review): label says "Published" though the method checks `updated`
    # -- looks copy-pasted from the Article model; confirm the intended text.
    was_updated_recently.short_description = 'Published recently?'
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,888
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day2/webtoon/admin.py
|
from webtoon.models import *
from django.contrib import admin
class AuthorAdmin(admin.ModelAdmin):
    # Changelist columns for Author.
    list_display = ('name', 'desc')

admin.site.register(Author, AuthorAdmin)
class ComicAdmin(admin.ModelAdmin):
    # Changelist columns for Comic.
    list_display = ('title', 'author', 'desc')

admin.site.register(Comic, ComicAdmin)
class EpisodeAdmin(admin.ModelAdmin):
    # Changelist columns for Episode.
    list_display = ('comic', 'title', 'pub_date')

admin.site.register(Episode, EpisodeAdmin)
class CommentAdmin(admin.ModelAdmin):
list_display = ('episode', 'user', 'msg', 'written_date')
admin.site.register(Comment, CommentAdmin)
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,889
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day3/signups/admin.py
|
from django.contrib import admin
from .models import SignUp
# Register your models here.
class SignUpAdmin(admin.ModelAdmin):
    """Admin change-list configuration for SignUp.

    Bug fix: the options were previously declared inside a nested
    ``class Meta`` — ModelAdmin does not read an inner Meta class, so
    ``list_display``/``list_filter`` were silently ignored.  They must
    be attributes of the ModelAdmin subclass itself (the admin infers
    the model from the ``register()`` call; no ``model`` attribute is
    needed here).
    """
    list_display = ('email', 'first_name', 'last_name', 'was_updated_recently')
    list_filter = ['timestamp']
admin.site.register(SignUp, SignUpAdmin)
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,890
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day4/accounts/urls.py
|
from django.conf.urls import patterns, include, url
from articles.views import HelloTemplate
# URL routes for the accounts app (old-style dotted-string view references).
# NOTE(review): HelloTemplate is imported at module level but not used by any
# active pattern below — presumably kept for the commented-out routes; verify.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'day4.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^login/$', 'accounts.views.login', name='login'),
    url(r'^join/$', 'accounts.views.join', name='join'),
    #url(r'^auth/$', 'accounts.views.auth_view', name='auth'),
    #url(r'^logout/$', 'accounts.views.logout'),
    #url(r'^loggedin/$', 'accounts.views.loggedin'),
    #url(r'^invalid/$', 'accounts.views.invalid_login'),
)
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,891
|
Joseamica/Easily-written-Django
|
refs/heads/master
|
/day4/articles/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
from django.shortcuts import render_to_response
from django.views.generic.base import TemplateView
from articles.models import Article
# Create your views here.
def articles(request):
    """List all articles, echoing the language stored in cookie and session.

    Both values fall back to 'en-gb' when no 'lang' cookie / session key
    is present.
    """
    language = 'en-gb'
    session_language = 'en-gb'
    if 'lang' in request.COOKIES:
        language = request.COOKIES['lang']
    if 'lang' in request.session:
        session_language = request.session['lang']
    return render_to_response('articles.html',
                              {'articles': Article.objects.all(),
                               'language': language,
                               'session_language': session_language})
def article(request, article_id=1):
    """Render a single article looked up by primary key (default: 1)."""
    return render_to_response('article.html',
                              {'article': Article.objects.get(id=article_id)})
def language(request, language='en-gb'):
    """Persist the chosen language in both a cookie and the session."""
    response = HttpResponse('setting language to %s' % language)
    response.set_cookie('lang', language)
    request.session['lang'] = language
    return response
def hello(request):
    """Render the hello page with a hard-coded user name."""
    context = {'name': "carpedm30"}
    return render_to_response('hello.html', context)
class HelloTemplate(TemplateView):
    """Class-based hello view.

    Bug fix: the context hook was misspelled ``get_context_date`` —
    Django calls ``get_context_data``, so the misnamed method was never
    invoked and 'name' never reached the template.
    """
    template_name = 'hello.html'
    def get_context_data(self, **kwargs):
        context = super(HelloTemplate, self).get_context_data(**kwargs)
        context['name'] = 'carpedm40'
        return context
|
{"/day4/accounts/views.py": ["/day4/accounts/forms.py"], "/day3/signups/admin.py": ["/day3/signups/models.py"]}
|
36,892
|
Neil399399/Intagram_Crawler
|
refs/heads/master
|
/config.py
|
# tags
# coding=UTF-8
# Hashtags to crawl (mixed English / Chinese terms).
tag_list = ['food','服飾','shirt','ootd','建築','building','fashion','art','旅遊','travel','自然','nature','自拍','selfie','動物','animal','car','moto']
# OS version.
# macOSX or linux
version = 'linux'
# instagram html class name.
# NOTE(review): these CSS selectors track Instagram's obfuscated class names
# and break whenever the site markup changes — re-verify periodically.
posts_class_name = '.Nnq7C.weEfm a'
each_post_class_name = '.FFVAD'
user_ID_class_name = '.e1e1d a'
# get_user_profile.
user_name = '.AC5d8.notranslate'
user_desc = '.-vDIg'
user_photo = '._6q-tv'
user_statistics = '.Y8-fY '
|
{"/storage/solr.py": ["/storage/__init__.py"], "/crawler.py": ["/inscrawler/__init__.py", "/storage/solr.py", "/config.py"], "/inscrawler/__init__.py": ["/inscrawler/crawler.py"], "/downloader.py": ["/storage/solr.py"], "/inscrawler/crawler.py": ["/inscrawler/utils.py", "/inscrawler/__init__.py", "/config.py"]}
|
36,893
|
Neil399399/Intagram_Crawler
|
refs/heads/master
|
/storage/solr.py
|
# coding=UTF-8
from storage import solr_url
import pysolr
import requests
# search intagram username in solr.
def search(mode, key, number):
    """Query Solr for documents matching ``mode:key``, at most ``number`` rows.

    Returns the pysolr result set, or None when the query fails.
    """
    client = pysolr.Solr(solr_url)
    try:
        return client.search(mode + ':' + key, rows=number)
    except Exception:  # narrowed from a bare except (was masking KeyboardInterrupt)
        print('search failed.')
        return None  # explicit: callers previously got an implicit None
def writer(data):
    """Add ``data`` (a document or list of documents) to Solr.

    Returns a short status string instead of raising.
    """
    client = pysolr.Solr(solr_url, timeout=10)
    try:
        client.add(data)
        return 'Success save.'
    except Exception:  # narrowed from a bare except (was masking KeyboardInterrupt)
        return 'write failed.'
|
{"/storage/solr.py": ["/storage/__init__.py"], "/crawler.py": ["/inscrawler/__init__.py", "/storage/solr.py", "/config.py"], "/inscrawler/__init__.py": ["/inscrawler/crawler.py"], "/downloader.py": ["/storage/solr.py"], "/inscrawler/crawler.py": ["/inscrawler/utils.py", "/inscrawler/__init__.py", "/config.py"]}
|
36,894
|
Neil399399/Intagram_Crawler
|
refs/heads/master
|
/inscrawler/utils.py
|
from time import sleep
import random
def instagram_int(string):
    """Parse an Instagram count such as '1,234' into an int."""
    digits = string.replace(',', '')
    return int(digits)
def retry(attempt=10, wait=0.3):
    """Decorator: re-invoke the wrapped callable up to ``attempt`` extra
    times, sleeping ``wait`` seconds between tries.

    Bug fix: the recursive retry's return value was discarded, so a call
    that succeeded on a retry still returned None.  The result is now
    propagated.  When all attempts are exhausted, the original behaviour
    (swallow the exception and return None) is preserved.
    """
    def wrap(func):
        def wrapped_f(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                if attempt > 0:
                    sleep(wait)
                    return retry(attempt - 1, wait)(func)(*args, **kwargs)
                return None  # attempts exhausted: keep the old silent-None contract
        return wrapped_f
    return wrap
def randmized_sleep(average = 1):
    """Sleep for a uniformly random duration in [average/2, 3*average/2]."""
    lower, upper = average * 0.5, average * 1.5
    sleep(random.uniform(lower, upper))
|
{"/storage/solr.py": ["/storage/__init__.py"], "/crawler.py": ["/inscrawler/__init__.py", "/storage/solr.py", "/config.py"], "/inscrawler/__init__.py": ["/inscrawler/crawler.py"], "/downloader.py": ["/storage/solr.py"], "/inscrawler/crawler.py": ["/inscrawler/utils.py", "/inscrawler/__init__.py", "/config.py"]}
|
36,895
|
Neil399399/Intagram_Crawler
|
refs/heads/master
|
/crawler.py
|
# coding=UTF-8
from inscrawler import InsCrawler
import sys
import argparse
import json
from io import open
from storage.solr import writer
from config import tag_list
def usage():
    """Return the CLI usage text (also passed to argparse as ``usage``)."""
    return '''
python crawler.py posts -u cal_foodie -n 100 -o ./output
python crawler.py profile -u cal_foodie -o ./output
python crawler.py hashtag -t taiwan -o ./output
The default number for fetching posts via hashtag is 100.
'''
def get_posts_by_user(tag, username, number):
    """Crawl up to ``number`` posts from ``username``'s profile page."""
    ins_crawler = InsCrawler()
    return ins_crawler.get_user_posts(tag, username, number)
def get_profile(username):
    """Fetch a user's profile card (name, bio, counters)."""
    ins_crawler = InsCrawler()
    return ins_crawler.get_user_profile(username)
def get_posts_by_hashtag(tag, number):
    """Crawl the latest ``number`` posts under hashtag ``tag``."""
    ins_crawler = InsCrawler()
    return ins_crawler.get_latest_posts_by_tag(tag, number)
# get user posts from the latest posts under a tag.
def get_user_posts_by_tags(tag, number):
    """Collect the distinct owners of the latest posts under ``tag``."""
    ins_crawler = InsCrawler()
    return ins_crawler.get_user_posts_from_tag(tag, number)
def arg_required(args, fields=[]):
    """Exit with the parser help text unless every name in ``fields`` is
    set on ``args``.

    NOTE(review): relies on the module-level ``parser`` built in the
    __main__ block.  Verify call sites pass (args, ['field']) — calling
    it as arg_required('username') binds the string to ``args`` and
    leaves ``fields`` empty, making the check a no-op.
    """
    for field in fields:
        if not getattr(args, field):
            parser.print_help()
            sys.exit()
def output(data, filepath):
    """Write ``data`` as JSON to ``filepath``, or print it when no path is given."""
    serialized = json.dumps(data, ensure_ascii=False)
    if not filepath:
        print(serialized)
        return
    with open(filepath, 'w') as f:
        f.write(serialized)
# save in solr.
def save_in_solr(data):
    """Push crawled post data into Solr and echo the writer's status string."""
    print('Save in solr ...')
    print(writer(data))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Instagram Crawler',
                                     usage=usage())
    parser.add_argument('mode',
                        help='options: [posts, profile, hashtag]')
    parser.add_argument('-pn', '--postNumber',
                        type=int,
                        help='number of returned posts')
    parser.add_argument('-tn', '--tagNumber',
                        type=int,
                        help='number of returned tags')
    parser.add_argument('-u', '--username',
                        help='instagram\'s username')
    parser.add_argument('-t', '--tag',
                        help='instagram\'s tag name')
    parser.add_argument('-o', '--output', help='output file name(json format)')
    # Bug fix: 'stroage' typo in user-facing help text.
    parser.add_argument('-s', '--solr', help='save output in solr storage.')
    args = parser.parse_args()
    if args.mode == 'posts':
        # Bug fix: arg_required was called as arg_required('username'),
        # which bound the string to the ``args`` parameter and left
        # ``fields`` empty — the required-argument check never ran.
        # Same for every call below.
        arg_required(args, ['username'])
        # Bug fix: args.number does not exist (the option is --postNumber).
        output(get_posts_by_user(None, args.username, args.postNumber), args.output)
    elif args.mode == 'profile':
        arg_required(args, ['username'])
        output(get_profile(args.username), args.output)
    elif args.mode == 'hashtag':
        arg_required(args, ['tag'])
        output(
            get_posts_by_hashtag(args.tag, args.tagNumber or 100), args.output)
    elif args.mode == 'poststag':
        # Crawl owners of the latest posts per configured tag, then save
        # each owner's posts straight into Solr.
        for tag in tag_list:
            user_list = get_user_posts_by_tags(tag, args.tagNumber or 100)
            for userID in user_list:
                save_in_solr(get_posts_by_user(tag, userID, args.postNumber))
        # output(get_posts_by_user(args.tag,userID,args.postNumber),args.output)
    else:
        # Bug fix: usage() only returns the text; it must be printed.
        print(usage())
|
{"/storage/solr.py": ["/storage/__init__.py"], "/crawler.py": ["/inscrawler/__init__.py", "/storage/solr.py", "/config.py"], "/inscrawler/__init__.py": ["/inscrawler/crawler.py"], "/downloader.py": ["/storage/solr.py"], "/inscrawler/crawler.py": ["/inscrawler/utils.py", "/inscrawler/__init__.py", "/config.py"]}
|
36,896
|
Neil399399/Intagram_Crawler
|
refs/heads/master
|
/storage/__init__.py
|
# config.
# Solr core endpoint used by storage.solr.  NOTE(review): hard-coded
# internal IP — consider moving to an environment variable.
solr_url = 'http://140.124.183.37:8983/solr/instagram_test2'
|
{"/storage/solr.py": ["/storage/__init__.py"], "/crawler.py": ["/inscrawler/__init__.py", "/storage/solr.py", "/config.py"], "/inscrawler/__init__.py": ["/inscrawler/crawler.py"], "/downloader.py": ["/storage/solr.py"], "/inscrawler/crawler.py": ["/inscrawler/utils.py", "/inscrawler/__init__.py", "/config.py"]}
|
36,897
|
Neil399399/Intagram_Crawler
|
refs/heads/master
|
/inscrawler/__init__.py
|
from .crawler import InsCrawler
|
{"/storage/solr.py": ["/storage/__init__.py"], "/crawler.py": ["/inscrawler/__init__.py", "/storage/solr.py", "/config.py"], "/inscrawler/__init__.py": ["/inscrawler/crawler.py"], "/downloader.py": ["/storage/solr.py"], "/inscrawler/crawler.py": ["/inscrawler/utils.py", "/inscrawler/__init__.py", "/config.py"]}
|
36,898
|
Neil399399/Intagram_Crawler
|
refs/heads/master
|
/downloader.py
|
# coding=UTF-8
from urllib.request import urlretrieve
from storage.solr import search
import sys
import os
import argparse
import json
def usage():
    """Return the CLI usage text for the downloader."""
    return '''
python3 download.py user -s solr -u userID -o output_dir
python3 download.py tag -s solr -t tag -o output_dir
'''
# arg.
def arg_required(args, fields=[]):
    """Exit with the parser help text unless each name in ``fields`` is
    set on ``args``.

    NOTE(review): call sites below pass a single string
    (arg_required('userID')), which lands in ``args`` and leaves
    ``fields`` empty — the validation is a no-op as called.
    """
    for field in fields:
        if not getattr(args, field):
            parser.print_help()
            sys.exit()
# get data from storage.
def get_data(storage, mode, key, number):
    """Fetch ``number`` records matching ``mode:key`` from ``storage``.

    Only 'solr' is supported; any other storage name prints a message
    and implicitly returns None.
    """
    if storage == 'solr':
        return search(mode, key, number)
    else:
        print('No this storage/Data.')
# download image.
def download_imgs(search_result, output_dir):
    """Download every record's first image URL into ``output_dir``.

    Files are named ``<id>.jpg``.  Records whose download fails are
    skipped with a warning.  The directory is created when missing.
    """
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    for data in search_result:
        try:
            urlretrieve(data['img_url_str'][0], output_dir + '/' + data['id'] + '.jpg')
        except Exception:  # narrowed from a bare except (was masking KeyboardInterrupt)
            print("[Warning] Can't found this picture.")
            continue
    print('Finished download.')
# download image.
def download_img(search_result, output_dir):
    """Download images grouped into one sub-directory per post owner.

    Files are saved as ``<output_dir>/<owner>/<id>.jpg``; failed
    downloads are skipped with a warning.
    """
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    for data in search_result:
        if not os.path.exists(output_dir + '/' + data['post_owner'][0]):
            os.mkdir(output_dir + '/' + data['post_owner'][0])
        try:
            urlretrieve(data['img_url_str'][0], output_dir + '/' + data['post_owner'][0] + '/' + data['id'] + '.jpg')
        except Exception:  # narrowed from a bare except (was masking KeyboardInterrupt)
            print("[Warning] Can't found this picture.")
            continue
    print('Finished download.')
# download content.
def download_content(search_result, filepath):
    """Write each record to ``filepath`` as one JSON object per line.

    Presentation-only keys are stripped from each record first; records
    missing any of them are skipped with a warning.  Bug fix: the output
    file is now closed via ``with`` even if an error escapes the loop
    (it was opened and closed manually, leaking on exception).
    """
    if filepath:
        with open(filepath, 'w') as file:
            for data in search_result:
                try:
                    # remove some keys that aren't needed in the export.
                    del data['img_url']
                    del data['img_url_str']
                    del data['post_owner']
                    del data['post_owner_str']
                    del data['content_str']
                    del data['tag_str']
                    del data['_version_']
                    out = json.dumps(data, ensure_ascii=False)
                    file.write(out + '\n')
                except Exception:  # narrowed from a bare except
                    print("[Warning] Can't found this content.")
                    continue
        print('Finished download.')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Instagram Downloader', usage=usage())
    parser.add_argument('mode', help='options: [user, tag]')
    parser.add_argument('-s', '--storage', help='Storage.')
    parser.add_argument('-d', '--download', help='what you want download.')
    parser.add_argument('-u', '--userID', help='Instagram userID in storage.')
    parser.add_argument('-t', '--tag', help='Instagram tag in storage.')
    parser.add_argument('-n', '--number', type=int, help='number of the data you want take.')
    parser.add_argument('-o', '--output', help='output folder.')
    args = parser.parse_args()
    # user.
    if args.mode == 'user':
        # Bug fix: arg_required was called as arg_required('userID'),
        # which bound the string to the ``args`` parameter and left
        # ``fields`` empty — the required-argument check never ran.
        # Same for every call below.
        arg_required(args, ['userID', 'download'])
        if args.download == 'image':
            download_imgs(get_data(args.storage, 'post_owner_str', args.userID, args.number), args.output)
        elif args.download == 'content':
            download_content(get_data(args.storage, 'post_owner_str', args.userID, args.number), args.output)
    # tag.
    elif args.mode == 'tag':
        arg_required(args, ['tag', 'download'])
        if args.download == 'image':
            download_imgs(get_data(args.storage, 'tag_str', args.tag, args.number), args.output)
        elif args.download == 'content':
            download_content(get_data(args.storage, 'tag_str', args.tag, args.number), args.output)
    # tag-user.
    elif args.mode == 'taguser':
        arg_required(args, ['tag', 'download'])
        if args.download == 'image':
            download_img(get_data(args.storage, 'tag_str', args.tag, args.number), args.output)
        elif args.download == 'content':
            download_content(get_data(args.storage, 'tag_str', args.tag, args.number), args.output)
    else:
        # Bug fix: usage() only returns the text; it must be printed.
        print(usage())
|
{"/storage/solr.py": ["/storage/__init__.py"], "/crawler.py": ["/inscrawler/__init__.py", "/storage/solr.py", "/config.py"], "/inscrawler/__init__.py": ["/inscrawler/crawler.py"], "/downloader.py": ["/storage/solr.py"], "/inscrawler/crawler.py": ["/inscrawler/utils.py", "/inscrawler/__init__.py", "/config.py"]}
|
36,899
|
Neil399399/Intagram_Crawler
|
refs/heads/master
|
/inscrawler/crawler.py
|
# coding=UTF-8
from selenium.webdriver.common.keys import Keys
from .browser import Browser
from .utils import instagram_int
from .utils import retry
from .utils import randmized_sleep
from . import secret
from time import sleep, time
from config import user_name,user_desc,user_photo,user_statistics,posts_class_name,each_post_class_name,user_ID_class_name
from config import version
class InsCrawler:
    """Selenium-driven Instagram scraper.

    Navigates instagram.com through the local ``Browser`` wrapper and
    scrapes profiles, posts and hashtag pages using the CSS selectors
    defined in config.py.
    """
    URL = 'https://www.instagram.com'
    RETRY_LIMIT = 10

    def __init__(self, has_screen=False):
        # ``version`` (from config) selects the platform-specific driver.
        self.browser = Browser(has_screen, version)
        self.page_height = 0

    def login(self):
        """Log in with the credentials from the local ``secret`` module."""
        browser = self.browser
        url = '%s/accounts/login/' % (InsCrawler.URL)
        browser.get(url)
        u_input = browser.find_one('input[name="username"]')
        u_input.send_keys(secret.username)
        p_input = browser.find_one('input[name="password"]')
        p_input.send_keys(secret.password)
        p_input.send_keys(Keys.RETURN)

        # Poll until the login form disappears; raising re-triggers @retry.
        @retry()
        def check_login():
            if browser.find_one('input[name="username"]'):
                raise Exception()
        check_login()

    def get_user_profile(self, username):
        """Scrape a user's profile card.

        Returns a dict of name/desc/photo_url and the three counters, or
        the sentinel string 'No found' on any scraping failure.
        """
        print('Check user profile ... ')
        browser = self.browser
        url = '%s/%s/' % (InsCrawler.URL, username)
        browser.get(url)
        try:
            name = browser.find_one(user_name)
            desc = browser.find_one(user_desc)
            photo = browser.find_one(user_photo)
            statistics = [ele.text for ele in browser.find(user_statistics)]
            post_num, follower_num, following_num = statistics
            return {
                'name': name.text,
                'desc': desc.text if desc else None,
                'photo_url': photo.get_attribute('src'),
                'post_num': post_num,
                'follower_num': follower_num,
                'following_num': following_num
            }
        except:
            # NOTE(review): bare except — every scraping error is collapsed
            # into the 'No found' sentinel string.
            return 'No found'

    def get_user_posts(self, tag, username, number=None):
        """Scrape ``number`` posts from ``username`` (defaults to the
        profile's full post count)."""
        user_profile = self.get_user_profile(username)
        if user_profile != 'No found':
            if not number:
                number = instagram_int(user_profile['post_num'])
            return self._get_posts(tag, number, username)

    def get_latest_posts_by_tag(self, tag, num):
        """Scrape the latest ``num`` posts from a hashtag page."""
        url = '%s/explore/tags/%s/' % (InsCrawler.URL, tag)
        self.browser.get(url)
        return self._get_tag(num)

    def get_user_posts_from_tag(self, tag, num):
        """Return the distinct owner IDs of the latest ``num`` posts
        under ``tag``."""
        url = '%s/explore/tags/%s/' % (InsCrawler.URL, tag)
        self.browser.get(url)
        user_list = self._get_all_tags_owner(num, tag)
        return user_list

    def auto_like(self, tag='', maximum=1000):
        """Log in and like up to ``maximum`` posts, walking the post
        pagination arrows (hashtag page when ``tag`` is given, otherwise
        the explore page)."""
        self.login()
        browser = self.browser
        if tag:
            url = '%s/explore/tags/%s/' % (InsCrawler.URL, tag)
        else:
            url = '%s/explore/' % (InsCrawler.URL)
        self.browser.get(url)
        ele_posts = browser.find_one('._mck9w a')
        ele_posts.click()
        for _ in range(maximum):
            heart = browser.find_one('._8scx2.coreSpriteHeartOpen')
            if heart:
                heart.click()
                randmized_sleep(2)
            left_arrow = browser.find_one('.coreSpriteRightPaginationArrow')
            if left_arrow:
                left_arrow.click()
                randmized_sleep(2)
            else:
                break

    def _get_posts(self, tag, num, username):
        '''
        To get posts, we have to click on the load more
        button and make the browser call post api.
        '''
        TIMEOUT = 30
        browser = self.browser
        dict_posts = {}
        pre_post_num = 0
        wait_time = 1

        def start_fetching(pre_post_num, wait_time):
            # One scan of the currently rendered post grid; new hrefs are
            # added to dict_posts keyed by post URL.
            ele_posts = browser.find(posts_class_name)
            Id = 0
            for ele in ele_posts:
                Id += 1
                key = ele.get_attribute('href')
                if key not in dict_posts:
                    try:
                        ele_img = browser.find_one(each_post_class_name, ele)
                        content = ele_img.get_attribute('alt')
                        img_url = ele_img.get_attribute('src')
                        dict_posts[key] = {
                            'id': str(tag) + '-' + username + '-' + str(Id),
                            'tag': str(tag),
                            'post_owner': username,
                            'content': content,
                            'img_url': img_url
                        }
                    except:
                        # NOTE(review): bare except — posts whose image
                        # lookup fails are silently skipped.
                        continue
            if pre_post_num == len(dict_posts):
                # No progress: back off exponentially and jiggle the page.
                print('Number of fetched posts: %s' % pre_post_num)
                print('Wait for %s sec...' % (wait_time))
                sleep(wait_time)
                wait_time *= 2
                browser.scroll_up(300)
            else:
                wait_time = 1
            pre_post_num = len(dict_posts)
            browser.scroll_down()
            return pre_post_num, wait_time

        print('Starting fetching userID: ' + username + ' ...')
        while len(dict_posts) < num and wait_time < TIMEOUT:
            pre_post_num, wait_time = start_fetching(pre_post_num, wait_time)
            loading = browser.find_one('._anzsd._o5uzb')
            if (not loading and wait_time > TIMEOUT/2):
                break
        posts = list(dict_posts.values())
        print('Done. Fetched %s posts.' % len(posts))
        return posts[:num]

    def _get_tag(self, num):
        '''
        To get tags, we have to click on the load more
        button and make the browser call post api.
        '''
        Start_time = time()
        TIMEOUT = 600
        browser = self.browser
        dict_posts = {}
        pre_post_num = 0
        wait_time = 1

        def start_fetching(pre_post_num, wait_time):
            # Same incremental-scroll scan as _get_posts, owner filled later.
            ele_posts = browser.find(posts_class_name)
            for ele in ele_posts:
                key = ele.get_attribute('href')
                if key not in dict_posts:
                    ele_img = browser.find_one(each_post_class_name, ele)
                    content = ele_img.get_attribute('alt')
                    img_url = ele_img.get_attribute('src')
                    dict_posts[key] = {
                        'post_owner': '',
                        'content': content,
                        'img_url': img_url
                    }
            if pre_post_num == len(dict_posts):
                print('Number of fetched posts: %s' % pre_post_num)
                print('Wait for %s sec...' % (wait_time))
                sleep(wait_time)
                wait_time *= 2
                browser.scroll_up(300)
            else:
                wait_time = 1
            pre_post_num = len(dict_posts)
            browser.scroll_down()
            return pre_post_num, wait_time

        print('Strating fetching...')
        while len(dict_posts) < num and wait_time < TIMEOUT:
            pre_post_num, wait_time = start_fetching(pre_post_num, wait_time)
            loading = browser.find_one('._anzsd._o5uzb')
            if (not loading and wait_time > TIMEOUT/2):
                break
        # connect to href url and get post owner.
        for key in dict_posts:
            browser.get(key)
            post_owner = browser.find_one(user_ID_class_name)
            ID = post_owner.get_attribute('title')
            dict_posts[key]['post_owner'] = ID
            browser.scroll_down()
        posts = list(dict_posts.values())
        print('Done. Fetched %s posts.' % (min(len(posts), num)))
        print('Running time:', time() - Start_time)
        return posts[:num]

    # get all user ID who used this tag.
    def _get_all_tags_owner(self, num, tag):
        '''
        To get all post_owner who used this tag, we have to click on the load more
        button and make the browser call post api.
        '''
        Start_time = time()
        TIMEOUT = 600
        browser = self.browser
        all_tag_url = []
        all_post_owner = []
        pre_post_num = 0
        wait_time = 1

        def start_fetching(pre_post_num, wait_time):
            # Collect post URLs only; owners are resolved afterwards.
            ele_posts = browser.find(posts_class_name)
            if len(ele_posts) == 0:
                print("Didn't find any elements in web page.")
                return pre_post_num, wait_time
            for ele in ele_posts:
                key = ele.get_attribute('href')
                if key not in all_tag_url:
                    all_tag_url.append(key)
            if pre_post_num == len(all_tag_url):
                print('Number of fetched posts: %s' % pre_post_num)
                print('Wait for %s sec...' % (wait_time))
                sleep(wait_time)
                wait_time *= 2
                browser.scroll_up(300)
            else:
                wait_time = 1
            pre_post_num = len(all_tag_url)
            browser.scroll_down()
            return pre_post_num, wait_time

        print('Starting fetching ... tag ' + tag)
        while len(all_tag_url) < num and wait_time < TIMEOUT:
            pre_post_num, wait_time = start_fetching(pre_post_num, wait_time)
            loading = browser.find_one('._anzsd._o5uzb')
            if (not loading and wait_time > TIMEOUT/2):
                break
        # connect to href url and get post owner.
        for url in all_tag_url:
            browser.get(url)
            try:
                post_owner = browser.find_one(user_ID_class_name)
                ID = post_owner.get_attribute('title')
                if ID not in all_post_owner:
                    all_post_owner.append(ID)
            except:
                # NOTE(review): bare except — unreadable posts are skipped.
                continue
            browser.scroll_down()
        print('Done. Fetched %s IDs.' % (min(len(all_post_owner), num)))
        print('Running time:', time() - Start_time)
        return all_post_owner
|
{"/storage/solr.py": ["/storage/__init__.py"], "/crawler.py": ["/inscrawler/__init__.py", "/storage/solr.py", "/config.py"], "/inscrawler/__init__.py": ["/inscrawler/crawler.py"], "/downloader.py": ["/storage/solr.py"], "/inscrawler/crawler.py": ["/inscrawler/utils.py", "/inscrawler/__init__.py", "/config.py"]}
|
36,920
|
DavidParkin/vampy-stuff
|
refs/heads/master
|
/customencoder.py
|
from datetime import datetime
import json
import numpy
class MyCustomEncoder(json.JSONEncoder):
    """Verbose/debug JSON encoder for vamp plugin output.

    NOTE(review): ``vampyhost`` is referenced below but never imported in
    this module — hitting the RealTime branch raises NameError.  The
    cleaned-up equivalent lives in encoder.VampCustomEncoder.
    """
    def default(self, o):
        print("Start")
        print(type(o))
        print(o)
        if isinstance(o, vampyhost.RealTime):
            print("Is old realtime")
            b = o.values()
            #return {'__realtime__': o.values}
            return {'__realtime__': list(b)}
        if isinstance(o, numpy.ndarray):
            print("Is numpy")
            # NOTE(review): only the first element is kept, unlike
            # encoder.VampCustomEncoder which serializes the whole array.
            return {'__ndarray__': o[0]}
        if isinstance(o, numpy.float32):
            print ("Is float")
            return {'__float32__': o.tolist()}
        print("Is not realtime")
        print(type(o))
        print(o)
        print("CLASS")
        print(o.__class__.__name__)
        #return {'__{}__'.format(o.__class__.__name__): o.__dict__}
        return json.JSONEncoder.default(self, o)
|
{"/silvet.py": ["/encoder.py"], "/pianoroll.py": ["/encoder.py"]}
|
36,921
|
DavidParkin/vampy-stuff
|
refs/heads/master
|
/hist.py
|
# coding: utf-8
import vamp
import librosa
import essentia.standard as es
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from __future__ import print_function
import essentia.standard as es
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
from __future__ import print_function
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
audio_file = '~/Music/Videos/Stitch/maj-min-E.wav'
audio, sr = librosa.load(audio_file, sr=44100, mono=True)
audio_file
audio, sr = librosa.load(audio_file, sr=44100, mono=True)
audio_file = '/home/David/Music/Videos/Stitch/maj-min-E.wav'
audio, sr = librosa.load(audio_file, sr=44100, mono=True)
audio_file = '/home/David/Music/Videos/Stitch/maj_min-E.wav'
audio, sr = librosa.load(audio_file, sr=44100, mono=True)
audio_file = '/home/David/Music/Video/Stitch/maj_min-E.wav'
audio, sr = librosa.load(audio_file, sr=44100, mono=True)
data = vamp.collect(audio, sr, "mtg-melodia:melodia")
data
hop, melody = data['vector']
print(hop)
print(melody)
plt.figure(figsize=(18,6))
plt.plot(timestamps, melody)
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
import numpy as np
timestamps = 8 * 128/44100.0 + np.arange(len(melody)) * (128/44100.0)
plt.plot(timestamps, melody)
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.show
plt.show()
plt.plot(timestamps, melody)
plt.show()
get_ipython().run_line_magic('matplotlib', 'gtk')
get_ipython().run_line_magic('matplotlib', '')
get_ipython().run_line_magic("matplotlib('classic')", '')
get_ipython().run_line_magic('matplotlib', '')
help %matplotlib
get_ipython().run_line_magic('matplotlib', '')
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('matplotlib', 'gtk2')
get_ipython().run_line_magic('matplotlib', 'tk')
get_ipython().run_line_magic('matplotlib', 'py')
get_ipython().run_line_magic('matplotlib', 'pygment')
get_ipython().run_line_magic('matplotlib', 'gtk2')
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('matplotlib', 'pyglet')
get_ipython().run_line_magic('matplotlib', 'TkAgg')
get_ipython().run_line_magic('matplotlib', 'gtk3agg')
get_ipython().run_line_magic('matplotlib', 'gtk3')
|
{"/silvet.py": ["/encoder.py"], "/pianoroll.py": ["/encoder.py"]}
|
36,922
|
DavidParkin/vampy-stuff
|
refs/heads/master
|
/encoder.py
|
from datetime import datetime
import json
import numpy
import vampyhost
class VampCustomEncoder(json.JSONEncoder):
    """JSON encoder that tags vamp RealTime and numpy values so that
    ``vampy_decoder`` can round-trip them."""
    def default(self, o):
        # print(o)
        if isinstance(o, vampyhost.RealTime):
            # RealTime serializes as its (sec, nsec) value pair.
            b = o.values()
            return {'__realtime__': list(b)}
        if isinstance(o, numpy.ndarray):
            return {'__ndarray__': tuple(o)}
        if isinstance(o, numpy.float32):
            return {'__float32__': o.tolist()}
        return json.JSONEncoder.default(self, o)
def vampy_decoder(dct):
    """json.loads object_hook that reverses VampCustomEncoder's tags.

    Untagged dicts are reported to stdout and passed through unchanged.
    """
    if '__realtime__' in dct:
        sec, nsec = dct['__realtime__'][0], dct['__realtime__'][1]
        return vampyhost.RealTime(sec, nsec)
    if '__float32__' in dct:
        # print('__float32__')
        # print(type(dct['__float32__']))
        return numpy.float32(dct['__float32__'])
    if '__ndarray__' in dct:
        # print('__ndarray__')
        # print(dct['__ndarray__'])
        return numpy.array(dct['__ndarray__'])
    print(type(dct))
    print(dct)
    return dct
|
{"/silvet.py": ["/encoder.py"], "/pianoroll.py": ["/encoder.py"]}
|
36,923
|
DavidParkin/vampy-stuff
|
refs/heads/master
|
/melodia.py
|
#!/usr/bin/python3
# coding: utf-8
from __future__ import print_function
import vamp
import librosa
#import essentia.standard as es
import matplotlib.pyplot as plt
# Input clip: first 5 s, resampled to 44.1 kHz mono.
audio_file = '/home/David/Music/Video/Stitch/maj_min-E.wav'
audio, sr = librosa.load(audio_file, sr=44100, mono=True, duration=5.0)
# Run the Melodia melody-extraction vamp plugin; result is under 'vector'.
data = vamp.collect(audio, sr, "mtg-melodia:melodia")
hop, melody = data['vector']
print(hop)
print(melody)
import numpy as np
# Timestamp grid: 128-sample hop at 44.1 kHz with an 8-hop lead-in offset.
timestamps = 8 * 128/44100.0 + np.arange(len(melody)) * (128/44100.0)
melody_pos = melody[:]
# Mask non-voiced frames (reported as <= 0) so they are not plotted.
melody_pos[melody<=0] = None
plt.figure(figsize=(18,6))
#plt.plot(timestamps, melody)
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.plot(timestamps, melody_pos)
plt.show()
|
{"/silvet.py": ["/encoder.py"], "/pianoroll.py": ["/encoder.py"]}
|
36,924
|
DavidParkin/vampy-stuff
|
refs/heads/master
|
/silvet.py
|
#!/usr/bin/python3
# coding: utf-8
# from __future__ import print_function
import json
import librosa
import vamp
#import essentia.standard as es
import matplotlib.pyplot as plt
import numpy as np
import encoder
# load audio from file: first 5 s at 44.1 kHz mono.
audio_file = '/home/David/Music/Video/Stitch/maj_min-E.wav'
audio, sr = librosa.load(audio_file, sr=44100, mono=True, offset=0.0, duration=5.0)
# collect silvet (note transcription) data; its events are under 'list'.
data = vamp.collect(audio, sr, "silvet:silvet")
# hop, melody = data['vector']
# print(hop)
# print(melody)
print(data)
print("Silvet data is {} long".format(len(data)))
#silvet returns list dict
silvet_data = data['list']
# Round-trip the result through the custom JSON encoder/decoder as a
# serialization smoke test.
jserialized = json.dumps(silvet_data, indent=4, cls=encoder.VampCustomEncoder)
#import ipdb; ipdb.set_trace()
print("Done!!")
print(jserialized)
junserialized = json.loads(jserialized, object_hook=encoder.vampy_decoder)
print("Done!!")
print(junserialized)
silvet_data = junserialized
print("Silvet silvet_data is {} long".format(len(silvet_data)))
# collect melodia (melody extraction) data.
params = {"minfqr" : 220} # allow A3
data = vamp.collect(audio, sr, "mtg-melodia:melodia", parameters = params)
print("Melodia data is {} long".format(len(data)))
hop, melody = data['vector']
print("Melodia melody is {} long".format(len(melody)))
print(hop)
print(melody)
# Timestamp grids: 128-sample hop at 44.1 kHz with an 8-hop lead-in.
timestampsa = 8 * 128/44100.0 + np.arange(len(silvet_data)) * (128/44100.0)
timestampsm = 8 * 128/44100.0 + np.arange(len(melody)) * (128/44100.0)
valuess = []
timess = []
labels = []
# Flatten the silvet events; each dict's values unpack as
# (timestamp, duration, label, values) — assumes dict insertion order.
for note in silvet_data:
    for key in note:
        print("{}: {}".format(key, note[key]), end='')
    print("\n")
    b = note.values()
    print("b = :{}".format(b))
    ts, dur, label, values = list(b)
    valuess.extend(values)
    timess.append(ts)
    labels += [label]
# Drop every second entry — presumably an interleaved secondary value;
# TODO confirm against silvet's output format.
del valuess[1::2]
melody_pos = melody[:]
# Mask non-voiced melodia frames (reported as <= 0).
melody_pos[melody<=0] = None
# plt.ion()
plt.figure(figsize=(18, 6))
plt.plot(timess, valuess)
# plt.plot(timestamps, melody)
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.plot(timestampsm, melody_pos)
plt.show()
|
{"/silvet.py": ["/encoder.py"], "/pianoroll.py": ["/encoder.py"]}
|
36,925
|
DavidParkin/vampy-stuff
|
refs/heads/master
|
/pianoroll.py
|
#!/usr/bin/python3
# coding: utf-8
# from __future__ import print_function
import json
import librosa
import vamp
#import essentia.standard as es
import matplotlib.pyplot as plt
import numpy as np
import encoder
def hz2midi(herz):
    """Convert an array of frequencies in Hz to rounded MIDI note numbers.

    Non-positive entries (unvoiced frames) map to 0 instead of -inf.
    """
    freqs = herz.copy()
    silent = freqs <= 0
    # Placeholder value so log2 is defined for silent frames.
    freqs[silent] = 1
    notes = np.round(69 + 12 * np.log2(freqs / 440.))
    notes[silent] = 0
    return notes
# load audio from file: 5 s starting at t=5 s, 44.1 kHz mono.
audio_file = '/home/David/Music/Video/Stitch/In The Mind of B.B. King_ The Thrill Is Gone Guitar Solo Lesson [360p].mp4'
audio, sr = librosa.load(audio_file, sr=44100, mono=True, offset=5.0, duration=5.0)
# collect silvet (note transcription) data.
params = {} # allow A3
# params = {"minFreq" : 220, "maxFreq" : 500} # allow A3
data = vamp.collect(audio, sr, "silvet:silvet", parameters=params)
# mainly serial {{{
# hop, melody = data['vector']
# print(hop)
# print(melody)
# print(data)
print("Silvet data is {} long".format(len(data)))
#silvet returns list dict
silvet_data = data['list']
# jserialized = json.dumps(silvet_data, indent=4, cls=encoder.VampCustomEncoder)
# #import ipdb; ipdb.set_trace()
# print("Done!!")
# print(jserialized)
# junserialized = json.loads(jserialized, object_hook=encoder.vampy_decoder)
# print("Done!!")
# print(junserialized)
# silvet_data = junserialized
print("Silvet silvet_data is {} long".format(len(silvet_data)))
# collect melodia (melody extraction) data.
params = {"minfqr" : 220} # allow A3
data = vamp.collect(audio, sr, "mtg-melodia:melodia", parameters=params)
print("Melodia data is {} long".format(len(data)))
hop, melody = data['vector']
print("Melodia melody is {} long".format(len(melody)))
print("------- hop ---------")
print(hop)
print("------- melody ---------")
print(melody)
# }}}
# Timestamp grids: 128-sample hop at 44.1 kHz with an 8-hop lead-in.
timestampsa = 8 * 128/44100.0 + np.arange(len(silvet_data)) * (128/44100.0)
timestampsm = 8 * 128/44100.0 + np.arange(len(melody)) * (128/44100.0)
valuess = []
timess = []
labels = []
# ax = plt.subplots()
# Sentinel "previous timestamp" used to sanity-check event ordering below.
oldtime = vamp.vampyhost.RealTime('seconds', -1)
# Draw each silvet note as a horizontal bar at its rounded MIDI pitch.
# NOTE(review): unpacking note.values() as (ts, dur, label, values)
# assumes dict insertion order matches silvet's field order — verify.
for note in silvet_data:
    for key in note:
        print("{}: {} ".format(key, note[key]), end='')
    print("\n")
    b = note.values()
    # print("b = :{}".format(b))
    ts, dur, label, values = list(b)
    # import ipdb; ipdb.set_trace()
    if ts < oldtime:
        print("*************** time error ****************")
    oldtime = ts
    print(label)
    y = values
    midinote = hz2midi(y)
    print(midinote)
    yy = values[1]
    plt.hlines(y=midinote[0], xmin=ts, xmax=ts+dur, color='r', linewidth=2)
    plt.text(ts, midinote[0], label, ha='left', va='center')
    valuess.append(midinote[0])
    timess.append(ts)
    labels += [label]
# del valuess[1::2]
# Melodia reports unvoiced frames as <= 0; mask them out of the plot.
melody_pos = melody[:]
melody_pos[melody <= 0] = None
# plt.ion()
plt.figure(figsize=(18, 6))
# Bug fix: removed a leftover `import ipdb; ipdb.set_trace()` breakpoint
# that dropped every run into the debugger before plotting.
plt.plot(timess, valuess, 'b-',)
# plt.plot(timestamps, melody)
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
melody_pos = librosa.hz_to_midi(melody_pos)
plt.plot(timestampsm, melody_pos, 'm-', label='melody')
plt.legend()
plt.show()
|
{"/silvet.py": ["/encoder.py"], "/pianoroll.py": ["/encoder.py"]}
|
36,926
|
DavidParkin/vampy-stuff
|
refs/heads/master
|
/librosa_beats.py
|
#!/usr/bin/python3
# coding: utf-8
#https://librosa.github.io/librosa/tutorial.html
# Flat script: load a fixed wav file, run librosa's default beat tracker,
# and dump the beat timestamps to beat_times.csv.  All effects are ordered
# file/stdout side effects; there are no function definitions.
from __future__ import print_function
import vamp
import librosa
#import essentia.standard as es
import matplotlib.pyplot as plt
# Hard-coded input path; edit to point at your own audio file.
audio_file = '/home/David/Music/Video/Stitch/maj_min-E.wav'
# mono 44.1 kHz, first 5 seconds only (keeps the beat tracker fast).
audio, sr = librosa.load(audio_file, sr=44100, mono=True, offset=0.0, duration=5.0)
# 3. Run the default beat tracker
tempo, beat_frames = librosa.beat.beat_track(y=audio, sr=sr)
print('Estimated tempo: {:.2f} beats per minute'.format(tempo))
# 4. Convert the frame indices of beat events into timestamps
beat_times = librosa.frames_to_time(beat_frames, sr=sr)
print('Saving output to beat_times.csv')
# NOTE(review): librosa.output was removed in librosa 0.8 — this line only
# works on librosa <= 0.7.x; confirm the pinned version before upgrading.
librosa.output.times_csv('beat_times.csv', beat_times)
# data = vamp.collect(audio, sr, "silvet:silvet")
# # hop, melody = data['vector']
# # print(hop)
# # print(melody)
# print(data)
# a = data['list']
# import numpy as np
# timestamps = 8 * 128/44100.0 + np.arange(len(a)) * (128/44100.0)
# valuess=[]
# timess=[]
# labels=[]
# for note in a:
#     for key in note:
#         print("{}: {}".format(key, note[key]), end='')
#     print("\n")
#     b = note.values()
#     ts, dur, label, values = list(b)
#     valuess.extend(values)
#     timess.append(ts)
#     labels+=[label]
# del valuess[1::2]
# # melody_pos = melody[:]
# # melody_pos[melody<=0] = None
# plt.figure(figsize=(18, 6))
# plt.plot(timess, valuess)
# # plt.plot(timestamps, melody)
# plt.xlabel('Time (s)')
# plt.ylabel('Frequency (Hz)')
# # plt.plot(timestamps, melody_pos)
# plt.show()
|
{"/silvet.py": ["/encoder.py"], "/pianoroll.py": ["/encoder.py"]}
|
36,927
|
DavidParkin/vampy-stuff
|
refs/heads/master
|
/NoteWorthy/noteworthy.py
|
#!/usr/bin/python3
import sys
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk
import librosa
import librosa.display
from numpy import arange, sin, pi
# import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
import matplotlib.patches as mpatches
from trepan.api import debug
import vlc
UI_INFO = """
<ui>
<menubar name='MenuBar'>
<menu action='FileMenu'>
<menu action='FileNew'>
<menuitem action='FileNewStandard' />
<menuitem action='FileNewFoo' />
<menuitem action='FileNewGoo' />
</menu>
<menuitem action='FileOpen' />
<separator />
<menuitem action='FileQuit' />
</menu>
<menu action='EditMenu'>
<menuitem action='EditCopy' />
<menuitem action='EditPaste' />
<menuitem action='EditSomething' />
</menu>
<menu action='ChoicesMenu'>
<menuitem action='ChoiceOne'/>
<menuitem action='ChoiceTwo'/>
<separator />
<menuitem action='ChoiceThree'/>
</menu>
</menubar>
<toolbar name='ToolBar'>
<toolitem action='FileNewStandard' />
<toolitem action='FileQuit' />
</toolbar>
<popup name='PopupMenu'>
<menuitem action='EditCopy' />
<menuitem action='EditPaste' />
<menuitem action='EditSomething' />
</popup>
</ui>
"""
# a Gtk ApplicationWindow
class Cursor(object):
    """Vertical tracking line + coordinate readout for a matplotlib Axes.

    mouse_move is intended to be connected to 'motion_notify_event'.
    """
    def __init__(self, ax):
        self.ax = ax
        #self.lx = ax.axhline(color='k')  # the horiz line
        self.ly = ax.axvline(color='k', alpha=0.2)  # the vert line
        # text location in axes coords
        self.txt = ax.text(0.7, 0.9, '', transform=ax.transAxes)
    def mouse_move(self, event):
        """Follow the pointer: move the vertical line and update the label."""
        # import ipdb; ipdb.set_trace()
        if not event.inaxes:
            # pointer is outside any axes; nothing to track
            return
        # if event.inaxes != self.ax2.axes: return
        print(event)
        # debug()
        print('%s move_cursor: x=%d, y=%d, xdata=%f, ydata=%f' %
              ('double' if event.dblclick else 'single',
               event.x, event.y, event.xdata, event.ydata))
        x, y = event.xdata, event.ydata
        # update the line positions
        # self.lx.set_ydata(y)
        self.ly.set_xdata(x)
        self.txt.set_text('x=%1.2f, y=%1.2f' % (x, y))
        # draw_idle defers the redraw to the next event-loop pass
        self.ax.figure.canvas.draw_idle()
class MyWindow(Gtk.ApplicationWindow):
    """Main application window: menu/toolbar, a VLC playback area, and two
    matplotlib subplots (a placeholder sine plot and the loaded waveform).

    Playback state is tracked by is_player_active / player_paused; the VLC
    player itself is created lazily in _realized once the drawing area has
    an X window id.
    """
    # constructor: the title is "Welcome to GNOME" and the window belongs
    # to the application app
    # cursor = None
    audio_file = ""         # path of the currently loaded audio file
    selection_start = 0     # waveform selection bounds (not yet wired up)
    selection_end = 0
    selecting = False
    # get file for analysis
    def get_audio(self, filename):
        """Load *filename* with librosa (mono 44.1 kHz, 5 s starting at 5 s)
        and return (samples, sample_rate)."""
        #load audio with file
        self.audio_file = filename
        audio, sr = librosa.load(self.audio_file, sr=44100, mono=True, offset=5.0, duration=5.0)
        return audio, sr
    def __init__(self, app):
        Gtk.Window.__init__(self, title="Welcome to GNOME", application=app)
        # NOTE(review): audio path is hard-coded here; FileOpen only swaps
        # the VLC media, not the analysed waveform.
        self.y, self.sr = self.get_audio('/home/David/Music/Video/Stitch/maj_min-E.wav')
        self.set_default_size(600, 400)
        self.player_paused=False
        self.is_player_active = False
    def set_boxes_and_events(self):
        """Build the whole widget tree and connect all signal handlers.

        Must be called once after construction (see MyApplication.do_activate).
        """
        'plot 1 place holder'
        figure = Figure(figsize=(5, 5), dpi=100)
        self.press = None
        ax1 =figure.add_subplot(212)
        ax1.margin = (2, 2)
        ax1.set_title('One')
        t = arange(0.0, 3.0, 0.01)
        s = sin(2*pi*t)
        ax1.plot(t, s)
        # plot 2 audio waveform
        self.ax2 =figure.add_subplot(211)
        self.ax2.margin = (2, 2)
        self.ax2.set_title('Two')
        # see https://librosa.github.io/librosa/generated/librosa.display.waveplot.html#librosa.display.waveplot
        librosa.display.waveplot(self.y, sr=self.sr, ax=self.ax2)
        sw = Gtk.ScrolledWindow()
        # A scrolled window border goes outside the scrollbars and viewport
        sw.set_border_width(10)
        canvas = FigureCanvas(figure)  # a gtk.DrawingArea
        sw.add_with_viewport(canvas)
        # cursor tracks mouse motion over the waveform axes only
        self.cursor = Cursor(self.ax2)
        self.cidpress = canvas.mpl_connect('button_press_event',self. onclick)
        self.cidmotion = canvas.mpl_connect('motion_notify_event', self.cursor.mouse_move)
        self.cidrelease = canvas.mpl_connect('button_release_event', self.on_release)
        # Screen content
        action_group = Gtk.ActionGroup("my_actions")
        self.add_file_menu_actions(action_group)
        self.add_edit_menu_actions(action_group)
        self.add_choices_menu_actions(action_group)
        uimanager = self.create_ui_manager()
        uimanager.insert_action_group(action_group)
        menubar = uimanager.get_widget("/MenuBar")
        box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        box.pack_start(menubar, False, False, 0)
        toolbar = uimanager.get_widget("/ToolBar")
        box.pack_start(toolbar, False, False, 0)
        eventbox = Gtk.EventBox()
        eventbox.connect("button-press-event", self.on_button_press_event)
        box.pack_start(eventbox, True, True, 0)
        label = Gtk.Label("Right-click to see the popup menu.")
        eventbox.add(label)
        self.popup = uimanager.get_widget("/PopupMenu")
        # transport buttons (play/pause, stop, loop) with stock icons
        self.playback_button = Gtk.Button()
        self.stop_button = Gtk.Button()
        self.loop_button = Gtk.Button()
        self.play_image = Gtk.Image.new_from_icon_name(
            "gtk-media-play",
            Gtk.IconSize.MENU
        )
        self.pause_image = Gtk.Image.new_from_icon_name(
            "gtk-media-pause",
            Gtk.IconSize.MENU
        )
        self.stop_image = Gtk.Image.new_from_icon_name(
            "gtk-media-stop",
            Gtk.IconSize.MENU
        )
        self.loop_image = Gtk.Image.new_from_icon_name(
            "media-playlist-repeat",
            Gtk.IconSize.MENU
        )
        self.playback_button.set_image(self.play_image)
        self.stop_button.set_image(self.stop_image)
        self.loop_button.set_image(self.loop_image)
        self.playback_button.connect("clicked", self.toggle_player_playback)
        self.stop_button.connect("clicked", self.stop_player)
        self.loop_button.connect("clicked", self.loop_player)
        # black drawing area that VLC renders video into (via set_xwindow)
        self.draw_area = Gtk.DrawingArea()
        self.draw_area.set_size_request(300,300)
        self.draw_area.modify_bg(Gtk.StateType.NORMAL, Gdk.Color(0, 0, 0))
        self.draw_area.connect("realize",self._realized)
        self.hbox = Gtk.Box(spacing=6)
        self.hbox.pack_start(self.playback_button, True, True, 0)
        self.hbox.pack_start(self.stop_button, True, True, 0)
        self.hbox.pack_start(self.loop_button, True, True, 0)
        #self.vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        box.pack_start(self.draw_area, True, True, 0)
        box.pack_start(self.hbox, False, False, 0)  # add sw - all matplot stuff
        box.pack_start(sw, True, True, 0)
        self.add(box)
    #{{{ menu methods
    def stop_player(self, widget, data=None):
        """Stop playback and restore the play icon."""
        self.player.stop()
        self.is_player_active = False
        self.playback_button.set_image(self.play_image)
    def loop_player(self, widget, data=None):
        # NOTE(review): always sets is_looping False — looks like an
        # unfinished toggle; confirm intended behaviour.
        self.is_looping = False
    def toggle_player_playback(self, widget, data=None):
        """
        Handler for Player's Playback Button (Play/Pause).
        """
        if self.is_player_active == False and self.player_paused == False:
            self.player.play()
            self.playback_button.set_image(self.pause_image)
            self.is_player_active = True
        elif self.is_player_active == True and self.player_paused == True:
            self.player.play()
            self.playback_button.set_image(self.pause_image)
            self.player_paused = False
        elif self.is_player_active == True and self.player_paused == False:
            self.player.pause()
            self.playback_button.set_image(self.play_image)
            self.player_paused = True
        else:
            pass
    def _realized(self, widget, data=None):
        """Create the VLC player once the widget has an X window, attach it
        to that window, and start playback immediately."""
        self.vlcInstance = vlc.Instance("--no-xlib")
        self.player = self.vlcInstance.media_player_new("--no-xlib")
        # NOTE(review): the line above is immediately overwritten here —
        # the vlcInstance-created player is discarded.
        self.player = vlc.MediaPlayer('/home/David/Music/Video/Stitch/maj_min-E.wav')
        #self.player = vlc.MediaPlayer('/home/David/Music/Video/Stitch/maj_min-E.wav')
        win_id = widget.get_window().get_xid()
        self.win_id = win_id
        # print(widget)
        # print(win_id)
        self.player.set_xwindow(win_id)
        self.player.play()
        self.playback_button.set_image(self.pause_image)
        self.is_player_active = True
    def add_file_menu_actions(self, action_group):
        """Register File menu actions (New/Open/Quit) on *action_group*."""
        action_filemenu = Gtk.Action("FileMenu", "File", None, None)
        action_group.add_action(action_filemenu)
        action_filenewmenu = Gtk.Action("FileNew", None, None, Gtk.STOCK_NEW)
        action_group.add_action(action_filenewmenu)
        action_new = Gtk.Action("FileNewStandard", "_New",
                                "Create a new file", Gtk.STOCK_NEW)
        action_new.connect("activate", self.on_menu_file_new_generic)
        action_group.add_action_with_accel(action_new, None)
        action_group.add_actions([
            ("FileNewFoo", None, "New Foo", None, "Create new foo",
             self.on_menu_file_new_generic),
            ("FileNewGoo", None, "_New Goo", None, "Create new goo",
             self.on_menu_file_new_generic),
        ])
        action_fileopen = Gtk.Action("FileOpen", None, None, Gtk.STOCK_OPEN)
        action_fileopen.connect("activate", self.on_menu_file_open)
        action_group.add_action(action_fileopen)
        action_filequit = Gtk.Action("FileQuit", None, None, Gtk.STOCK_QUIT)
        action_filequit.connect("activate", self.on_menu_file_quit)
        action_group.add_action(action_filequit)
    def add_edit_menu_actions(self, action_group):
        """Register Edit menu actions (Copy/Paste/Something)."""
        action_group.add_actions([
            ("EditMenu", None, "Edit"),
            ("EditCopy", Gtk.STOCK_COPY, None, None, None,
             self.on_menu_others),
            ("EditPaste", Gtk.STOCK_PASTE, None, None, None,
             self.on_menu_others),
            ("EditSomething", None, "Something", "<control><alt>S", None,
             self.on_menu_others)
        ])
    def add_choices_menu_actions(self, action_group):
        """Register the Choices radio group and the Three toggle."""
        action_group.add_action(Gtk.Action("ChoicesMenu", "Choices", None,
                                           None))
        action_group.add_radio_actions([
            ("ChoiceOne", None, "One", None, None, 1),
            ("ChoiceTwo", None, "Two", None, None, 2)
        ], 1, self.on_menu_choices_changed)
        three = Gtk.ToggleAction("ChoiceThree", "Three", None, None)
        three.connect("toggled", self.on_menu_choices_toggled)
        action_group.add_action(three)
    def create_ui_manager(self):
        """Build a UIManager from the UI_INFO XML and hook its accelerators
        onto this window."""
        uimanager = Gtk.UIManager()
        # Throws exception if something went wrong
        uimanager.add_ui_from_string(UI_INFO)
        # Add the accelerator group to the toplevel window
        accelgroup = uimanager.get_accel_group()
        self.add_accel_group(accelgroup)
        return uimanager
    def on_menu_file_new_generic(self, widget):
        print("A File|New menu item was selected.")
    def on_menu_file_open(self, widget):
        """File|Open: pick a media file and switch VLC playback to it."""
        dialog = Gtk.FileChooserDialog("Please choose a file", self,
                                       Gtk.FileChooserAction.OPEN,
                                       (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                                        Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
        self.add_filters(dialog)
        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            self.player.stop()
            audio_file = dialog.get_filename()
            self.player = self.vlcInstance.media_player_new("--no-xlib")
            self.player = vlc.MediaPlayer(audio_file)
            win_id = dialog.get_window().get_xid()
            # reuse the drawing area's window id saved in _realized
            self.player.set_xwindow(self.win_id)
            self.player.play()
        elif response == Gtk.ResponseType.CANCEL:
            print("Cancel clicked")
        dialog.destroy()
    def add_filters(self, dialog):
        """Attach the Music/Text/Python/Any file filters to a chooser."""
        filter = Gtk.FileFilter()
        filter.set_name("Music files")
        filter.add_pattern("*.wav")
        filter.add_pattern("*.mp3")
        filter.add_pattern("*.mp4")
        dialog.add_filter(filter)
        filter_text = Gtk.FileFilter()
        filter_text.set_name("Text files")
        filter_text.add_mime_type("text/plain")
        dialog.add_filter(filter_text)
        filter_py = Gtk.FileFilter()
        filter_py.set_name("Python files")
        # NOTE(review): add_pattern with a MIME string looks wrong —
        # presumably add_mime_type was intended; confirm.
        filter_py.add_pattern("text/x-python")
        dialog.add_filter(filter_py)
        filter_any = Gtk.FileFilter()
        filter_any.set_name("Any files")
        filter_any.add_pattern("*")
        dialog.add_filter(filter_any)
    def on_menu_file_quit(self, widget):
        Gtk.main_quit()
    def on_menu_others(self, widget):
        print("Menu item " + widget.get_name() + " was selected")
    def on_menu_choices_changed(self, widget, current):
        print(current.get_name() + " was selected.")
    def on_menu_choices_toggled(self, widget):
        if widget.get_active():
            print(widget.get_name() + " activated")
        else:
            print(widget.get_name() + " deactivated")
    #}}}
    def on_button_press_event(self, widget, event):
        # Check if right mouse button was preseed
        if event.type == Gdk.EventType.BUTTON_PRESS and event.button == 3:
            self.popup.popup(None, None, None, None, event.button, event.time)
            return True  # event has been handled
    def onclick(self, event):
        """Mouse press on the waveform axes: remember the press position and
        draw a vertical marker there."""
        # 'on button press we will see if the mouse is over us and store some data'
        if event.inaxes != self.ax2.axes: return
        print('%s click_init: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
              ('double' if event.dblclick else 'single', event.button,
               event.x, event.y, event.xdata, event.ydata))
        self.press = event.xdata, event.ydata
        # import ipdb; ipdb.set_trace()
        self.ax2.axes.axvline(event.xdata,color='k')
        old_xdata = event.xdata
        self.ax2.axes.figure.canvas.draw()
        self.ax2.figure.canvas.draw_idle()
    def on_release(self, event):
        """Mouse release: clear the stored press position."""
        print('%s release_init: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
              ('double' if event.dblclick else 'single', event.button,
               event.x, event.y, event.xdata, event.ydata))
        # canvas.mpl_disconnect(self.cidmotion)
        self.press = None
    def on_motion(self, event):
        """Drag handler (only active between onclick and on_release)."""
        if self.press is None: return
        # import ipdb; ipdb.set_trace()
        if event.inaxes != self.ax2.axes: return
        print('%s move_init: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
              ('double' if event.dblclick else 'single', event.button,
               event.x, event.y, event.xdata, event.ydata))
        x, y = event.xdata, event.ydata
        # update the line positions
        # self.cursor.lx.set_ydata(y)
        import ipdb; ipdb.set_trace()
        self.cursor.ly.set_xdata(x)
        self.cursor.txt.set_text('x=%1.2f, y=%1.2f' % (x, y))
        # import ipdb; ipdb.set_trace()
        # not drawing
        self.ax2.axes.figure.canvas.draw()
        self.ax2.figure.canvas.draw_idle()
class MyApplication(Gtk.Application):
    """Gtk.Application shell: creates one MyWindow on activation."""
    # constructor of the Gtk Application
    def __init__(self):
        Gtk.Application.__init__(self)
    # create and activate a MyWindow, with self (the MyApplication) as
    # application the window belongs to.
    # Note that the function in C activate() becomes do_activate() in Python
    def do_activate(self):
        win = MyWindow(self)
        # show the window and all its content
        # this line could go in the constructor of MyWindow as well
        win.set_boxes_and_events()
        win.show_all()
    # start up the application
    # Note that the function in C startup() becomes do_startup() in Python
    def do_startup(self):
        Gtk.Application.do_startup(self)
# create and run the application, exit with the value returned by
# running the program
# NOTE: module-level side effect — importing this file starts the GUI loop.
app = MyApplication()
exit_status = app.run(sys.argv)
sys.exit(exit_status)
# leftover experiment: draw a rectangle patch on the waveform axes
# c = mpatches.Rectangle((0.5, 0.5), 1, 1, facecolor="green",
#                        edgecolor="red", linewidth=3, alpha=0.5)
# self.ax2.add_patch(c)
|
{"/silvet.py": ["/encoder.py"], "/pianoroll.py": ["/encoder.py"]}
|
36,928
|
DavidParkin/vampy-stuff
|
refs/heads/master
|
/decoder.py
|
def vampy_decoder(dct):
    """json ``object_hook`` that revives vampy-specific wrapper objects.

    Recognised marker keys:
      - ``__realtime__``: two-element list -> ``vampyhost.RealTime(a, b)``
      - ``__float32__``:  scalar -> ``numpy.float32``
      - ``__ndarray__``:  nested list -> ``numpy.array``

    Any other dict is returned unchanged.  (The original implementation fell
    off the end and returned ``None`` for ordinary objects, which silently
    destroyed them when used as a json object_hook; it also printed debug
    noise on every call.)
    """
    if '__realtime__' in dct:
        x = dct['__realtime__'][0]
        y = dct['__realtime__'][1]
        return vampyhost.RealTime(x, y)
    if '__float32__' in dct:
        return numpy.float32(dct['__float32__'])
    if '__ndarray__' in dct:
        return numpy.array(dct['__ndarray__'])
    return dct
|
{"/silvet.py": ["/encoder.py"], "/pianoroll.py": ["/encoder.py"]}
|
36,943
|
skyler1ackerman/riotApi
|
refs/heads/master
|
/dataMan_class.py
|
# %%
from dataMan import getChampNames
import json, datetime, pprint, statistics, pickle
from copy import deepcopy
from config import TOKEN, ID
from riotwatcher import LolWatcher, ApiError
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
from riotClass import *
from functools import partial
pp = pprint.PrettyPrinter(indent=4)
# %%
# Module-level Riot API setup.  NOTE: latest performs a NETWORK call at
# import time (Data Dragon version lookup for the region).
my_region = 'na1'
username = '<USERNAME>'
watcher = LolWatcher(TOKEN)
latest = watcher.data_dragon.versions_for_region(my_region)['n']['champion']
class User:
    """A summoner's pickled match history plus filter/aggregate/plot helpers.

    ``data`` holds the pristine MatchList; ``fdata`` is the working copy that
    the ``filter*`` methods narrow down and ``resetData`` restores.
    """
    # TODO: Troubleshoot why this is so slow
    def __init__(self, username):
        self.username = username
        self.data = self.loadData(f'pickle/match_det_{self.username}')
        self.id = self.getUserId()
        for match in self.data.matchlist:
            match.get_id(self.id)
        self.champ_names = self.getChampNames()
        self.champ_ids = self.getChampIDs()
        self.fdata = deepcopy(self.data)
    def resetData(self):
        """Drop all filters: restore fdata from the pristine data."""
        self.fdata = deepcopy(self.data)
    # The filter* methods rebuild the list instead of removing elements
    # while iterating it — the original list.remove-in-a-for-loop skipped
    # every element that followed a removed one.
    def filterMode(self, mode):
        """Keep only matches whose gameMode equals *mode*."""
        self.fdata.matchlist = [
            m for m in self.fdata.matchlist if m.gameMode == mode
        ]
    def filterRole(self, lane):
        # NOTE(review): despite its name this filters by *lane* (and
        # filterLane filters by role); names kept for caller compatibility.
        self.fdata.matchlist = [
            m for m in self.fdata.matchlist
            if m.participants[m.userIndex].timeline.lane == lane
        ]
    def filterLane(self, role):
        # NOTE(review): filters by *role* — see filterRole.
        self.fdata.matchlist = [
            m for m in self.fdata.matchlist
            if m.participants[m.userIndex].timeline.role == role
        ]
    def filterChamp(self, champ):
        """Keep only matches in which the user played champion *champ* (name)."""
        champ_id = self.getChampID(champ)
        self.fdata.matchlist = [
            m for m in self.fdata.matchlist
            if m.participants[m.userIndex].championId == champ_id
        ]
    def loadData(self, file):
        """Unpickle the raw match list and coerce it into a MatchList.

        ``jsons`` / ``MatchList`` presumably come from ``from riotClass
        import *`` — TODO confirm.
        """
        with open(file, 'rb') as f:
            return jsons.load({'matchlist': pickle.load(f)}, MatchList)
    def getUserId(self):
        """accountId of this user, read from the most recent match.

        Fixed to compare against ``self.username`` (the original compared
        against the module-level ``username`` global).
        """
        # First match has most recent id
        for part in self.data.matchlist[0].participantIdentities:
            if self.username == part.player.summonerName:
                return part.player.accountId
        raise Exception('Player not found in first game!')
    def getChampNames(self):
        """Map champion key (str) -> champion name from Data Dragon."""
        static_champ_list = watcher.data_dragon.champions(latest, False, 'en_US')
        champ_dict = dict()
        for key in static_champ_list['data']:
            row = static_champ_list['data'][key]
            champ_dict[row['key']] = row['id']
        return champ_dict
    def getChampName(self, id_):
        """Champion name for a numeric champion id."""
        return self.champ_names[str(id_)]
    def getChampIDs(self):
        """Inverse map: champion name -> numeric id."""
        return {v: int(k) for k, v in self.champ_names.items()}
    def getChampID(self, name):
        """Numeric champion id for a champion name."""
        return self.champ_ids[name]
    def getStatList(self, *stat_list):
        """Collect the named per-game stats for the user over fdata.

        Returns a numpy array with one row per requested stat.
        """
        ret_list = [[] for _ in stat_list]
        for match in self.fdata.matchlist:
            # Figure out which player is the user
            player = match.participants[match.userIndex]
            try:
                for i, stat in enumerate(stat_list):
                    ret_list[i].append(getattr(player.stats, stat))
            except KeyError:  # TODO: Better way to do this?
                pass  # TODO: Check if this evenly spreads stats
        return np.array(ret_list)
    def maxStat(self, *stat_list):
        """Per-stat maximum over the filtered matches."""
        return [max(x) for x in self.getStatList(*stat_list)]
    def sumstat(self, *stat_list):
        """Per-stat total over the filtered matches."""
        return [sum(x) for x in self.getStatList(*stat_list)]
    def avgStat(self, *stat_list):
        """Per-stat mean over the filtered matches."""
        return [np.average(x) for x in self.getStatList(*stat_list)]
    def sumTeamStat(self, stat):  # TODO: Convert this to list
        """Sum a team-level stat over the user's own team."""
        total = 0
        for match in self.fdata.matchlist:
            team_id = match.participants[match.userIndex].teamId
            # Team is either 100 or 200, so we can do this
            total += getattr(match.teams[(team_id // 100) - 1], stat)
        return total
    def sumOtherTeamStat(self, stat):  # TODO: Make one function?
        """Sum a team-level stat over the opposing team."""
        total = 0
        for match in self.fdata.matchlist:
            team_id = match.participants[match.userIndex].teamId
            # Team is either 100 or 200, so we can do this
            total += getattr(match.teams[1 - ((team_id // 100) - 1)], stat)
        return total
    def sumTime(self):
        """Total seconds played across the filtered matches."""
        return sum(match.gameDuration for match in self.fdata.matchlist)
    def maxTime(self):
        """Longest match duration (seconds) among the filtered matches."""
        return max(match.gameDuration for match in self.fdata.matchlist)
    def plotHist(self, stat, bins=None, save=False, show=True, rotation=0):  # TODO: Fix zeros
        """Colour-mapped histogram of one stat over the filtered matches."""
        stat_list = self.getStatList(stat)[0]
        print(stat_list)
        cm = plt.cm.get_cmap('plasma')
        range_ = max(stat_list) - min(stat_list)
        if not bins:
            bins = range_
        try:
            bin_range = np.arange(min(stat_list), max(stat_list), (range_)//bins)-.5
        except ZeroDivisionError:
            raise Exception('Too many bins!')
        n, bins_, patches = plt.hist(stat_list, bins=bin_range)
        # colour each bar by its position along the value range
        bin_centers = 0.5 * (bins_[:-1] + bins_[1:])
        col = bin_centers - min(bin_centers)
        col /= max(col)
        for c, p in zip(col, patches):
            plt.setp(p, 'facecolor', cm(c))
        numticks = 15 if range_ > 15 else range_
        tick_range = np.arange(min(stat_list), max(stat_list)-1, (range_)//numticks)
        plt.xticks(tick_range, rotation=rotation)
        plt.title(f'Frequency of {stat}')
        plt.xlabel(f'Total {stat}')
        plt.ylabel('Frequency')
        plt.savefig(f'imgs/{stat}_hist_{bins}_bins_{username}') if save else 0
        plt.show() if show else 0
    def graphScatter(self, *stat_list, save=False, show=True):
        """Scatter one stat against another (exactly two stats)."""
        print(stat_list)
        if len(stat_list) != 2:
            raise Exception('Only supported for two stats vs eachother')
        ret_list = self.getStatList(*stat_list)
        plt.scatter(ret_list[0], ret_list[1], c=np.multiply(ret_list[0], ret_list[1]), cmap='plasma')
        plt.xlabel(stat_list[0])
        plt.ylabel(stat_list[1])
        title = f'{stat_list[0]} vs {stat_list[1]}'
        plt.title(title)
        plt.savefig(f'imgs/scatter/{title}_scatter_{username}') if save else 0
        plt.show() if show else 0
    def graphDict(self, graph_dict, num_vals=8, title=None, xlabel=None, ylabel=None, save=False, show=True, rotation=45):
        """Bar chart of the last *num_vals* entries of a dict, largest first."""
        labels = list(reversed(list(graph_dict.keys())[-num_vals:]))
        vals = list(reversed(list(graph_dict.values())[-num_vals:]))
        # normalise values to [0, 1] so the colormap spans the full range
        rescale = lambda y: (y - np.min(y)) / (np.max(y) - np.min(y))
        cm = plt.cm.get_cmap('plasma')
        x = np.arange(len(labels))
        width = .8
        rect = plt.bar(x, vals, width, label='graph', color=cm(rescale(vals)))
        plt.xticks(x, labels, rotation=rotation)
        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.savefig(f'imgs/{title}_bar_{num_vals}_bars_{username}') if save else 0
        plt.show() if show else 0
    def graphNormDist(self, stat, save=False, show=True, mode=None, lane=None, role=None, champ=None, color='maroon'):
        """Plot a fitted normal pdf over the sorted values of *stat*.

        Fixed: the original forwarded mode/lane/role/champ keyword args to
        ``getStatList``, which only accepts ``*stat_list`` (TypeError).  The
        optional filters are now applied via the filter* methods (note they
        narrow ``fdata``; call resetData to undo).
        """
        if isinstance(stat, list):
            raise Exception('Only supported for one stat at a time')
        if mode is not None:
            self.filterMode(mode)
        if lane is not None:
            self.filterRole(lane)   # filterRole filters by lane (see note there)
        if role is not None:
            self.filterLane(role)   # filterLane filters by role
        if champ is not None:
            self.filterChamp(champ)
        norm_list = self.getStatList(stat)[0]
        norm_list.sort()
        mean = statistics.mean(norm_list)
        sd = statistics.stdev(norm_list)
        plt.plot(norm_list, norm.pdf(norm_list, mean, sd), color=color)
        plt.xlabel(stat)
        plt.ylabel('Frequency')
        plt.title(f'Normal Distribution of {stat}')
        plt.savefig(f'imgs/norm/{stat}_norm_{username}') if save else 0
        plt.show() if show else 0
    def userPlays(self):  # TODO: Work with ID's?
        """Count, per summoner name, how many filtered matches they appear in."""
        ret_dict = dict()
        for match in self.fdata.matchlist:
            for part in match.participantIdentities:
                player = part.player.summonerName
                if player not in ret_dict.keys():
                    ret_dict[player] = 1
                else:
                    ret_dict[player] += 1
        return {k: v for k, v in sorted(ret_dict.items(), key=lambda item: item[1])}
    def champCounts(self):
        """Games played per champion (name), sorted ascending by count."""
        ret_dict = dict()
        for match in self.fdata.matchlist:
            player = match.participants[match.userIndex]
            champ = player.championId
            if champ in ret_dict.keys():
                ret_dict[champ] += 1
            else:
                ret_dict[champ] = 1
        return {self.getChampName(k): v for k, v in sorted(ret_dict.items(), key=lambda item: item[1])}
    def champKDAs(self, min_games=10):
        """KDA ((k+a)/max(1, d)) per champion with at least *min_games* games."""
        ret_dict = dict()
        for match in self.fdata.matchlist:
            player = match.participants[match.userIndex]
            champ = player.championId
            kills = player.stats.kills
            deaths = player.stats.deaths
            assists = player.stats.assists
            if champ in ret_dict.keys():
                ret_dict[champ]['kills'] += kills
                ret_dict[champ]['assists'] += assists
                ret_dict[champ]['deaths'] += deaths
                ret_dict[champ]['count'] += 1
            else:
                ret_dict[champ] = {'kills': kills, 'deaths': deaths, 'assists': assists, 'count': 1}
        ret_dict = {k: (v['kills']+v['assists'])/max([1, v['deaths']]) for k, v in ret_dict.items() if v['count'] >= min_games}
        return {self.getChampName(k): v for k, v in sorted(ret_dict.items(), key=lambda item: item[1])}
    # TODO: Fix and move to graph class
    # TODO: Make this work with maxstat and sumstat
    # TODO: How to iterate through classes
    def compareStats(self, stat, userList, title=None, xlabel=None, ylabel=None, save=False, show=True, rotation=0):
        """Bar chart comparing one averaged stat across several User objects."""
        stat_dict = dict()
        for user in userList:
            stat_dict[user] = user.avgStat(stat)
        print(stat_dict)
        self.graphDict(stat_dict, num_vals=len(stat_dict), title=title, xlabel=xlabel, ylabel=ylabel, save=save, show=show, rotation=rotation)
    # TODO: Make generic for
    # TODO: Make filter for champ
    def statPerChamp(self, stat, min_games=5):
        """Average of *stat* for champions with at least *min_games* games.

        NOTE(review): avgStat is not restricted per champion here, so every
        qualifying champion currently gets the same overall average — matches
        the author's open TODOs above.
        """
        champ_counts = self.champCounts()
        stat_dict = dict()
        for k, v in champ_counts.items():
            if v >= min_games:
                stat_dict[k] = self.avgStat(stat)
        return {k: v for k, v in sorted(stat_dict.items(), key=lambda item: item[1])}
# Ad-hoc notebook-cell driver code (VS Code "# %%" cells).
stats = ['wardsPlaced', 'wardsKilled', 'visionWardsBoughtInGame', 'visionScore']
userList=['thomasm16', 'Demente', 'Adrian Gomez', 'Jamessv98']
stat = 'visionScore'
stat_list = ['totalTimeCrowdControlDealt', 'pentaKills']
# %%
# Loads the pickled match history for the configured username (slow; see
# the TODO on User.__init__).
tom = User(username)
# plotHist('visionScore', mode='CLASSIC', save=True, show=True)
# TODO: STAT per STAT
# TODO: Allow graphing of many stats
# TODO: Improve titles
# TODO: Items and runes
# TODO: Fix default values
# %%
tom.filterMode('CLASSIC')
tom.avgStat(stat)
# %%
# print(data.matchlist[0].userIndex)
# %%
|
{"/dataMan_class.py": ["/dataMan.py", "/riotClass.py"]}
|
36,944
|
skyler1ackerman/riotApi
|
refs/heads/master
|
/dataMan.py
|
import json, datetime, pprint, statistics, pickle
from config import TOKEN, ID
from riotwatcher import LolWatcher, ApiError
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
pp = pprint.PrettyPrinter(indent=4)
# Module-level Riot API setup.  NOTE: latest performs a NETWORK call at
# import time.  Most functions below also read a module-global ``data``
# match list that is expected to be loaded elsewhere (e.g. via loadData).
my_region = 'na1'
username = '<USERNAME>'
watcher = LolWatcher(TOKEN)
latest = watcher.data_dragon.versions_for_region(my_region)['n']['champion']
def getItemsNames():
    """Fetch the Data Dragon item table and map item id (str) -> item name."""
    raw = watcher.data_dragon.items(latest, False)
    return {item_id: entry['name'] for item_id, entry in raw['data'].items()}
def getItemName(id_):
    """Resolve an item id to its display name.

    Lazily builds and caches the module-global ``item_dict`` on first use.
    """
    global item_dict  # TODO: Change this to be from file
    if 'item_dict' not in globals():
        item_dict = getItemsNames()
    return item_dict[str(id_)]
def removeDup():
    """Return a copy of the global ``data`` match list with duplicate
    gameIds removed (first occurrence wins).

    Improvements over the original: the unused ``res`` dict is gone, and
    seen ids are tracked in a set (O(1) membership) instead of a list
    (O(n) scan per match).
    """
    seen = set()
    newData = []
    for match in data:
        if match['gameId'] not in seen:
            seen.add(match['gameId'])
            newData.append(match)
    return newData
def getChampNames():
    """Fetch the Data Dragon champion table and map champion key (str)
    -> champion name."""
    raw = watcher.data_dragon.champions(latest, False, 'en_US')
    return {entry['key']: entry['id'] for entry in raw['data'].values()}
def getChampName(id_):
    """Resolve a numeric champion id to its name.

    Lazily builds and caches the module-global ``champ_dict`` on first use.
    """
    global champ_dict  # TODO: Change this to be from file
    if 'champ_dict' not in globals():
        champ_dict = getChampNames()
    return champ_dict[str(id_)]
def getChampIDs():
    """Invert the champion map: champion name -> numeric champion id.

    Lazily builds and caches the module-global ``champ_dict`` on first use.
    """
    global champ_dict  # TODO: Change this to be from file
    if 'champ_dict' not in globals():
        champ_dict = getChampNames()
    return dict((name, int(key)) for key, name in champ_dict.items())
def getChampID(name):
    """Numeric champion id for a champion *name*.

    Lazily builds and caches the module-global ``id_dict`` on first use.
    """
    global id_dict  # TODO: Change this to be from file
    if 'id_dict' not in globals():
        id_dict = getChampIDs()
    return id_dict[name]
def loadData(file):
    """Deserialize and return the pickled object stored at *file*."""
    with open(file, 'rb') as handle:
        return pickle.load(handle)
def getUserId(username=username):
    """Return the accountId of *username* from the most recent match in the
    global ``data`` list.

    Raises:
        LookupError: if the player is not in the first match.  (The original
        executed a bare ``raise`` with no active exception, which surfaces
        as an unhelpful ``RuntimeError: No active exception to re-raise``.)
    """
    # First match has most recent id
    for part in data[0]['participantIdentities']:
        if username == part['player']['summonerName']:
            return part['player']['accountId']
    raise LookupError(f'player {username!r} not found in the most recent match')
def getStatList(stat_list, mode=None, lane=None, role=None, username=username, champ=None, timenorm=False):
    """Collect per-game stat values for *username* from the global ``data``.

    stat_list may be a single stat name (returns a flat array) or a list of
    names (returns one row per stat).  mode/lane/role/champ filter matches;
    timenorm divides each value by the match's gameDuration.

    Games where the stat key is missing are skipped silently (KeyError pass).
    """
    if isinstance(stat_list, list):
        ret_list = [[] for x in stat_list]
    else:
        ret_list = list()
    if champ:
        champ_id=getChampID(champ)
    sum_id=getUserId(username=username)
    for match in data:
        if mode != None and match['gameMode']!=mode:
            continue
        # Figure out which player is the user
        for part in match['participantIdentities']:
            if sum_id == part['player']['accountId']:
                # participantId is 1-based; participants is 0-based
                player = match['participants'][part['participantId']-1]
                # these continues only skip the inner loop iteration, which
                # works because only the matching participant reaches here
                if lane and player['timeline']['lane']!=lane:
                    continue
                if role and player['timeline']['role']!=role:
                    continue
                if champ and player['championId']!=champ_id:
                    continue
                # Sum the total of that stat
                try:
                    if not isinstance(stat_list, list):
                        if timenorm:
                            ret_list.append((player['stats'][stat_list])/match['gameDuration'])
                        else:
                            ret_list.append(player['stats'][stat_list])
                    else:
                        # collect into temp first so a KeyError mid-way does
                        # not leave the rows unevenly filled
                        temp=[]
                        for i, stat in enumerate(stat_list):
                            if timenorm:
                                temp.append(player['stats'][stat]/match['gameDuration'])
                            else:
                                temp.append(player['stats'][stat])
                        for i, val in enumerate(temp):
                            ret_list[i].append(val)
                except KeyError:
                    pass
    return np.array(ret_list)
def maxStat(stat_list, mode=None, lane=None, role=None, username=username, champ=None, timenorm=None):
    """Maximum of each requested stat over the matching games.

    Passes arguments to getStatList explicitly instead of the original
    ``**locals()`` hack, which silently breaks as soon as any local
    variable is added to this function.
    """
    values = getStatList(stat_list, mode=mode, lane=lane, role=role,
                         username=username, champ=champ, timenorm=timenorm)
    if isinstance(stat_list, list):
        return [max(x) for x in values]
    return max(values)
def sumstat(stat_list, mode=None, lane=None, role=None, username=username, champ=None, timenorm=None):
    """Total of each requested stat over the matching games.

    Adds the ``timenorm`` parameter (default None, i.e. off) for consistency
    with maxStat/avgStat — backward compatible — and passes arguments to
    getStatList explicitly instead of the fragile ``**locals()`` hack.
    """
    values = getStatList(stat_list, mode=mode, lane=lane, role=role,
                         username=username, champ=champ, timenorm=timenorm)
    if isinstance(stat_list, list):
        return [sum(x) for x in values]
    return sum(values)
def avgStat(stat_list, mode=None, lane=None, role=None, username=username, champ=None, timenorm=None):
    """Mean of each requested stat over the matching games.

    Passes arguments to getStatList explicitly instead of the fragile
    ``**locals()`` hack (see maxStat).
    """
    values = getStatList(stat_list, mode=mode, lane=lane, role=role,
                         username=username, champ=champ, timenorm=timenorm)
    if isinstance(stat_list, list):
        return [np.average(x) for x in values]
    return np.average(values)
def sumTeamStat(stat, mode=None):
    """Sum a team-level *stat* for the user's own team over global ``data``."""
    total = 0
    id_ = 500  # sentinel participant id; overwritten once the user is found
    for match in data:
        if mode and match['gameMode'] != mode:
            continue
        for part in match['participantIdentities']:
            if part['player']['summonerName'] != username:
                continue
            id_ = part['participantId']
            team = match['participants'][id_ - 1]['teamId']
            # teamId is 100 or 200, so //100 - 1 gives index 0 or 1
            total += match['teams'][team // 100 - 1][stat]
    return total
def sumOtherTeamStat(stat, mode=None):
    """Sum a team-level *stat* for the team OPPOSING the user over global ``data``."""
    total = 0
    id_ = 500  # sentinel participant id; overwritten once the user is found
    for match in data:
        if mode and match['gameMode'] != mode:
            continue
        for part in match['participantIdentities']:
            if part['player']['summonerName'] != username:
                continue
            id_ = part['participantId']
            team = match['participants'][id_ - 1]['teamId']
            # flip the 0/1 team index to address the enemy team
            total += match['teams'][1 - (team // 100 - 1)][stat]
    return total
def sumTime(mode=None):
    """Total gameDuration (seconds) of matches in global ``data``, optionally
    restricted to a gameMode.

    The original's unused ``id_`` local is removed.
    """
    return sum(match['gameDuration'] for match in data
               if not mode or match['gameMode'] == mode)
def maxTime(mode=None):
    """Longest gameDuration (seconds) among matches in global ``data``,
    optionally restricted to a gameMode.  Returns -1 when nothing matches
    (same as the original's untouched ``cur_max`` sentinel).

    The original's unused ``total`` local is removed.
    """
    return max(
        (match['gameDuration'] for match in data
         if not mode or match['gameMode'] == mode),
        default=-1,
    )
def plotHist(stat, mode=None, lane=None, champ=None, bins=None, save=False, show=True, timenorm=False, rotation=0):
    """Colour-mapped histogram of a single stat over the matching games.

    Raises:
        Exception('Too many bins!') when the bin width collapses to zero.
        (The original printed the message and then crashed with a NameError
        because ``bin_range`` was never assigned; this now matches the
        behaviour of the class-based version in dataMan_class.)
    """
    # Get the stats list
    stat_list = getStatList(stat, mode=mode, lane=lane, champ=champ, timenorm=timenorm)
    # Get color map
    cm = plt.cm.get_cmap('plasma')
    # Make the histogram
    range_ = max(stat_list) - min(stat_list)
    if bins is None:
        bins = range_
    try:
        bin_range = np.arange(min(stat_list), max(stat_list), (range_)//bins)-.5
    except ZeroDivisionError:
        raise Exception('Too many bins!')
    n, bins_, patches = plt.hist(stat_list, bins=bin_range)
    # Change the color: each bar coloured by its position along the range
    bin_centers = 0.5 * (bins_[:-1] + bins_[1:])
    col = bin_centers - min(bin_centers)
    col /= max(col)
    for c, p in zip(col, patches):
        plt.setp(p, 'facecolor', cm(c))
    # cap the axis at 15 ticks so labels stay readable
    numticks = 15 if range_ > 15 else range_
    tick_range = np.arange(min(stat_list), max(stat_list)-1, (range_)//numticks)
    plt.xticks(tick_range, rotation=rotation)
    plt.title(f'Frequency of {stat}')
    plt.xlabel(f'Total {stat}')
    plt.ylabel('Frequency')
    plt.savefig(f'imgs/{stat}_hist_{bins}_bins_{username}') if save else 0
    plt.show() if show else 0
def graphScatter(stat_list, save=False, show=True, mode=None, lane=None, role=None, champ=None):
    """Scatter one stat against another; *stat_list* must hold exactly two names."""
    if len(stat_list) != 2:
        raise Exception('Only supported for two stats vs eachother')
    values = getStatList(stat_list, mode=mode, lane=lane, role=role, champ=champ)
    xs, ys = values[0], values[1]
    # colour each point by the product of its two coordinates
    plt.scatter(xs, ys, c=np.multiply(xs, ys), cmap='plasma')
    plt.xlabel(stat_list[0])
    plt.ylabel(stat_list[1])
    title = f'{stat_list[0]} vs {stat_list[1]}'
    plt.title(title)
    if save:
        plt.savefig(f'imgs/scatter/{title}_scatter_{username}')
    if show:
        plt.show()
def graphDict(graph_dict, num_vals=8, title=None, xlabel=None, ylabel=None, save=False, show=True, rotation=45):
    """Bar-chart the last `num_vals` entries of a dict, largest-valued first.

    The dicts produced elsewhere in this module are sorted ascending, so the
    tail holds the largest values; it is reversed for display.
    """
    key_tail = list(graph_dict.keys())[-num_vals:]
    val_tail = list(graph_dict.values())[-num_vals:]
    labels = list(reversed(key_tail))
    vals = list(reversed(val_tail))
    def rescale(y):
        # Map the values onto [0, 1] for the colormap
        return (y - np.min(y)) / (np.max(y) - np.min(y))
    cm = plt.cm.get_cmap('plasma')
    x = np.arange(len(labels))
    width = .8
    rect = plt.bar(x, vals, width, label='graph', color=cm(rescale(vals)))
    plt.xticks(x, labels, rotation=rotation)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    if save:
        plt.savefig(f'imgs/{title}_bar_{num_vals}_bars_{username}')
    if show:
        plt.show()
def graphNormDist(stat, save=False, show=True, mode=None, lane=None, role=None, champ=None, color='maroon'):
    """Plot a normal distribution fitted to one stat's per-match samples."""
    if isinstance(stat, list):
        raise Exception('Only supported for one stat at a time')
    samples = getStatList(stat, mode=mode, lane=lane, role=role, champ=champ)
    samples.sort()
    mu = statistics.mean(samples)
    sigma = statistics.stdev(samples)
    plt.plot(samples, norm.pdf(samples, mu, sigma), color=color)
    plt.xlabel(stat)
    plt.ylabel('Frequency')
    plt.title(f'Normal Distribution of {stat}')
    if save:
        plt.savefig(f'imgs/norm/{stat}_norm_{username}')
    if show:
        plt.show()
def userPlays():
    """Count how many recorded matches each summoner name appears in."""
    counts = dict()
    for match in data:
        # Every participant identity in the match contributes one appearance
        for part in match['participantIdentities']:
            name = part['player']['summonerName']
            counts[name] = counts.get(name, 0) + 1
    return counts
def userWins():
    """For every summoner that appears in the recorded matches, count how many
    of those shared matches the logged-in user (`username`) won.

    NOTE(review): `id_` starts at the sentinel 500 and is only replaced once
    the user's identity is seen; participants iterated before that entry use
    the previous match's id (or the sentinel, which would index out of
    range) — verify that every stored match contains `username` and that the
    ordering assumption is intended.
    """
    ret_dict = dict()
    id_=500
    for match in data:
        # Get the participant
        for part in match['participantIdentities']:
            if username == part['player']['summonerName']:
                id_=part['participantId']
            player = part['player']['summonerName']
            toAdd = 0
            # participantId is 1-based; the participants list is 0-based
            if match['participants'][id_-1]['stats']['win']:
                toAdd = 1
            if player not in ret_dict.keys():
                ret_dict[player] = toAdd
            else:
                ret_dict[player] += toAdd
    return ret_dict
def champCounts(mode=None):
    """Tally the user's champion picks and return champion-name -> count,
    sorted ascending by count.

    :param mode: optional game-mode filter (e.g. 'CLASSIC')

    NOTE(review): the increment sits inside the participant-identity loop, so
    each match contributes once per identity (typically 10x), and the mode
    filter also guards participants rather than whole matches — confirm both
    are intended before relying on absolute counts.
    """
    # Sentinel participant id; assumed replaced before first use — TODO confirm
    id_=500
    ret_dict = dict()
    for match in data:
        for part in match['participantIdentities']:
            if mode != None and match['gameMode']!=mode:
                continue
            if username == part['player']['summonerName']:
                id_=part['participantId']
            champ = match['participants'][id_-1]['championId']
            if champ in ret_dict.keys():
                ret_dict[champ]+=1
            else:
                ret_dict[champ]=1
    return {getChampName(k):v for k,v in sorted(ret_dict.items(), key=lambda item: item[1])}
def champKDAs(min_games=10):
    """Return champion-name -> KDA, i.e. (kills + assists) / deaths with
    deaths floored at 1, restricted to champions with at least `min_games`
    recorded games, sorted ascending by KDA."""
    totals = dict()
    for match in data:
        for part in match['participantIdentities']:
            if username != part['player']['summonerName']:
                continue
            me = match['participants'][part['participantId']-1]
            champ = me['championId']
            stats = me['stats']
            entry = totals.setdefault(champ, {'kills': 0, 'deaths': 0, 'assists': 0, 'count': 0})
            entry['kills'] += stats['kills']
            entry['deaths'] += stats['deaths']
            entry['assists'] += stats['assists']
            entry['count'] += 1
    kdas = {
        champ: (t['kills'] + t['assists']) / max([1, t['deaths']])
        for champ, t in totals.items()
        if t['count'] >= min_games
    }
    return {getChampName(k): v for k, v in sorted(kdas.items(), key=lambda item: item[1])}
def compareStats(stat, userList, fun=avgStat, mode=None, lane=None, role=None, title=None, xlabel=None, ylabel=None, save=False, show=True, rotation=0, timenorm=False):
    """Compare one stat across several users and bar-chart the result.

    WARNING: rebinds the module-level `data` to each user's pickled match
    history in turn, and leaves it pointing at the LAST user's data.

    :param stat: stat key to compare
    :param userList: summoner names whose pickles live under pickle/
    :param fun: aggregation function (e.g. avgStat) applied per user
    """
    global data
    stat_dict = dict()
    for user in userList:
        # Swap in this user's match history before computing the stat
        data = loadData(f'pickle/match_det_{user}')
        stat_dict[user] = fun(stat, mode=mode, lane=lane, role=role, username=user, timenorm=timenorm)
    print(stat_dict)
    graphDict(stat_dict, num_vals=len(stat_dict), title=title, xlabel=xlabel, ylabel=ylabel, save=save, show=show, rotation=rotation)
def statPerChamp(stat, min_games=5, fun=avgStat, mode=None, timenorm=False):
    """Compute `fun` of `stat` for every champion played at least `min_games`
    times; returns champion-name -> value sorted ascending by value."""
    champ_counts = champCounts(mode=mode)
    pprint.pprint(champ_counts)
    per_champ = {
        name: fun(stat, champ=name, mode=mode, timenorm=timenorm)
        for name, plays in champ_counts.items()
        if plays >= min_games
    }
    # TODO: Why does this break?
    return dict(sorted(per_champ.items(), key=lambda item: item[1]))
# --- Ad-hoc driver / scratch area: uncomment lines below to produce plots ---
# def statPerStat(stat_list, mode=None, lane=None, role=None)
# champCounts = champCounts()
# graphDict(champCounts, ylabel='Total plays', title='Total champ plays', save=True, num_vals=10)
# print(avgStat('visionScore', mode='CLASSIC'))
stats = ['wardsPlaced', 'wardsKilled', 'visionWardsBoughtInGame', 'visionScore']
userList=['thomasm16', 'Demente', 'Adrian Gomez', 'Jamessv98']
stat = 'visionScore'
stat_list = ['totalTimeCrowdControlDealt', 'visionScore']
# compareStats(stat, userList, lane='JUNGLE', title=f' Avg {stat} per person', ylabel=f'Avg {stat} per game', xlabel=f'Player', save=True, timenorm=True)
data = loadData(f'pickle/match_det_{username}')
# graphDict(statPerChamp('totalDamageDealt', timenorm=True, mode='CLASSIC'), rotation=45)
# NOTE(review): `sumstat` is not defined with this spelling in the visible
# code — verify the intended name (sumStat?) before running the next line.
print(sumstat('perk0Var1'))
# print(avgStat('totalDamageDealt', mode='CLASSIC'))
# graphDict(champKDAs(), title=f'{username} Best KDA\'s', xlabel='Champs', ylabel='KDA', save=True, show=True)
# avgStat(stat, champ='Syndra')
# graphNormDist(stat, mode='CLASSIC', show=False)
stat = 'goldEarned'
# graphNormDist(stat, mode='CLASSIC', color='blue')
# plotHist(stat, mode='CLASSIC', timenorm=True, bins=20, rotation=90)
# graphDict(statPerChamp(stat, min_games=20), title='Vision Score per champ', xlabel='Champion', ylabel='Vision Score', rotation=45,save=True, num_vals=20)
# print(avgStat(stat, champ='Thresh'))
# pprint.pprint(data[0]['participants'][0]['stats'])
# graphDict(champCounts(), save=True, title='King is a one trick', rotation=0)
# plotHist('visionScore', mode='CLASSIC', save=True, show=True)
# TODO: STAT per STAT
# TODO: Allow graphing of many stats
# TODO: Improve titles
# TODO: Items and runes
|
{"/dataMan_class.py": ["/dataMan.py", "/riotClass.py"]}
|
36,945
|
skyler1ackerman/riotApi
|
refs/heads/master
|
/pickler.py
|
import pickle, json, time
# Convert each user's JSON match dump into a pickle, then re-load it to
# verify the round trip.
# BUG FIX: removed the dead store `username = '<USERNAME>'`, which was
# immediately shadowed by the loop variable below.
usernames=['<USERNAMES>']
for username in usernames:
    with open(f'json/match_det_{username}.json', "r") as f:
        data = json.load(f)
    with open(f'match_det_{username}', 'wb') as f:
        pickle.dump(data, f)
    # Sanity check: make sure the freshly written pickle loads back
    with open(f'match_det_{username}', 'rb') as f:
        new_data = pickle.load(f)
|
{"/dataMan_class.py": ["/dataMan.py", "/riotClass.py"]}
|
36,946
|
skyler1ackerman/riotApi
|
refs/heads/master
|
/api_getter.py
|
from config import TOKEN, ID
from riotwatcher import LolWatcher, ApiError
import pprint, json, time, pickle
pp = pprint.PrettyPrinter(indent=4)
watcher = LolWatcher(TOKEN)
my_region = 'na1'
username = '<USERNAME>'
me = watcher.summoner.by_name(my_region, username)
# # my_matches = watcher.match.matchlist_by_account(my_region, me['accountId'])
# # last_match = my_matches['matches'][0]
# # match_detail = watcher.match.by_id(my_region, last_match['gameId'])
# Page through the account's match list 100 games at a time and fetch the
# full detail blob for every match, retrying on API errors (rate limits).
i = 0
my_matches = {'matches': [1]}  # non-empty sentinel so the loop runs once
all_list = []
while len(my_matches['matches']) != 0:
    my_matches = watcher.match.matchlist_by_account(my_region, me['accountId'], begin_index=i*100, end_index=i*100+100)
    i += 1
    h = 0
    while h < len(my_matches['matches']):
        match = my_matches['matches'][h]
        try:
            match_detail = watcher.match.by_id(my_region, match['gameId'])
            all_list.append(match_detail)
            h += 1
        except Exception as exc:
            # BUG FIX: used to print the Exception *class*; report the actual
            # error, then back off and retry the same match.
            print(exc)
            time.sleep(20)
    print(i)
with open(f'pickle/match_det_{username}', 'wb') as f:
    # BUG FIX: previously dumped the undefined name `data` (NameError);
    # persist the collected match details instead.
    pickle.dump(all_list, f)
|
{"/dataMan_class.py": ["/dataMan.py", "/riotClass.py"]}
|
36,947
|
skyler1ackerman/riotApi
|
refs/heads/master
|
/riotClass.py
|
import pickle, jsons, pprint
from dataclasses import dataclass, field
from typing import List
pp = pprint.PrettyPrinter(indent=4)
@dataclass
class Player:
    """Identity of a summoner as reported by the Riot match API.

    All fields default to None so partially filled payloads can still be
    deserialized.
    """
    platformId: str = None
    accountId: str = None
    summonerName: str = None
    summonerId: str = None
    currentPlatformId: str = None
    currentAccountId: str = None
    matchHistoryUri: str = None
    profileIcon: int = None
    def __str__(self):
        # A player is displayed by summoner name only
        return self.summonerName
@dataclass
class ParticipantId:
    """Links a match-local participant number (1-based) to a Player identity."""
    participantId: int
    player: Player
@dataclass
class Timeline:
    """Per-participant timeline info: role/lane plus the per-minute delta
    dictionaries the Riot API reports (each defaults to an empty dict)."""
    participantId: int
    role: str
    lane: str
    creepsPerMinDeltas: dict = field(default_factory=dict)
    xpPerMinDeltas: dict = field(default_factory=dict)
    goldPerMinDeltas: dict = field(default_factory=dict)
    csDiffPerMinDeltas: dict = field(default_factory=dict)
    xpDiffPerMinDeltas: dict = field(default_factory=dict)
    damageTakenPerMinDeltas: dict = field(default_factory=dict)
    damageTakenDiffPerMinDeltas: dict = field(default_factory=dict)
@dataclass
class Stats:
    """End-of-game stat line for a single participant, mirroring the Riot
    match API 'stats' object field-for-field.

    Counters default to 0; identifiers, item slots, rune (perk) fields and
    the first-objective booleans default to None, meaning "not reported".
    """
    participantId: int = None
    win: bool = None
    # Item slots 0-5 plus trinket (item6)
    item0: int = None
    item1: int = None
    item2: int = None
    item3: int = None
    item4: int = None
    item5: int = None
    item6: int = None
    kills: int = 0
    deaths: int = 0
    assists: int = 0
    largestKillingSpree: int = 0
    largestMultiKill: int = 0
    killingSprees: int = 0
    longestTimeSpentLiving: int = 0
    doubleKills: int = 0
    tripleKills: int = 0
    quadraKills: int = 0
    pentaKills: int = 0
    unrealKills: int = 0
    totalDamageDealt: int = 0
    magicDamageDealt: int = 0
    physicalDamageDealt: int = 0
    trueDamageDealt: int = 0
    largestCriticalStrike: int = 0
    totalDamageDealtToChampions: int = 0
    magicDamageDealtToChampions: int = 0
    physicalDamageDealtToChampions: int = 0
    trueDamageDealtToChampions: int = 0
    totalHeal: int = 0
    totalUnitsHealed: int = 0
    damageSelfMitigated: int = 0
    damageDealtToObjectives: int = 0
    damageDealtToTurrets: int = 0
    visionScore: int = 0
    timeCCingOthers: int = 0
    totalDamageTaken: int = 0
    magicalDamageTaken: int = 0
    physicalDamageTaken: int = 0
    trueDamageTaken: int = 0
    goldEarned: int = 0
    goldSpent: int = 0
    turretKills: int = 0
    inhibitorKills: int = 0
    totalMinionsKilled: int = 0
    neutralMinionsKilled: int = 0
    neutralMinionsKilledEnemyJungle: int = 0
    totalTimeCrowdControlDealt: int = 0
    champLevel: int = 0
    visionWardsBoughtInGame: int = 0
    sightWardsBoughtInGame: int = 0
    wardsPlaced: int = 0
    wardsKilled: int = 0
    combatPlayerScore: int = 0
    objectivePlayerScore: int = 0
    totalPlayerScore: int = 0
    totalScoreRank: int = 0
    playerScore0: int = 0
    playerScore1: int = 0
    playerScore2: int = 0
    playerScore3: int = 0
    playerScore4: int = 0
    playerScore5: int = 0
    playerScore6: int = 0
    playerScore7: int = 0
    playerScore8: int = 0
    playerScore9: int = 0
    # Rune (perk) selections and their reported sub-values
    perk0: int = None
    perk0Var1: int = None
    perk0Var2: int = None
    perk0Var3: int = None
    perk1: int = None
    perk1Var1: int = None
    perk1Var2: int = None
    perk1Var3: int = None
    perk2: int = None
    perk2Var1: int = None
    perk2Var2: int = None
    perk2Var3: int = None
    perk3: int = None
    perk3Var1: int = None
    perk3Var2: int = None
    perk3Var3: int = None
    perk4: int = None
    perk4Var1: int = None
    perk4Var2: int = None
    perk4Var3: int = None
    perk5: int = None
    perk5Var1: int = None
    perk5Var2: int = None
    perk5Var3: int = None
    perkPrimaryStyle: int = None
    perkSubStyle: int = None
    statPerk0: int = None
    statPerk1: int = None
    statPerk2: int = None
    neutralMinionsKilledTeamJungle: int = None
    # First-objective participation flags
    firstInhibitorKill: bool = None
    firstInhibitorAssist: bool = None
    firstBloodKill: bool = None
    firstBloodAssist: bool = None
    firstTowerKill: bool = None
    firstTowerAssist: bool = None
@dataclass
class Participant:
    """One participant slot of a match: identifiers, summoner-spell picks and
    the nested stats/timeline blobs."""
    participantId: int
    teamId: int
    championId: int
    spell1Id: int
    spell2Id: int
    stats: Stats
    timeline: Timeline
@dataclass
class Ban:
    """A champion ban and the pick-turn on which it happened."""
    championId: int
    pickTurn: int
@dataclass
class Team:
    """Team-level result and objective counters for one side of a match."""
    teamId: int
    win: bool
    firstBlood: bool
    firstTower: bool
    firstBaron: bool
    firstDragon: bool
    firstRiftHerald: bool
    towerKills: int
    inhibitorKills: int
    baronKills: int
    dragonKills: int
    vilemawKills: int
    riftHeraldKills: int
    dominionVictoryScore: int
    bans: List[Ban]
    # Optional in some payloads, hence the None default
    firstInhibitor: bool = None
@dataclass
class Match:
    """One full match record: game metadata plus teams, participants and
    participant identities."""
    gameId: int
    platformId: str
    gameCreation: int
    gameDuration: int
    queueId: int
    mapId: int
    seasonId: int
    gameVersion: str
    gameMode: str
    gameType: str
    teams: List[Team]
    participants: List[Participant]
    participantIdentities: List[ParticipantId]
    # 0-based index of the tracked user inside `participants`; set by get_id
    userIndex: int = None
    def get_id(self, sum_id):
        """Cache the 0-based participant index whose accountId equals
        `sum_id` in `userIndex` (left unchanged when no identity matches)."""
        for part in self.participantIdentities:
            if sum_id == part.player.accountId:
                self.userIndex = part.participantId-1
@dataclass
class MatchList:
    """Thin wrapper around a list of Match objects."""
    matchlist: List[Match]
    def __len__(self):
        # Length delegates to the underlying list
        return len(self.matchlist)
# %%
|
{"/dataMan_class.py": ["/dataMan.py", "/riotClass.py"]}
|
36,968
|
ragnarak54/PortsVoyageOptimizer
|
refs/heads/master
|
/main.py
|
import member
from itertools import combinations
from typing import List
import random
import datetime
class Voyage:
    """Requirement profile for a voyage: three stat thresholds and a duration."""
    def __init__(self, morale, combat, seafaring, time):
        # Store all four requirements in one shot
        (self.morale, self.combat,
         self.seafaring, self.time) = (morale, combat, seafaring, time)
# Candidate crew pool, generated in the member module
members = member.crew
def percent(crew: List[member.Member], voyage: Voyage):
    """Fraction of the voyage's requirements met by `crew`: the smallest of
    the crew's summed morale/combat/seafaring relative to each threshold."""
    total_morale = sum(m.morale for m in crew)
    total_combat = sum(m.combat for m in crew)
    total_seafaring = sum(m.seafaring for m in crew)
    return min(total_morale / voyage.morale,
               total_combat / voyage.combat,
               total_seafaring / voyage.seafaring)
def optimal_crew(voyage: Voyage):
    """Brute-force the best 5-member crew plus captain for `voyage` over every
    combination; ties keep the earlier candidate."""
    # Seed with an arbitrary starting lineup
    best = members[:5] + [random.choice(member.captains)]
    best_score = percent(best, voyage)
    for combo in combinations(members, 5):
        for captain in member.captains:
            candidate = list(combo) + [captain]
            score = percent(candidate, voyage)
            # Strict improvement only, so earlier candidates win ties
            if best_score < score:
                best = candidate
                best_score = score
    return best
# Time the brute-force search for a sample voyage and report crew + coverage
now = datetime.datetime.now()
best_crew = optimal_crew(Voyage(14400, 14400, 9000, 0))
print(best_crew, percent(best_crew, Voyage(14400, 14400, 9000, 0)))
print(f'time elapsed: {(datetime.datetime.now() - now)}')
|
{"/main.py": ["/member.py"]}
|
36,969
|
ragnarak54/PortsVoyageOptimizer
|
refs/heads/master
|
/member.py
|
import random
class Member:
    """A crew member with three voyage stats plus a speed value."""
    def __init__(self, morale, combat, seafaring, speed):
        (self.morale, self.combat,
         self.seafaring, self.speed) = (morale, combat, seafaring, speed)
    def __repr__(self):
        # Speed is deliberately left out of the repr
        return repr([self.morale, self.combat, self.seafaring])
class Bow:
    """A named ship-bow part granting morale/combat/seafaring bonuses."""
    def __init__(self, name, morale, combat, seafaring):
        self.name = name
        (self.morale, self.combat, self.seafaring) = (morale, combat, seafaring)
class Hull:
    """A named ship-hull part granting morale/combat/seafaring bonuses."""
    def __init__(self, name, morale, combat, seafaring):
        self.name = name
        (self.morale, self.combat, self.seafaring) = (morale, combat, seafaring)
class OnDeck:
    """A named on-deck ship part granting morale/combat/seafaring bonuses."""
    def __init__(self, name, morale, combat, seafaring):
        self.name = name
        (self.morale, self.combat, self.seafaring) = (morale, combat, seafaring)
class Ship:
    """Aggregates the four fitted ship parts into one object."""
    def __init__(self, bow: Bow, hull: Hull, on_deck: OnDeck, rudder):
        # Plain attribute storage; no derived state
        self.rudder = rudder
        self.on_deck = on_deck
        self.hull = hull
        self.bow = bow
# Five preset captains covering different stat specializations
captains = [Member(1000, 0, 0, 0), Member(0, 1000, 0, 0), Member(0, 0, 1000, 0),
            Member(500, 500, 500, 0), Member(0, 500, 500, 0)]
# Random 25-member crew pool with every stat drawn uniformly from [0, 2000]
crew = [Member(random.randint(0, 2000), random.randint(0, 2000),
               random.randint(0, 2000), random.randint(0, 2000))
        for _ in range(25)]
|
{"/main.py": ["/member.py"]}
|
36,974
|
gcbeltramini/etl-cassandra
|
refs/heads/master
|
/test_db_utils.py
|
from pytest import mark
from db_utils import create_insert_command
@mark.parametrize('table_name, table_info, expected', [
    # Plain typed column list
    ('foo', '(bar INT, baz TEXT, qwerty1 TEXT)',
     'INSERT INTO foo (bar, baz, qwerty1)\nVALUES (%s, %s, %s);'),
    # Typed columns mixed with a PARTITION KEY clause, which must be ignored
    ('my_table', '(c1 TEXT, c2 INT, PARTITION KEY ((c1), c2));',
     'INSERT INTO my_table (c1, c2)\nVALUES (%s, %s);'),
])
def test_create_insert_command(table_name, table_info, expected):
    """create_insert_command emits one %s placeholder per typed column."""
    result = create_insert_command(table_name, table_info)
    assert result == expected
|
{"/test_db_utils.py": ["/db_utils.py"]}
|
36,975
|
gcbeltramini/etl-cassandra
|
refs/heads/master
|
/db_utils.py
|
import csv
import glob
import os
import re
from typing import Callable, List, Tuple
from cassandra.cluster import Cluster, Session
CASSANDRA_KEYSPACE = 'udacity'
HEADER = ('artist', 'userFirstName', 'userGender', 'itemInSession',
'userLastName', 'songLength', 'level', 'userLocation', 'sessionId',
'songTitle', 'userId') # "level": paid or free
def get_files(parent_dir: str, extension: str = 'csv') -> List[str]:
    """
    List, sorted, every file with the given extension found under
    `parent_dir` (including subdirectories).

    Parameters
    ----------
    parent_dir : str
        Parent directory.
    extension : str, optional
        File extension.

    Returns
    -------
    list[str]
        Sorted list of full file names.
    """
    pattern = f'*.{extension:s}'
    found = []
    for root, _, _ in os.walk(parent_dir):
        found += glob.glob(os.path.join(root, pattern))
    found.sort()
    return found
def read_all_rows(csv_files: List[str]) -> List[List[str]]:
    """
    Collect the data rows of every CSV file (headers excluded), concatenated
    in file order.

    Parameters
    ----------
    csv_files : list[str]
        CSV file names.

    Returns
    -------
    list[list[str]]
        One entry per data row; each entry is that row's column values.
    """
    all_rows: List[List[str]] = []
    for filename in csv_files:
        with open(filename, 'r', encoding='utf8', newline='') as handle:
            reader = csv.reader(handle)
            next(reader)  # drop the header row
            all_rows.extend(reader)
    return all_rows
def read_header(csv_filename: str) -> List[str]:
    """
    Return only the first (header) row of a CSV file.

    Parameters
    ----------
    csv_filename : str
        CSV file name.

    Returns
    -------
    list[str]
        The column names from the file's first row.
    """
    with open(csv_filename, 'r', encoding='utf8', newline='') as handle:
        return next(csv.reader(handle))
def write_csv(rows: List[str],
              csv_filename: str,
              header: Tuple[str] = HEADER) -> None:
    """
    Write a quoted CSV file containing a fixed subset of the columns of
    `rows`, preceded by `header`.

    Rows whose first column (the artist) is empty are skipped.

    Parameters
    ----------
    rows: list[str]
        Rows to write to `csv_filename`.
    csv_filename : str
        File name of the new CSV file.
    header : tuple[str]
        Column names.

    Returns
    -------
    None
        File `csv_filename` is created with the selected content of `rows`.
    """
    csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,
                         skipinitialspace=True)
    # Source-column indices that line up with `header`
    keep = (0, 2, 3, 4, 5, 6, 7, 8, 12, 13, 16)
    with open(csv_filename, 'w', encoding='utf8', newline='') as out:
        writer = csv.writer(out, dialect='myDialect')
        writer.writerow(header)
        for row in rows:
            if row[0] == '':  # no artist -> skip this row
                continue
            writer.writerow(tuple(row[idx] for idx in keep))
def create_insert_command(table_name: str, table_info: str) -> str:
    """
    Create an INSERT command with one ``%s`` placeholder per column.

    Parameters
    ----------
    table_name : str
        Table name.
    table_info : str
        Column definition string as passed to CREATE TABLE; only tokens of
        the form ``<name> INT|FLOAT|TEXT`` are treated as columns, so key
        clauses are ignored.

    Returns
    -------
    str
        SQL INSERT command.
    """
    # BUG FIX: use a raw string — '(\w+)' in a plain literal relies on Python
    # passing the unknown escape through and warns on modern interpreters.
    columns = re.findall(r'(\w+) (?:INT|FLOAT|TEXT)', table_info)
    placeholders = ', '.join(['%s'] * len(columns))
    return (f'INSERT INTO {table_name} ({", ".join(columns)})\n'
            f'VALUES ({placeholders});')
def cassandra_connect(keyspace: str = CASSANDRA_KEYSPACE
                      ) -> Tuple[Cluster, Session]:
    """
    Connect to a local Apache Cassandra instance, create `keyspace` if it is
    missing and make it the session default.

    Parameters
    ----------
    keyspace : str, optional
        Default keyspace to set for all queries made through the session.

    Returns
    -------
    cassandra.cluster.Cluster, cassandra.cluster.Session
    """
    cluster = Cluster(['127.0.0.1'])  # single local node
    session = cluster.connect()
    # SimpleStrategy with RF 1 is enough for a one-node development setup
    session.execute(f"CREATE KEYSPACE IF NOT EXISTS {keyspace:s}\n"
                    "WITH REPLICATION = { 'class' : 'SimpleStrategy', "
                    "'replication_factor' : 1 }")
    session.set_keyspace(keyspace)
    return cluster, session
def drop_table(table: str, session: Session) -> None:
    """
    Drop `table` if it exists; a no-op otherwise.

    Parameters
    ----------
    table : str
        Table name.
    session : cassandra.cluster.Session
        Cassandra session.
    """
    session.execute(f'DROP TABLE IF EXISTS {table:s};')
def create_table(table_name: str, table_info: str, session: Session) -> None:
    """
    Recreate a table: drop any existing version, then run CREATE TABLE.

    Parameters
    ----------
    table_name : str
        Table name.
    table_info : str
        Table information passed to SQL CREATE TABLE command.
    session : cassandra.cluster.Session
        Cassandra session.
    """
    drop_table(table_name, session)
    session.execute(
        f'CREATE TABLE IF NOT EXISTS {table_name:s}\n{table_info:s};')
def insert_rows(table_name: str, table_info: str,
                row_fn: Callable[[tuple], tuple],
                csv_file: str, session: Session) -> None:
    """
    Insert every data row of `csv_file` into `table_name`.

    Parameters
    ----------
    table_name : str
        Table name.
    table_info : str
        Column definitions, used to derive the INSERT statement.
    row_fn : callable[tuple, tuple]
        Converts a raw CSV row into the tuple bound to the placeholders.
    csv_file : str
        Source CSV file; its first (header) row is skipped.
    session : cassandra.cluster.Session
        Cassandra session.
    """
    insert_stmt = create_insert_command(table_name, table_info)
    with open(csv_file, encoding='utf8') as src:
        reader = csv.reader(src)
        next(reader)  # skip header
        for record in reader:
            session.execute(insert_stmt, row_fn(record))
def run_query(query: str, session: Session) -> None:
    """
    Execute `query` and print each returned row on its own line.

    Parameters
    ----------
    query : str
        SQL query.
    session : cassandra.cluster.Session
        Cassandra session.
    """
    for row in session.execute(query):
        print(row)
|
{"/test_db_utils.py": ["/db_utils.py"]}
|
37,005
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/workflow.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import atexit
import http
import io
import json
import jsonschema
import logging
import pathlib
import platform
import shutil
import sys
import threading
import time
import types
import uuid
from pathlib import Path
from typing import List, Mapping, Pattern, Tuple, Union
from urllib import request, parse
from rocrate import rocrate
import bagit
# We have preference for the C based loader and dumper, but the code
# should fallback to default implementations when C ones are not present
try:
from yaml import CLoader as YAMLLoader, CDumper as YAMLDumper
except ImportError:
from yaml import Loader as YAMLLoader, Dumper as YAMLDumper
import yaml
import crypt4gh.lib
import crypt4gh.keys
from .common import *
from .encrypted_fs import *
from .engine import WorkflowEngine, WorkflowEngineException
from .engine import WORKDIR_WORKFLOW_META_FILE, WORKDIR_SECURITY_CONTEXT_FILE, WORKDIR_PASSPHRASE_FILE
from .engine import WORKDIR_MARSHALLED_STAGE_FILE, WORKDIR_MARSHALLED_EXECUTE_FILE, WORKDIR_MARSHALLED_EXPORT_FILE
from .engine import WORKDIR_INPUTS_RELDIR, WORKDIR_INTERMEDIATE_RELDIR, WORKDIR_META_RELDIR, WORKDIR_OUTPUTS_RELDIR, \
WORKDIR_ENGINE_TWEAKS_RELDIR
from .cache_handler import SchemeHandlerCacheHandler
from .utils.marshalling_handling import marshall_namedtuple, unmarshall_namedtuple
from .fetchers import DEFAULT_SCHEME_HANDLERS
from .fetchers.pride import SCHEME_HANDLERS as PRIDE_SCHEME_HANDLERS
from .fetchers.trs_files import INTERNAL_TRS_SCHEME_PREFIX, SCHEME_HANDLERS as INTERNAL_TRS_SCHEME_HANDLERS
from .nextflow_engine import NextflowWorkflowEngine
from .cwl_engine import CWLWorkflowEngine
# The list of classes to be taken into account
# CWL detection is before, as Nextflow one is
# a bit lax (only detects a couple of too common
# keywords)
WORKFLOW_ENGINE_CLASSES = [
    # Order matters: CWL detection must run before the laxer Nextflow one
    CWLWorkflowEngine,
    NextflowWorkflowEngine,
]
class WF:
    """
    Workflow enaction class
    """
    # Number of words in a generated crypt4gh passphrase
    DEFAULT_PASSPHRASE_LENGTH = 4
    # Keys used inside the 'crypt4gh' section of the local configuration
    CRYPT4GH_SECTION = 'crypt4gh'
    CRYPT4GH_PRIVKEY_KEY = 'key'
    CRYPT4GH_PUBKEY_KEY = 'pub'
    CRYPT4GH_PASSPHRASE_KEY = 'passphrase'
    # Cache filenames for metadata fetched from the TRS endpoint
    TRS_METADATA_FILE = 'trs_metadata.json'
    TRS_QUERY_CACHE_FILE = 'trs_result.json'
    TRS_TOOL_FILES_FILE = 'trs_tool_files.json'
    # JSON Schema locations used by ConfigValidate
    SCHEMAS_REL_DIR = 'schemas'
    CONFIG_SCHEMA = 'config.json'
    SECURITY_CONTEXT_SCHEMA = 'security-context.json'
    STAGE_DEFINITION_SCHEMA = 'stage-definition.json'
    DEFAULT_RO_EXTENSION = ".crate.zip"
    DEFAULT_TRS_ENDPOINT = "https://dev.workflowhub.eu/ga4gh/trs/v2/"  # root of GA4GH TRS API
    TRS_TOOLS_PATH = 'tools/'
    # One WorkflowType per engine class, and a lookup by TRS descriptor token
    WORKFLOW_ENGINES = list(map(lambda clazz: clazz.WorkflowType(), WORKFLOW_ENGINE_CLASSES))
    RECOGNIZED_TRS_DESCRIPTORS = dict(map(lambda t: (t.trs_descriptor, t), WORKFLOW_ENGINES))
    @classmethod
    def generate_passphrase(cls) -> str:
        """Generate a diceware-style passphrase of DEFAULT_PASSPHRASE_LENGTH
        words, drawn from a randomly selected pwgen_passphrase wordlist."""
        import random
        from pwgen_passphrase.__main__ import generate_passphrase, list_wordlists, read_wordlist
        wordlists_filenames = list_wordlists()
        wordlists_tags = [*wordlists_filenames.keys()]
        # Pick one of the installed wordlists at random
        wordlist_filename = wordlists_filenames[wordlists_tags[random.randrange(len(wordlists_tags))]]
        wordlist = read_wordlist(wordlist_filename).splitlines()
        return generate_passphrase(wordlist, cls.DEFAULT_PASSPHRASE_LENGTH)
    @classmethod
    def bootstrap(cls, local_config, config_directory=None, key_prefix=None):
        """
        Validate and complete the local configuration: make sure the working
        directory exists and that a crypt4gh key pair is available,
        generating one (plus a passphrase) when both keys are missing.

        :param local_config: Relevant local configuration, like the cache directory.
        :param config_directory: The filename to be used to resolve relative paths
        :param key_prefix: Prefix for the default (hidden) key file names
        :type local_config: dict
        :return: (updated, local_config) where `updated` tells whether
            local_config was modified (new key names and/or passphrase)
        """
        import datetime
        import socket
        logger = logging.getLogger(cls.__name__)
        updated = False
        # Getting the config directory
        if config_directory is None:
            config_directory = os.getcwd()
        if not os.path.isabs(config_directory):
            config_directory = os.path.abspath(config_directory)
        if key_prefix is None:
            key_prefix = ''
        # This one is to assure the working directory is created
        workDir = local_config.get('workDir')
        if workDir:
            if not os.path.isabs(workDir):
                workDir = os.path.normpath(os.path.join(config_directory, workDir))
            os.makedirs(workDir, exist_ok=True)
        # Now, checking whether public and private key pairs exist
        numExist = 0
        crypt4ghSect = local_config.get(cls.CRYPT4GH_SECTION)
        if crypt4ghSect is None:
            local_config[cls.CRYPT4GH_SECTION] = {}
            crypt4ghSect = local_config[cls.CRYPT4GH_SECTION]
        for elem in (cls.CRYPT4GH_PRIVKEY_KEY, cls.CRYPT4GH_PUBKEY_KEY):
            fname = crypt4ghSect.get(elem)
            # The default when no filename exist is creating hidden files in the config directory
            if fname is None:
                fname = key_prefix + '.' + elem
                crypt4ghSect[elem] = fname
                updated = True
            if not os.path.isabs(fname):
                fname = os.path.normpath(os.path.join(config_directory, fname))
            if os.path.exists(fname):
                if os.path.getsize(fname) == 0:
                    logger.warning("[WARNING] Installation {} file {} is empty".format(elem, fname))
                else:
                    numExist += 1
            else:
                logger.warning("[WARNING] Installation {} file {} does not exist".format(elem, fname))
        # Having exactly one key of the pair is unrecoverable
        if numExist == 1:
            raise WFException("Inconsistent {} section, as one of the keys is missing".format(cls.CRYPT4GH_SECTION))
        # Time to generate the pairs needed to work with crypt4gh
        if numExist == 0:
            privKey = crypt4ghSect[cls.CRYPT4GH_PRIVKEY_KEY]
            if not os.path.isabs(privKey):
                privKey = os.path.normpath(os.path.join(config_directory, privKey))
            pubKey = crypt4ghSect[cls.CRYPT4GH_PUBKEY_KEY]
            if not os.path.isabs(pubKey):
                pubKey = os.path.normpath(os.path.join(config_directory, pubKey))
            # Reuse a configured passphrase, or generate (and record) one
            if cls.CRYPT4GH_PASSPHRASE_KEY not in crypt4ghSect:
                passphrase = cls.generate_passphrase()
                crypt4ghSect[cls.CRYPT4GH_PASSPHRASE_KEY] = passphrase
                updated = True
            else:
                passphrase = crypt4ghSect[cls.CRYPT4GH_PASSPHRASE_KEY]
            comment = 'WfExS crypt4gh keys {} {} {}'.format(socket.gethostname(), config_directory,
                                                            datetime.datetime.now().isoformat())
            crypt4gh.keys.c4gh.generate(privKey, pubKey, passphrase=passphrase.encode('utf-8'),
                                        comment=comment.encode('utf-8'))
        return updated, local_config
    @classmethod
    def FromDescription(cls, workflow_meta, local_config, creds_config=None, config_directory=None):
        """
        Build a WF instance staged from a workflow description dict.

        :param workflow_meta: The configuration describing both the workflow
        and the inputs to use when it is being instantiated.
        :param local_config: Relevant local configuration, like the cache directory.
        :param creds_config: Dictionary with the different credential contexts (to be implemented)
        :param config_directory:
        :type workflow_meta: dict
        :type local_config: dict
        :type creds_config: dict
        :type config_directory:
        :return: Workflow configuration
        """
        if creds_config is None:
            creds_config = {}
        # NOTE(review): bootstrap appears to mutate local_config in place and
        # its returned copy is discarded here — verify this stays true.
        _, updated_local_config = cls.bootstrap(local_config, config_directory=config_directory)
        return cls(
            local_config,
            config_directory=config_directory
        ).newSetup(
            workflow_meta['workflow_id'],
            workflow_meta.get('version'),
            descriptor_type=workflow_meta.get('workflow_type'),
            trs_endpoint=workflow_meta.get('trs_endpoint', cls.DEFAULT_TRS_ENDPOINT),
            params=workflow_meta.get('params', {}),
            outputs=workflow_meta.get('outputs', {}),
            workflow_config=workflow_meta.get('workflow_config'),
            creds_config=creds_config
        )
@classmethod
def ConfigValidate(cls, configToValidate, relSchemaFile):
# Locating the schemas directory, where all the schemas should be placed
schemaFile = os.path.join(os.path.dirname(__file__), cls.SCHEMAS_REL_DIR, relSchemaFile)
try:
with open(schemaFile, mode="r", encoding="utf-8") as sF:
schema = json.load(sF)
jv = jsonschema.validators.validator_for(schema)(schema)
return list(jv.iter_errors(instance=configToValidate))
except Exception as e:
raise WFException(f"FATAL ERROR: corrupted schema {relSchemaFile}. Reason: {e}")
    def __init__(self, local_config=None, config_directory=None):
        """
        Init function

        :param local_config: Local setup configuration, telling where caching directories live
        :type local_config: dict
        :param config_directory: Directory against which relative paths in the
            configuration are resolved (defaults to the current directory)
        """
        # Getting a logger focused on specific classes
        self.logger = logging.getLogger(self.__class__.__name__)
        if not isinstance(local_config, dict):
            local_config = {}
        # validate the local configuration object; exit hard on failure
        valErrors = self.ConfigValidate(local_config, self.CONFIG_SCHEMA)
        if len(valErrors) > 0:
            self.logger.error(f'ERROR in local configuration block: {valErrors}')
            sys.exit(1)
        self.local_config = local_config
        # External tooling: git command and encrypted-filesystem settings
        toolSect = local_config.get('tools', {})
        self.git_cmd = toolSect.get('gitCommand', DEFAULT_GIT_CMD)
        encfsSect = toolSect.get('encrypted_fs', {})
        encfs_type = encfsSect.get('type', DEFAULT_ENCRYPTED_FS_TYPE)
        try:
            encfs_type = EncryptedFSType(encfs_type)
        except:
            raise WFException('Invalid default encryption filesystem {}'.format(encfs_type))
        if encfs_type not in ENCRYPTED_FS_MOUNT_IMPLEMENTATIONS:
            raise WFException('FIXME: Default encryption filesystem {} mount procedure is not implemented')
        self.encfs_type = encfs_type
        self.encfs_cmd = shutil.which(encfsSect.get('command', DEFAULT_ENCRYPTED_FS_CMD[self.encfs_type]))
        self.fusermount_cmd = encfsSect.get('fusermount_command', DEFAULT_FUSERMOUNT_CMD)
        self.encfs_idleMinutes = encfsSect.get('idle', DEFAULT_ENCRYPTED_FS_IDLE_TIMEOUT)
        # Getting the config directory, needed for relative filenames
        if config_directory is None:
            config_directory = os.getcwd()
        if not os.path.isabs(config_directory):
            config_directory = os.path.abspath(config_directory)
        self.config_directory = config_directory
        # Getting the private and public keys, needed from this point
        crypt4ghSect = local_config.get(self.CRYPT4GH_SECTION, {})
        privKeyFilename = crypt4ghSect[self.CRYPT4GH_PRIVKEY_KEY]
        if not os.path.isabs(privKeyFilename):
            privKeyFilename = os.path.normpath(os.path.join(config_directory, privKeyFilename))
        pubKeyFilename = crypt4ghSect[self.CRYPT4GH_PUBKEY_KEY]
        if not os.path.isabs(pubKeyFilename):
            pubKeyFilename = os.path.normpath(os.path.join(config_directory, pubKeyFilename))
        passphrase = crypt4ghSect[self.CRYPT4GH_PASSPHRASE_KEY]
        # These are the keys to be used
        self.pubKey = crypt4gh.keys.get_public_key(pubKeyFilename)
        self.privKey = crypt4gh.keys.get_private_key(privKeyFilename, lambda: passphrase)
        # This directory will be used to cache repositories and distributable inputs
        cacheDir = local_config.get('cacheDir')
        if cacheDir:
            if not os.path.isabs(cacheDir):
                cacheDir = os.path.normpath(os.path.join(config_directory, cacheDir))
            os.makedirs(cacheDir, exist_ok=True)
        else:
            cacheDir = tempfile.mkdtemp(prefix='WfExS', suffix='backend')
            # Assuring this temporal directory is removed at the end
            atexit.register(shutil.rmtree, cacheDir)
        # Setting up caching directories
        self.cacheDir = cacheDir
        self.cacheWorkflowDir = os.path.join(cacheDir, 'wf-cache')
        os.makedirs(self.cacheWorkflowDir, exist_ok=True)
        self.cacheROCrateDir = os.path.join(cacheDir, 'ro-crate-cache')
        os.makedirs(self.cacheROCrateDir, exist_ok=True)
        self.cacheTRSFilesDir = os.path.join(cacheDir, 'trs-files-cache')
        os.makedirs(self.cacheTRSFilesDir, exist_ok=True)
        self.cacheWorkflowInputsDir = os.path.join(cacheDir, 'wf-inputs')
        os.makedirs(self.cacheWorkflowInputsDir, exist_ok=True)
        # This directory will be used to store the intermediate
        # and final results before they are sent away
        workDir = local_config.get('workDir')
        if workDir:
            if not os.path.isabs(workDir):
                workDir = os.path.normpath(os.path.join(config_directory, workDir))
            os.makedirs(workDir, exist_ok=True)
        else:
            workDir = tempfile.mkdtemp(prefix='WfExS-workdir', suffix='backend')
            # Assuring this temporal directory is removed at the end
            atexit.register(shutil.rmtree, workDir)
        self.baseWorkDir = workDir
        # Per-instance working state; populated later by setup/marshalling
        self.rawWorkDir = None
        self.workDir = None
        self.encWorkDir = None
        self.tempDir = None
        self.encfsThread = None
        self.doUnmount = False
        self.paranoidMode = False
        self.bag = None
        self.stageMarshalled = False
        self.executionMarshalled = False
        self.exportMarshalled = False
        # cacheHandler is created on first use
        self.cacheHandler = SchemeHandlerCacheHandler(self.cacheDir, {})
        # All the custom ones should be added here
        self.cacheHandler.addSchemeHandlers(PRIDE_SCHEME_HANDLERS)
        self.cacheHandler.addSchemeHandlers(INTERNAL_TRS_SCHEME_HANDLERS)
        # These ones should have prevalence over other custom ones
        self.cacheHandler.addSchemeHandlers(DEFAULT_SCHEME_HANDLERS)
def newSetup(self,
             workflow_id,
             version_id,
             descriptor_type=None,
             trs_endpoint=DEFAULT_TRS_ENDPOINT,
             params=None,
             outputs=None,
             workflow_config=None,
             creds_config=None
             ):
    """
    Init function

    :param workflow_id: A unique identifier of the workflow. Although it is an integer in WorkflowHub,
    we cannot assume it is so in all the GA4GH TRS implementations which are exposing workflows.
    :param version_id: An identifier of the workflow version. Although it is an integer in
    WorkflowHub, we cannot assume the format of the version id, as it could follow semantic
    versioning, providing an UUID, etc.
    :param descriptor_type: The type of descriptor that represents this version of the workflow
    (e.g. CWL, WDL, NFL, or GALAXY). It is optional, so it is guessed from the calls to the API.
    :param trs_endpoint: The TRS endpoint used to find the workflow.
    :param params: Optional params for the workflow execution.
    :param outputs: Optional declaration of the expected outputs.
    :param workflow_config: Tweaks for workflow enactment, like some overrides
    :param creds_config: Dictionary with the different credential contexts
    :type workflow_id: str
    :type version_id: str
    :type descriptor_type: str
    :type trs_endpoint: str
    :type params: dict
    :type outputs: dict
    :type workflow_config: dict
    :type creds_config: dict
    :raises WFException: when either the workflow staging definition or the
    security context block does not validate against its JSON Schema.
    :return: this instance, already set up (fluent style)
    """
    if not isinstance(workflow_config, dict):
        workflow_config = {}

    # Rebuild a staging definition document, so it can be validated
    # against the very same schema used for external staging files
    workflow_meta = {
        'workflow_id': workflow_id
    }
    if version_id is not None:
        workflow_meta['version'] = version_id
    if descriptor_type is not None:
        workflow_meta['workflow_type'] = descriptor_type
    if trs_endpoint is not None:
        workflow_meta['trs_endpoint'] = trs_endpoint
    if workflow_config is not None:
        workflow_meta['workflow_config'] = workflow_config
    if params is not None:
        workflow_meta['params'] = params
    if outputs is not None:
        workflow_meta['outputs'] = outputs

    valErrors = self.ConfigValidate(workflow_meta, self.STAGE_DEFINITION_SCHEMA)
    if len(valErrors) > 0:
        self.logger.error(f'ERROR in workflow staging definition block: {valErrors}')
        raise WFException(f'ERROR in workflow staging definition block: {valErrors}')

    if not isinstance(creds_config, dict):
        creds_config = {}

    valErrors = self.ConfigValidate(creds_config, self.SECURITY_CONTEXT_SCHEMA)
    if len(valErrors) > 0:
        self.logger.error(f'ERROR in security context block: {valErrors}')
        raise WFException(f'ERROR in security context block: {valErrors}')

    # Normalize the optional blocks (only after validation)
    if not isinstance(params, dict):
        params = {}

    if not isinstance(outputs, dict):
        outputs = {}

    # Workflow-specific
    self.workflow_config = workflow_config
    self.creds_config = creds_config

    self.id = str(workflow_id)
    self.version_id = str(version_id)
    self.descriptor_type = descriptor_type
    self.params = params
    self.outputs = self.parseExpectedOutputs(outputs)

    # The endpoint should always end with a slash
    if isinstance(trs_endpoint, str):
        if trs_endpoint[-1] != '/':
            trs_endpoint += '/'

        # Removing the tools suffix, which appeared in first WfExS iterations
        if trs_endpoint.endswith('/' + self.TRS_TOOLS_PATH):
            trs_endpoint = trs_endpoint[0:-len(self.TRS_TOOLS_PATH)]

    self.trs_endpoint = trs_endpoint

    if self.rawWorkDir is None:
        # A brand new instance gets a fresh identifier
        self.instanceId = str(uuid.uuid4())

        # This directory is the raw working directory
        # If the intermediate results should be hold in an encrypted
        # temporary directory, this directory will hold it
        uniqueRawWorkDir = os.path.join(self.baseWorkDir, self.instanceId)
        os.makedirs(uniqueRawWorkDir, exist_ok=True)
        self.rawWorkDir = uniqueRawWorkDir

    # TODO: enforce restrictive permissions on each raw working directory
    self.allowOther = False

    self.secure = workflow_config.get('secure', True)
    if self.workDir is None:
        # Paranoid mode always implies an encrypted working directory
        doSecureWorkDir = self.secure or self.paranoidMode

        self.setupWorkdir(doSecureWorkDir)

    # This directory will hold either symbolic links to the cached
    # inputs, or the inputs properly post-processed (decompressed,
    # decrypted, etc....)
    self.inputsDir = os.path.join(self.workDir, WORKDIR_INPUTS_RELDIR)
    os.makedirs(self.inputsDir, exist_ok=True)
    # This directory should hold intermediate workflow steps results
    self.intermediateDir = os.path.join(self.workDir, WORKDIR_INTERMEDIATE_RELDIR)
    os.makedirs(self.intermediateDir, exist_ok=True)
    # This directory will hold the final workflow results, which could
    # be either symbolic links to the intermediate results directory
    # or newly generated content
    self.outputsDir = os.path.join(self.workDir, WORKDIR_OUTPUTS_RELDIR)
    os.makedirs(self.outputsDir, exist_ok=True)
    # This directory is here for those files which are created in order
    # to tweak or patch workflow executions
    self.engineTweaksDir = os.path.join(self.workDir, WORKDIR_ENGINE_TWEAKS_RELDIR)
    os.makedirs(self.engineTweaksDir, exist_ok=True)
    # This directory will hold metadata related to the execution
    self.metaDir = os.path.join(self.workDir, WORKDIR_META_RELDIR)
    os.makedirs(self.metaDir, exist_ok=True)

    # Persist the effective configuration, unless it is already there
    self.marshallConfig(overwrite=False)

    # Reset every piece of state derived from fetching / materializing
    self.repoURL = None
    self.repoTag = None
    self.repoRelPath = None
    self.repoDir = None
    self.repoEffectiveCheckout = None
    self.engine = None
    self.engineVer = None
    self.engineDesc = None

    self.materializedParams = None
    self.localWorkflow = None
    self.materializedEngine = None
    self.listOfContainers = None

    self.exitVal = None
    self.augmentedInputs = None
    self.matCheckOutputs = None
    self.cacheROCrateFilename = None

    return self
# Path to the system-wide FUSE configuration, parsed by setupWorkdir to
# detect whether the 'user_allow_other' option is enabled
FUSE_SYSTEM_CONF = '/etc/fuse.conf'
def setupWorkdir(self, doSecureWorkDir):
    """
    Set up the per-instance working directory below the raw working directory.

    When doSecureWorkDir is True, an encryption filesystem (configured
    through self.encfs_type / self.encfs_cmd) is mounted on the 'work'
    subdirectory, reusing — or creating — a crypt4gh-protected passphrase
    file stored next to it; otherwise the raw working directory itself
    is used as working directory. A world-writable, sticky-bit '.TEMP'
    directory is always created in the raw working directory.

    :param doSecureWorkDir: whether the working directory must be encrypted
    :raises WFException: on an unknown or unimplemented encryption
    filesystem, or on a malformed passphrase file
    """
    uniqueRawWorkDir = self.rawWorkDir

    allowOther = False
    if doSecureWorkDir:
        # We need to detect whether fuse has enabled user_allow_other
        # the only way I know is parsing /etc/fuse.conf
        if not self.paranoidMode and os.path.exists(self.FUSE_SYSTEM_CONF):
            with open(self.FUSE_SYSTEM_CONF, mode="r") as fsc:
                for line in fsc:
                    if line.startswith('user_allow_other'):
                        allowOther = True
                        break
            self.logger.debug(f"FUSE has user_allow_other: {allowOther}")

        uniqueEncWorkDir = os.path.join(uniqueRawWorkDir, '.crypt')
        uniqueWorkDir = os.path.join(uniqueRawWorkDir, 'work')

        # The directories should exist before calling encryption FS mount
        os.makedirs(uniqueEncWorkDir, exist_ok=True)
        os.makedirs(uniqueWorkDir, exist_ok=True)

        # This is the passphrase needed to decrypt the filesystem
        passphraseFile = os.path.join(uniqueRawWorkDir, WORKDIR_PASSPHRASE_FILE)
        encfs_cmd = self.encfs_cmd
        if os.path.exists(passphraseFile):
            # Reuse the stored passphrase, decrypting it with this
            # instance's private key
            clearF = io.BytesIO()
            with open(passphraseFile, mode="rb") as encF:
                crypt4gh.lib.decrypt(
                    [(0, self.privKey, None)],
                    encF,
                    clearF,
                    offset=0,
                    span=None,
                    sender_pubkey=None
                )

            # The decrypted payload format is '<encfs type>=<passphrase>'
            encfs_type, _, securePassphrase = clearF.getvalue().decode('utf-8').partition('=')
            self.logger.debug(encfs_type + ' ' + securePassphrase)
            try:
                encfs_type = EncryptedFSType(encfs_type)
            except:
                raise WFException('Invalid encryption filesystem {} in working directory'.format(encfs_type))
            if encfs_type not in ENCRYPTED_FS_MOUNT_IMPLEMENTATIONS:
                # FIX: the original message had an unformatted '{}' placeholder
                raise WFException('FIXME: Encryption filesystem {} mount procedure is not implemented'.format(encfs_type))

            # If the working directory encrypted filesystem does not
            # match the configured one, use its default executable
            if encfs_type != self.encfs_type:
                encfs_cmd = DEFAULT_ENCRYPTED_FS_CMD[encfs_type]

            if securePassphrase == '':
                raise WFException('Encryption filesystem key does not follow the right format')
        else:
            # First use: generate a fresh passphrase and store it,
            # encrypted with this instance's key pair
            securePassphrase = self.generate_passphrase()
            encfs_type = self.encfs_type
            clearF = io.BytesIO((encfs_type.value + '=' + securePassphrase).encode('utf-8'))
            with open(passphraseFile, mode="wb") as encF:
                crypt4gh.lib.encrypt(
                    [(0, self.privKey, self.pubKey)],
                    clearF,
                    encF,
                    offset=0,
                    span=None
                )
        del clearF

        # Warn/fail earlier
        if os.path.ismount(uniqueWorkDir):
            # raise WFException("Destination mount point {} is already in use")
            self.logger.warning("Destination mount point {} is already in use".format(uniqueWorkDir))
        else:
            # Now, time to mount the encrypted FS
            ENCRYPTED_FS_MOUNT_IMPLEMENTATIONS[encfs_type](encfs_cmd, self.encfs_idleMinutes, uniqueEncWorkDir,
                                                           uniqueWorkDir, uniqueRawWorkDir, securePassphrase,
                                                           allowOther)

            # and start the thread which keeps the mount working
            self.encfsThread = threading.Thread(target=self._wakeupEncDir, daemon=True)
            self.encfsThread.start()

            # We are going to unmount what we have mounted
            self.doUnmount = True

        # self.encfsPassphrase = securePassphrase
        del securePassphrase
    else:
        uniqueEncWorkDir = None
        uniqueWorkDir = uniqueRawWorkDir

    # The temporary directory is in the raw working directory as
    # some container engine could fail
    uniqueTempDir = os.path.join(uniqueRawWorkDir, '.TEMP')
    os.makedirs(uniqueTempDir, exist_ok=True)
    # World-writable with the sticky bit, as container payloads may run
    # under a different uid
    os.chmod(uniqueTempDir, 0o1777)

    # Setting up working directories, one per instance
    self.encWorkDir = uniqueEncWorkDir
    self.workDir = uniqueWorkDir
    self.tempDir = uniqueTempDir
    self.allowOther = allowOther
def _wakeupEncDir(self):
    """
    Keep-alive loop for the mounted working directory.

    Stats the mount point once per minute so idle-based auto-unmount
    timers of the encryption filesystem see activity.
    """
    one_minute = 60
    while True:
        time.sleep(one_minute)
        # The stat performed by isdir is enough; the result is irrelevant
        os.path.isdir(self.workDir)
def unmountWorkdir(self):
    """
    Unmount the encrypted working directory, when this instance mounted it.

    Runs `fusermount -u -z` (lazy unmount), capturing its output; on a
    non-zero exit code a WFException carrying stdout/stderr is raised.
    Safe to call repeatedly: the doUnmount flag prevents double work.
    """
    if self.doUnmount and (self.encWorkDir is not None):
        # Only unmount if it is needed
        if os.path.ismount(self.workDir):
            with tempfile.NamedTemporaryFile() as encfs_umount_stdout, tempfile.NamedTemporaryFile() as encfs_umount_stderr:
                fusermountCommand = [
                    self.fusermount_cmd,
                    '-u',  # Umount the directory
                    '-z',  # Even if it is not possible to umount it now, hide the mount point
                    self.workDir,
                ]

                retval = subprocess.Popen(
                    fusermountCommand,
                    stdout=encfs_umount_stdout,
                    stderr=encfs_umount_stderr,
                ).wait()

                if retval != 0:
                    # Recover the captured output to build a meaningful error
                    with open(encfs_umount_stdout.name, mode="r") as c_stF:
                        encfs_umount_stdout_v = c_stF.read()
                    with open(encfs_umount_stderr.name, mode="r") as c_stF:
                        encfs_umount_stderr_v = c_stF.read()

                    errstr = "Could not umount {} (retval {})\nCommand: {}\n======\nSTDOUT\n======\n{}\n======\nSTDERR\n======\n{}".format(
                        self.encfs_type, retval, ' '.join(fusermountCommand), encfs_umount_stdout_v,
                        encfs_umount_stderr_v)
                    raise WFException(errstr)

        # This is needed to avoid double work
        self.doUnmount = False
        self.encWorkDir = None
        self.workDir = None
def cleanup(self):
    """Release per-instance resources (currently, the encrypted workdir mount)."""
    self.unmountWorkdir()
def fromWorkDir(self, workflowWorkingDirectory):
    """
    Configure this instance from an already staged working directory.

    :param workflowWorkingDirectory: absolute path, or path relative to
    the base working directory, of the staged instance directory
    :raises WFException: when no directory is given, it does not exist,
    or it lacks the staged metadata subdirectory
    :return: whatever fromFiles returns
    """
    if workflowWorkingDirectory is None:
        raise WFException('Unable to initialize, no directory provided')

    # Obtaining the absolute path to the working directory
    absWorkDir = workflowWorkingDirectory
    if not os.path.isabs(absWorkDir):
        absWorkDir = os.path.normpath(os.path.join(self.baseWorkDir, absWorkDir))

    if not os.path.isdir(absWorkDir):
        raise WFException('Unable to initialize, {} is not a directory'.format(absWorkDir))

    self.rawWorkDir = absWorkDir
    self.instanceId = os.path.basename(absWorkDir)

    # The presence of the passphrase file tells whether this workdir is encrypted
    passphraseFile = os.path.join(self.rawWorkDir, WORKDIR_PASSPHRASE_FILE)
    self.setupWorkdir(os.path.exists(passphraseFile))

    stagedMetaDir = os.path.join(self.workDir, WORKDIR_META_RELDIR)
    if not os.path.isdir(stagedMetaDir):
        raise WFException("Staged working directory {} is incomplete".format(self.workDir))

    # In order to be able to build next paths to call
    return self.fromFiles(
        os.path.join(stagedMetaDir, WORKDIR_WORKFLOW_META_FILE),
        os.path.join(stagedMetaDir, WORKDIR_SECURITY_CONTEXT_FILE),
    )
def enableParanoidMode(self):
    """Turn on paranoid mode (disables input caching, forces a secure workdir)."""
    self.paranoidMode = True
def fromFiles(self, workflowMetaFilename, securityContextsConfigFilename=None, paranoidMode=False):
    """
    Configure this instance from a workflow staging definition file plus
    an optional security contexts file, delegating to fromDescription.

    :param workflowMetaFilename: path to the staging definition (YAML)
    :param securityContextsConfigFilename: optional path to the credentials file (YAML)
    :param paranoidMode: forwarded to fromDescription
    :return: whatever fromDescription returns
    """
    with open(workflowMetaFilename, mode="r", encoding="utf-8") as metaF:
        stagingDesc = unmarshall_namedtuple(yaml.load(metaF, Loader=YAMLLoader))

    # Last, try loading the security contexts credentials file
    credsDesc = {}
    if securityContextsConfigFilename and os.path.exists(securityContextsConfigFilename):
        with open(securityContextsConfigFilename, mode="r", encoding="utf-8") as credsF:
            credsDesc = unmarshall_namedtuple(yaml.load(credsF, Loader=YAMLLoader))

    return self.fromDescription(stagingDesc, credsDesc, paranoidMode=paranoidMode)
def validateConfigFiles(self, workflowMetaFilename, securityContextsConfigFilename=None):
    """
    Validate a workflow staging definition file and (optionally) a
    security contexts file against their JSON Schemas, logging every
    validation error found.

    :param workflowMetaFilename: path to the staging definition (YAML)
    :param securityContextsConfigFilename: optional path to the credentials file (YAML)
    :return: 0 when everything validated, 1 when there was at least one error
    """
    numErrors = 0
    self.logger.info(f'Validating {workflowMetaFilename}')

    with open(workflowMetaFilename, mode="r", encoding="utf-8") as wcf:
        workflow_meta = unmarshall_namedtuple(yaml.load(wcf, Loader=YAMLLoader))

    # A non-dict document is reduced to an empty one, so schema
    # validation reports the missing required keys
    if not isinstance(workflow_meta, dict):
        workflow_meta = {}

    valErrors = self.ConfigValidate(workflow_meta, self.STAGE_DEFINITION_SCHEMA)
    if len(valErrors) == 0:
        self.logger.info('No validation errors in staging definition block')
    else:
        for iErr, valError in enumerate(valErrors):
            self.logger.error(f'ERROR {iErr} in staging definition block: {valError}')
            numErrors += 1

    # Last, try loading the security contexts credentials file
    if securityContextsConfigFilename and os.path.exists(securityContextsConfigFilename):
        self.logger.info(f'Validating {securityContextsConfigFilename}')
        with open(securityContextsConfigFilename, mode="r", encoding="utf-8") as scf:
            creds_config = unmarshall_namedtuple(yaml.load(scf, Loader=YAMLLoader))

        valErrors = self.ConfigValidate(creds_config, self.SECURITY_CONTEXT_SCHEMA)
        if len(valErrors) == 0:
            self.logger.info('No validation errors in security block')
        else:
            for iErr, valError in enumerate(valErrors):
                self.logger.error(f'ERROR {iErr} in security context block: {valError}')
                numErrors += 1

    return 1 if numErrors > 0 else 0
def fromDescription(self, workflow_meta, creds_config=None, paranoidMode=False):
    """
    Configure this instance from an in-memory staging description.

    :param workflow_meta: The configuration describing both the workflow
    and the inputs to use when it is being instantiated.
    :param creds_config: Dictionary with the different credential contexts (to be implemented)
    :param paranoidMode: requested paranoid mode; a 'paranoid_mode' value
    preserved in the description takes precedence over it
    :type workflow_meta: dict
    :type creds_config: dict
    :type paranoidMode: bool
    :return: Workflow configuration
    """
    # The preserved paranoid mode must be honoured
    preservedMode = workflow_meta.get('paranoid_mode')
    effectiveParanoid = paranoidMode if preservedMode is None else preservedMode

    if effectiveParanoid:
        self.enableParanoidMode()

    return self.newSetup(
        workflow_meta['workflow_id'],
        workflow_meta.get('version'),
        descriptor_type=workflow_meta.get('workflow_type'),
        trs_endpoint=workflow_meta.get('trs_endpoint', self.DEFAULT_TRS_ENDPOINT),
        params=workflow_meta.get('params', {}),
        outputs=workflow_meta.get('outputs', {}),
        workflow_config=workflow_meta.get('workflow_config'),
        creds_config=creds_config
    )
def fromForm(self, workflow_meta, paranoidMode=False):  # VRE
    """
    Configure this instance from a (VRE web form generated) description.

    Unlike fromDescription, neither outputs nor credentials are taken
    from the description here.

    :param workflow_meta: The configuration describing both the workflow
    and the inputs to use when it is being instantiated.
    :param paranoidMode:
    :type workflow_meta: dict
    :type paranoidMode: bool
    :return: Workflow configuration
    """
    if paranoidMode:
        self.enableParanoidMode()
    return self.newSetup(
        workflow_meta['workflow_id'],
        workflow_meta.get('version'),
        descriptor_type=workflow_meta.get('workflow_type'),
        trs_endpoint=workflow_meta.get('trs_endpoint', self.DEFAULT_TRS_ENDPOINT),
        params=workflow_meta.get('params', {}),
        workflow_config=workflow_meta.get('workflow_config')
    )
def fetchWorkflow(self, offline=False):
    """
    Fetch the whole workflow description based on the data obtained
    from the TRS where it is being published.

    If the workflow id is an URL, it is supposed to be a git repository,
    and the version will represent either the branch, tag or specific commit.
    So, the whole TRS fetching machinery is bypassed.

    :param offline: propagated to the TRS / RO-Crate lookups, to avoid
    remote fetches when possible
    :raises WFException: when no TRS endpoint is usable, a declared
    relative path is missing, or no engine recognizes the workflow
    """
    parsedRepoURL = parse.urlparse(self.id)

    # It is not an absolute URL, so it is being an identifier in the workflow
    if parsedRepoURL.scheme == '':
        if (self.trs_endpoint is not None) and len(self.trs_endpoint) > 0:
            engineDesc, repoURL, repoTag, repoRelPath = self.getWorkflowRepoFromTRS(offline=offline)
        else:
            raise WFException('trs_endpoint was not provided')
    else:
        engineDesc = None

        # Trying to be smarter
        guessedRepoURL, guessedRepoTag, guessedRepoRelPath = self.guessRepoParams(parsedRepoURL, fail_ok=False)

        if guessedRepoURL is not None:
            repoURL = guessedRepoURL
            repoTag = guessedRepoTag if guessedRepoTag is not None else self.version_id
            repoRelPath = guessedRepoRelPath
        else:
            # Last chance: the URL might point to an RO-Crate
            engineDesc, repoURL, repoTag, repoRelPath = self.getWorkflowRepoFromROCrateURL(self.id, offline=offline)

    if repoURL is None:
        # raise WFException('Unable to guess repository from RO-Crate manifest')
        repoURL = self.id
        repoTag = self.version_id
        repoRelPath = None

    repoDir = None
    repoEffectiveCheckout = None
    # A scheme-ful URL is a remote repository which must be
    # materialized; otherwise repoURL is taken as a local path
    if ':' in repoURL:
        parsedRepoURL = parse.urlparse(repoURL)
        if len(parsedRepoURL.scheme) > 0:
            self.repoURL = repoURL
            self.repoTag = repoTag
            # It can be either a relative path to a directory or to a file
            # It could be even empty!
            if repoRelPath == '':
                repoRelPath = None
            self.repoRelPath = repoRelPath

            repoDir, repoEffectiveCheckout = self.doMaterializeRepo(repoURL, repoTag)

    # For the cases of pure TRS repos, like Dockstore
    if repoDir is None:
        repoDir = repoURL

    # Workflow Language version cannot be assumed here yet
    localWorkflow = LocalWorkflow(dir=repoDir, relPath=repoRelPath, effectiveCheckout=repoEffectiveCheckout)
    self.logger.info("materialized workflow repository (checkout {}): {}".format(repoEffectiveCheckout, repoDir))

    if repoRelPath is not None:
        if not os.path.exists(os.path.join(repoDir, repoRelPath)):
            raise WFException(
                "Relative path {} cannot be found in materialized workflow repository {}".format(repoRelPath,
                                                                                                 repoDir))

    # A valid engine must be identified from the fetched content
    # TODO: decide whether to force some specific version
    if engineDesc is None:
        # Probe every known engine until one recognizes the workflow
        for engineDesc in self.WORKFLOW_ENGINES:
            self.logger.debug("Testing engine " + engineDesc.trs_descriptor)
            engine = engineDesc.clazz(cacheDir=self.cacheDir, workflow_config=self.workflow_config,
                                      local_config=self.local_config, engineTweaksDir=self.engineTweaksDir,
                                      cacheWorkflowDir=self.cacheWorkflowDir,
                                      cacheWorkflowInputsDir=self.cacheWorkflowInputsDir,
                                      workDir=self.workDir,
                                      outputsDir=self.outputsDir, intermediateDir=self.intermediateDir,
                                      tempDir=self.tempDir, secure_exec=self.secure or self.paranoidMode,
                                      allowOther=self.allowOther, config_directory=self.config_directory)

            try:
                engineVer, candidateLocalWorkflow = engine.identifyWorkflow(localWorkflow)
                self.logger.debug("Tested engine {} {}".format(engineDesc.trs_descriptor, engineVer))
                if engineVer is not None:
                    break
            except WorkflowEngineException:
                # TODO: store the exceptions, to be shown if no workflow is recognized
                pass
        else:
            # for/else: no engine broke out of the loop
            raise WFException('No engine recognized a workflow at {}'.format(repoURL))
    else:
        self.logger.debug("Fixed engine " + engineDesc.trs_descriptor)
        engine = engineDesc.clazz(cacheDir=self.cacheDir, workflow_config=self.workflow_config,
                                  local_config=self.local_config, engineTweaksDir=self.engineTweaksDir,
                                  cacheWorkflowDir=self.cacheWorkflowDir,
                                  cacheWorkflowInputsDir=self.cacheWorkflowInputsDir,
                                  workDir=self.workDir,
                                  outputsDir=self.outputsDir, intermediateDir=self.intermediateDir,
                                  tempDir=self.tempDir, secure_exec=self.secure or self.paranoidMode,
                                  allowOther=self.allowOther, config_directory=self.config_directory)
        engineVer, candidateLocalWorkflow = engine.identifyWorkflow(localWorkflow)
        if engineVer is None:
            raise WFException(
                'Engine {} did not recognize a workflow at {}'.format(engine.workflowType.engineName, repoURL))

    # Record the identification results in the instance
    self.repoDir = repoDir
    self.repoEffectiveCheckout = repoEffectiveCheckout
    self.engineDesc = engineDesc
    self.engine = engine
    self.engineVer = engineVer
    self.localWorkflow = candidateLocalWorkflow
def setupEngine(self, offline=False):
    """
    Materialize the workflow engine, fetching the workflow first when
    it has not been identified yet.

    :param offline: forwarded to fetchWorkflow
    """
    # The engine is populated by self.fetchWorkflow()
    if self.engine is None:
        self.fetchWorkflow(offline=offline)

    # Prefer the workflow already attached to a materialized engine
    wfToUse = self.localWorkflow if self.materializedEngine is None else self.materializedEngine.workflow

    self.materializedEngine = self.engine.materializeEngine(wfToUse, self.engineVer)
def materializeWorkflow(self, offline=False):
    """
    Ensure both the engine and the workflow (with its containers) are
    materialized.

    :param offline: forwarded to setupEngine and the engine materialization
    """
    if self.materializedEngine is None:
        self.setupEngine(offline=offline)

    # This information is badly needed for provenance
    if self.listOfContainers is None:
        self.materializedEngine, self.listOfContainers = WorkflowEngine.MaterializeWorkflow(
            self.materializedEngine, offline=offline)
def addSchemeHandler(self, scheme, handler):
    """
    Register a fetch handler for an URI scheme.

    :param scheme: the URI scheme; it is lower-cased before registration
    :param handler: a plain function or method implementing the fetch
    :raises WFException: when the handler is not a function/method
    """
    acceptedKinds = (
        types.FunctionType, types.LambdaType, types.MethodType,
        types.BuiltinFunctionType, types.BuiltinMethodType,
    )
    if not isinstance(handler, acceptedKinds):
        raise WFException('Trying to set for scheme {} a invalid handler'.format(scheme))

    self.cacheHandler.addSchemeHandlers({scheme.lower(): handler})
def injectInputs(self, paths, workflowInputs_destdir=None, workflowInputs_cacheDir=None, lastInput=0):
    """
    Inject local files as workflow inputs, symlinking each of them into
    the inputs directory (and registering them in the cache, unless
    running in paranoid mode).

    :param paths: iterable of local filesystem paths to inject
    :param workflowInputs_destdir: destination directory (defaults to self.inputsDir)
    :param workflowInputs_cacheDir: cache directory (defaults to self.cacheWorkflowInputsDir)
    :param lastInput: running counter used to disambiguate colliding filenames
    :return: the updated value of lastInput
    """
    if workflowInputs_destdir is None:
        workflowInputs_destdir = self.inputsDir
    if workflowInputs_cacheDir is None:
        workflowInputs_cacheDir = self.cacheWorkflowInputsDir

    cacheable = not self.paranoidMode
    # The storage dir depends on whether it can be cached or not
    storeDir = workflowInputs_cacheDir if cacheable else workflowInputs_destdir
    for path in paths:
        # We are sending the context name thinking in the future,
        # as it could contain potential hints for authenticated access
        fileuri = parse.urlunparse(('file', '', os.path.abspath(path), '', '', ''))
        matContent = self.downloadInputFile(fileuri, workflowInputs_destdir=storeDir, ignoreCache=not cacheable, registerInCache=cacheable)

        # Now, time to create the symbolic link
        lastInput += 1

        prettyLocal = os.path.join(workflowInputs_destdir, matContent.prettyFilename)

        # An existing entry (or a symlink pointing elsewhere) forces a
        # uniquified name, prefixed with the input counter
        hardenPrettyLocal = False
        if os.path.islink(prettyLocal):
            oldLocal = os.readlink(prettyLocal)

            hardenPrettyLocal = oldLocal != matContent.local
        elif os.path.exists(prettyLocal):
            hardenPrettyLocal = True

        if hardenPrettyLocal:
            # Trying to avoid collisions on input naming
            prettyLocal = os.path.join(workflowInputs_destdir, str(lastInput) + '_' + matContent.prettyFilename)

        if not os.path.exists(prettyLocal):
            os.symlink(matContent.local, prettyLocal)

    return lastInput
def materializeInputs(self, offline: bool = False, lastInput=0):
    """
    Fetch and stage all declared inputs, recording them as the
    materialized params of this instance.

    :param offline: forwarded to fetchInputs
    :param lastInput: initial value of the input-name disambiguation counter
    """
    materialized, _ = self.fetchInputs(
        self.params,
        workflowInputs_destdir=self.inputsDir,
        workflowInputs_cacheDir=self.cacheWorkflowInputsDir,
        offline=offline,
        lastInput=lastInput,
    )
    self.materializedParams = materialized
def fetchInputs(self, params, workflowInputs_destdir: AbsPath = None, workflowInputs_cacheDir: AbsPath = None,
                prefix='', lastInput=0, offline: bool = False) -> Tuple[List[MaterializedInput], int]:
    """
    Fetch the input files for the workflow execution.
    All the inputs must be URLs or CURIEs from identifiers.org / n2t.net.

    :param params: Optional params for the workflow execution.
    :param workflowInputs_destdir: directory where (symlinks to) the inputs are placed
    :param prefix: dotted key prefix accumulated through nested declarations
    :param workflowInputs_cacheDir: directory where cacheable inputs are stored
    :param lastInput: running counter used to disambiguate colliding filenames
    :param offline: when True, only cached content may be used
    :type params: dict
    :type prefix: str
    :return: the list of materialized inputs plus the updated lastInput counter
    """
    theInputs = []

    paramsIter = params.items() if isinstance(params, dict) else enumerate(params)
    for key, inputs in paramsIter:
        # Compose the linear (dotted) key of this entry
        linearKey = prefix + key
        if isinstance(inputs, dict):
            inputClass = inputs.get('c-l-a-s-s')
            if inputClass is not None:
                if inputClass in ("File", "Directory"):  # input files
                    inputDestDir = workflowInputs_destdir
                    globExplode = None
                    if inputClass == 'Directory':
                        # We have to autofill this with the outputs directory,
                        # so results are properly stored (without escaping the jail)
                        if inputs.get('autoFill', False):
                            if inputs.get('autoPrefix', True):
                                autoFilledDir = os.path.join(self.outputsDir, *linearKey.split('.'))
                            else:
                                autoFilledDir = self.outputsDir

                            theInputs.append(MaterializedInput(linearKey, [autoFilledDir]))
                            continue

                        globExplode = inputs.get('globExplode')

                        # This is to nest the directory where to place the different files
                        inputDestDir = os.path.join(inputDestDir, *linearKey.split('.'))
                        os.makedirs(inputDestDir, exist_ok=True)

                    remote_files = inputs['url']
                    cacheable = not self.paranoidMode if inputs.get('cache', True) else False
                    if not isinstance(remote_files, list):  # more than one input file
                        remote_files = [remote_files]

                    remote_pairs = []
                    # The storage dir depends on whether it can be cached or not
                    storeDir = workflowInputs_cacheDir if cacheable else inputDestDir
                    for remote_file in remote_files:
                        # We are sending the context name thinking in the future,
                        # as it could contain potential hints for authenticated access
                        contextName = inputs.get('security-context')
                        matContent = self.downloadInputFile(remote_file,
                                                            workflowInputs_destdir=storeDir,
                                                            contextName=contextName,
                                                            offline=offline,
                                                            ignoreCache=not cacheable,
                                                            registerInCache=cacheable,
                                                            )

                        # Now, time to create the symbolic link
                        lastInput += 1

                        prettyLocal = os.path.join(inputDestDir, matContent.prettyFilename)

                        # A colliding entry (or a symlink pointing elsewhere)
                        # forces a uniquified name, prefixed with the counter
                        hardenPrettyLocal = False
                        if os.path.islink(prettyLocal):
                            oldLocal = os.readlink(prettyLocal)

                            hardenPrettyLocal = oldLocal != matContent.local
                        elif os.path.exists(prettyLocal):
                            hardenPrettyLocal = True

                        if hardenPrettyLocal:
                            # Trying to avoid collisions on input naming
                            prettyLocal = os.path.join(inputDestDir,
                                                       str(lastInput) + '_' + matContent.prettyFilename)

                        if not os.path.exists(prettyLocal):
                            os.symlink(matContent.local, prettyLocal)

                        if globExplode is not None:
                            # Expand the fetched directory into its glob
                            # matches, one materialized entry per match,
                            # deriving each entry's URI from the parent's
                            prettyLocalPath = pathlib.Path(prettyLocal)
                            matParse = parse.urlparse(matContent.uri)
                            for exp in prettyLocalPath.glob(globExplode):
                                relPath = exp.relative_to(prettyLocalPath)
                                relName = str(relPath)
                                relExpPath = matParse.path
                                if relExpPath[-1] != '/':
                                    relExpPath += '/'
                                relExpPath += '/'.join(map(lambda part: parse.quote_plus(part), relPath.parts))
                                expUri = parse.urlunparse((matParse.scheme, matParse.netloc, relExpPath, matParse.params, matParse.query, matParse.fragment))
                                remote_pairs.append(
                                    MaterializedContent(
                                        local=str(exp),
                                        uri=expUri,
                                        prettyFilename=relName,
                                        metadata_array=matContent.metadata_array,
                                        kind=ContentKind.Directory if exp.is_dir() else ContentKind.File
                                    )
                                )
                        else:
                            # NOTE(review): these positional args place kind
                            # before metadata_array, unlike the keyword call
                            # above — confirm this matches MaterializedContent's
                            # declared field order
                            remote_pairs.append(
                                MaterializedContent(prettyLocal, matContent.uri, matContent.prettyFilename,
                                                    matContent.kind, matContent.metadata_array))

                    theInputs.append(MaterializedInput(linearKey, remote_pairs))
                else:
                    raise WFException(
                        'Unrecognized input class "{}", attached to "{}"'.format(inputClass, linearKey))
            else:
                # possible nested files
                newInputsAndParams, lastInput = self.fetchInputs(inputs,
                                                                 workflowInputs_destdir=workflowInputs_destdir,
                                                                 workflowInputs_cacheDir=workflowInputs_cacheDir,
                                                                 prefix=linearKey + '.', lastInput=lastInput,
                                                                 offline=offline)
                theInputs.extend(newInputsAndParams)
        else:
            # Plain values are passed through as a list
            if not isinstance(inputs, list):
                inputs = [inputs]
            theInputs.append(MaterializedInput(linearKey, inputs))

    return theInputs, lastInput
def stageWorkDir(self):
    """
    Run the full staging pipeline, one step after another: fetch the
    workflow, set up and materialize the engine, fetch the inputs and
    marshall the staged state.

    :return: the identifier of the staged instance
    """
    stagingSteps = (
        self.fetchWorkflow,
        self.setupEngine,
        self.materializeWorkflow,
        self.materializeInputs,
        self.marshallStage,
    )
    for step in stagingSteps:
        step()

    return self.instanceId
def workdirToBagit(self):
    """
    Convert the working directory into a BagIt bag, in place.

    BEWARE: This is a destructive step! So, once run, there is no back!
    """
    self.bag = bagit.make_bag(self.workDir)
# Cardinality symbol assumed when an output declares none ("exactly one")
DefaultCardinality = '1'
# Regexp-like cardinality symbols, mapped to (min, max) occurrence tuples
CardinalityMapping = {
    '1': (1, 1),
    '?': (0, 1),
    '*': (0, sys.maxsize),
    '+': (1, sys.maxsize),
}
# Accepted 'c-l-a-s-s' values of an output declaration, mapped to content kinds
OutputClassMapping = {
    ContentKind.File.name: ContentKind.File,
    ContentKind.Directory.name: ContentKind.Directory,
    ContentKind.Value.name: ContentKind.Value,
}
def parseExpectedOutputs(self, outputs: Union[List[Any], Mapping[str, Any]]) -> List[ExpectedOutput]:
    """
    Translate raw output declarations into ExpectedOutput instances.

    Each declaration may provide a 'glob' pattern, a 'cardinality'
    (an int, a [min, max] pair, or one of the '1', '?', '*', '+'
    symbols of CardinalityMapping), a 'c-l-a-s-s' content kind and a
    'preferredName'.

    :param outputs: list or mapping of raw output declarations
    :return: the list of parsed ExpectedOutput instances
    """
    expectedOutputs = []

    # TODO: implement parsing of outputs
    outputsIter = outputs.items() if isinstance(outputs, dict) else enumerate(outputs)

    for outputKey, outputDesc in outputsIter:
        # The glob pattern (an empty string means no pattern at all)
        patS = outputDesc.get('glob')
        if patS is not None:
            if len(patS) == 0:
                patS = None

        # Parsing the cardinality
        cardS = outputDesc.get('cardinality')
        cardinality = None
        if cardS is not None:
            if isinstance(cardS, int):
                # A non-positive int collapses to "optional single"
                if cardS < 1:
                    cardinality = (0, 1)
                else:
                    cardinality = (cardS, cardS)
            elif isinstance(cardS, list):
                cardinality = (int(cardS[0]), int(cardS[1]))
            else:
                cardinality = self.CardinalityMapping.get(cardS)

        if cardinality is None:
            cardinality = self.CardinalityMapping[self.DefaultCardinality]

        eOutput = ExpectedOutput(
            name=outputKey,
            # FIX: the fallback used to be ContentKind.File.name (a str),
            # inconsistent with the ContentKind enum values stored in
            # OutputClassMapping
            kind=self.OutputClassMapping.get(outputDesc.get('c-l-a-s-s'), ContentKind.File),
            preferredFilename=outputDesc.get('preferredName'),
            cardinality=cardinality,
            glob=patS,
        )
        expectedOutputs.append(eOutput)

    return expectedOutputs
def executeWorkflow(self, offline : bool = False):
    """
    Run the staged workflow through its materialized engine, record the
    results on the instance, and marshall them to the metadata directory.

    :param offline: forwarded to unmarshallStage, to avoid remote fetches
    """
    self.unmarshallStage(offline=offline)

    exitVal, augmentedInputs, matCheckOutputs = WorkflowEngine.ExecuteWorkflow(self.materializedEngine,
                                                                               self.materializedParams,
                                                                               self.outputs)

    self.exitVal = exitVal
    self.augmentedInputs = augmentedInputs
    self.matCheckOutputs = matCheckOutputs

    self.logger.debug(exitVal)
    self.logger.debug(augmentedInputs)
    self.logger.debug(matCheckOutputs)

    # TODO: implement store serialized version of exitVal, augmentedInputs and matCheckOutputs
    self.marshallExecute()
def exportResults(self):
    """
    Export the execution results, restoring the execution state first.

    The export itself is still pending implementation; only the export
    marshalling step is performed.
    """
    self.unmarshallExecute(offline=True)
    # TODO
    self.marshallExport()
def marshallConfig(self, overwrite : bool = False):
    """
    Write the effective workflow metadata and the security contexts to
    the metadata directory as YAML files.

    :param overwrite: when True, already existing files are rewritten;
    otherwise they are left untouched
    """
    workflow_meta_file = os.path.join(self.metaDir, WORKDIR_WORKFLOW_META_FILE)
    if overwrite or not os.path.exists(workflow_meta_file):
        with open(workflow_meta_file, mode='w', encoding='utf-8') as wmF:
            # Rebuild the staging definition from the instance state
            workflow_meta = {
                'workflow_id': self.id,
                'paranoid_mode': self.paranoidMode
            }
            if self.version_id is not None:
                workflow_meta['version'] = self.version_id
            if self.descriptor_type is not None:
                workflow_meta['workflow_type'] = self.descriptor_type
            if self.trs_endpoint is not None:
                workflow_meta['trs_endpoint'] = self.trs_endpoint
            if self.workflow_config is not None:
                workflow_meta['workflow_config'] = self.workflow_config
            if self.params is not None:
                workflow_meta['params'] = self.params
            if self.outputs is not None:
                # Outputs are stored as a mapping keyed by output name
                outputs = { output.name: output for output in self.outputs }
                workflow_meta['outputs'] = outputs

            yaml.dump(marshall_namedtuple(workflow_meta), wmF, Dumper=YAMLDumper)

    creds_file = os.path.join(self.metaDir, WORKDIR_SECURITY_CONTEXT_FILE)
    if overwrite or not os.path.exists(creds_file):
        with open(creds_file, mode='w', encoding='utf-8') as crF:
            yaml.dump(marshall_namedtuple(self.creds_config), crF, Dumper=YAMLDumper)
def marshallStage(self, exist_ok : bool = True):
    """
    Persist the staging state (repository, engine, containers and
    materialized params) into the metadata directory.

    :param exist_ok: when False, an already existing marshalled stage file raises
    :raises WFException: when the marshalled stage file already exists
    and exist_ok is False
    """
    if not self.stageMarshalled:
        # The effective config must be persisted before the stage state
        self.marshallConfig(overwrite=False)

        marshalled_stage_file = os.path.join(self.metaDir, WORKDIR_MARSHALLED_STAGE_FILE)
        if os.path.exists(marshalled_stage_file):
            if not exist_ok:
                raise WFException("Marshalled stage file already exists")
            self.logger.debug("Marshalled stage file {} already exists".format(marshalled_stage_file))
        else:
            stage = {
                'repoURL': self.repoURL,
                'repoTag': self.repoTag,
                'repoRelPath': self.repoRelPath,
                'repoEffectiveCheckout': self.repoEffectiveCheckout,
                'engineDesc': self.engineDesc,
                'engineVer': self.engineVer,
                'materializedEngine': self.materializedEngine,
                'containers': self.listOfContainers,
                'materializedParams': self.materializedParams
                # TODO: check nothing essential was left
            }

            self.logger.debug("Creating marshalled stage file {}".format(marshalled_stage_file))
            with open(marshalled_stage_file, mode='w', encoding='utf-8') as msF:
                marshalled_stage = marshall_namedtuple(stage)
                yaml.dump(marshalled_stage, msF, Dumper=YAMLDumper)

        self.stageMarshalled = True
    elif not exist_ok:
        raise WFException("Marshalled stage file already exists")
def unmarshallStage(self, offline : bool = False):
    """
    Restore the staging state from the metadata directory, and set up
    the materialized engine accordingly.

    :param offline: accepted for call symmetry; the engine setup
    performed here is always done offline
    :raises WFException: when the stage state file is missing or cannot
    be parsed back
    """
    if not self.stageMarshalled:
        marshalled_stage_file = os.path.join(self.metaDir, WORKDIR_MARSHALLED_STAGE_FILE)
        if not os.path.exists(marshalled_stage_file):
            raise WFException("Marshalled stage file does not exists. Stage state was not stored")
        self.logger.debug("Parsing marshalled stage state file {}".format(marshalled_stage_file))
        with open(marshalled_stage_file, mode='r', encoding='utf-8') as msF:
            marshalled_stage = yaml.load(msF, Loader=YAMLLoader)

            try:
                stage = unmarshall_namedtuple(marshalled_stage, globals())
                self.repoURL = stage['repoURL']
                self.repoTag = stage['repoTag']
                self.repoRelPath = stage['repoRelPath']
                self.repoEffectiveCheckout = stage['repoEffectiveCheckout']
                self.engineDesc = stage['engineDesc']
                self.engineVer = stage['engineVer']
                self.materializedEngine = stage['materializedEngine']
                self.listOfContainers = stage['containers']
                self.materializedParams = stage['materializedParams']

                # This is needed to properly set up the materializedEngine
                self.setupEngine(offline=True)
            except Exception as e:
                raise WFException("Error while unmarshalling content from stage state file {}. Reason: {}".format(marshalled_stage_file,e))

        self.stageMarshalled = True
def marshallExecute(self, exist_ok : bool = True):
    """
    Persist the execution state (exit value, augmented inputs and
    checked outputs) as a YAML file in the metadata directory.

    The stage state is marshalled first, so the execution file is never
    written without its prerequisite.

    :param exist_ok: when False, an already-existing marshalled
        execution file raises instead of being reused
    :raises WFException: when the file exists and exist_ok is False
    """
    if self.executionMarshalled:
        if not exist_ok:
            raise WFException("Marshalled execution file already exists")
        return

    # Prerequisite: stage state must be on disk first
    self.marshallStage(exist_ok=exist_ok)

    marshalled_execution_file = os.path.join(self.metaDir, WORKDIR_MARSHALLED_EXECUTE_FILE)
    if os.path.exists(marshalled_execution_file):
        if not exist_ok:
            raise WFException("Marshalled execution file already exists")
        self.logger.debug("Marshalled execution file {} already exists".format(marshalled_execution_file))
    else:
        execution = {
            'exitVal': self.exitVal,
            'augmentedInputs': self.augmentedInputs,
            'matCheckOutputs': self.matCheckOutputs
            # TODO: check nothing essential was left
        }

        self.logger.debug("Creating marshalled execution file {}".format(marshalled_execution_file))
        with open(marshalled_execution_file, mode='w', encoding='utf-8') as execF:
            yaml.dump(marshall_namedtuple(execution), execF, Dumper=YAMLDumper)

    self.executionMarshalled = True
def unmarshallExecute(self, offline : bool = True):
    """
    Restore the execution state previously persisted by marshallExecute.

    The stage state is unmarshalled first, then the execution YAML file
    is parsed and its values re-assigned to this instance.

    :param offline: forwarded to unmarshallStage
    :raises WFException: when the state file is missing or cannot be parsed
    """
    if self.executionMarshalled:
        return

    # Prerequisite: restore staged state first
    self.unmarshallStage(offline=offline)

    marshalled_execution_file = os.path.join(self.metaDir, WORKDIR_MARSHALLED_EXECUTE_FILE)
    if not os.path.exists(marshalled_execution_file):
        raise WFException("Marshalled execution file does not exists. Execution state was not stored")

    self.logger.debug("Parsing marshalled execution state file {}".format(marshalled_execution_file))
    with open(marshalled_execution_file, mode='r', encoding='utf-8') as execF:
        marshalled_execution = yaml.load(execF, Loader=YAMLLoader)
        try:
            execution = unmarshall_namedtuple(marshalled_execution, globals())
            # Same keys, same order as in marshallExecute
            for exec_key in ('exitVal', 'augmentedInputs', 'matCheckOutputs'):
                setattr(self, exec_key, execution[exec_key])
        except Exception as e:
            raise WFException("Error while unmarshalling content from execution state file {}. Reason: {}".format(marshalled_execution_file, e))

    self.executionMarshalled = True
def marshallExport(self, exist_ok : bool = True):
    """
    Persist the export results state as a YAML file in the metadata
    directory. Currently the payload is an empty placeholder (TODO).

    The execution state is marshalled first, so the export file is never
    written without its prerequisites.

    :param exist_ok: when False, an already-existing marshalled export
        results file raises instead of being reused
    :raises WFException: when the file exists and exist_ok is False
    """
    if self.exportMarshalled:
        if not exist_ok:
            raise WFException("Marshalled export results file already exists")
        return

    # Prerequisite: execution (and transitively, stage) state first
    self.marshallExecute(exist_ok=exist_ok)

    marshalled_export_file = os.path.join(self.metaDir, WORKDIR_MARSHALLED_EXPORT_FILE)
    if os.path.exists(marshalled_export_file):
        if not exist_ok:
            raise WFException("Marshalled export results file already exists")
        self.logger.debug("Marshalled export results file {} already exists".format(marshalled_export_file))
    else:
        exported_results = {
            # TODO
        }

        self.logger.debug("Creating marshalled export results file {}".format(marshalled_export_file))
        with open(marshalled_export_file, mode='w', encoding='utf-8') as expF:
            yaml.dump(marshall_namedtuple(exported_results), expF, Dumper=YAMLDumper)

    self.exportMarshalled = True
def unmarshallExport(self, offline : bool = True):
    """
    Restore the export results state previously persisted by
    marshallExport. Currently no attributes are rebuilt from the
    parsed content (TODO).

    :param offline: forwarded to unmarshallExecute
    :raises WFException: when the state file is missing or cannot be parsed
    """
    if self.exportMarshalled:
        return

    # Prerequisite: restore execution (and stage) state first
    self.unmarshallExecute(offline=offline)

    marshalled_export_file = os.path.join(self.metaDir, WORKDIR_MARSHALLED_EXPORT_FILE)
    if not os.path.exists(marshalled_export_file):
        raise WFException("Marshalled export results file does not exists. Export results state was not stored")

    self.logger.debug("Parsing marshalled export results state file {}".format(marshalled_export_file))
    with open(marshalled_export_file, mode='r', encoding='utf-8') as expF:
        marshalled_export = yaml.load(expF, Loader=YAMLLoader)
        try:
            exported_results = unmarshall_namedtuple(marshalled_export, globals())
            # TODO
        except Exception as e:
            raise WFException(f"Error while unmarshalling content from export results state file {marshalled_export_file}. Reason: {e}")

    self.exportMarshalled = True
def createStageResearchObject(self, doMaterializedROCrate : bool = False):
    """
    Create RO-crate from stage provenance.

    Currently a stub: it only restores the marshalled stage state.

    :param doMaterializedROCrate: unused for now (TODO: implement)
    """
    # TODO: implement deserialization
    # Restore staged state first (offline, nothing is re-fetched)
    self.unmarshallStage(offline=True)
    # TODO: implement logic of doMaterializedROCrate and the RO-Crate
    # generation itself
def createResultsResearchObject(self, doMaterializedROCrate : bool = False):
    """
    Create RO-crate from execution provenance.

    Restores the marshalled export state, then either reuses a
    previously downloaded crate.zip (WorkflowHub case) or builds a
    fresh RO-Crate from the materialized engine, registers the
    augmented inputs, and writes the crate as execution.crate.zip in
    the outputs directory.

    :param doMaterializedROCrate: not implemented yet (TODO)
    """
    # TODO: implement deserialization
    self.unmarshallExport(offline=True)
    # TODO: implement logic of doMaterializedROCrate
    # TODO: digest the results from executeWorkflow plus all the provenance

    # Create RO-crate using crate.zip downloaded from WorkflowHub
    if os.path.isfile(str(self.cacheROCrateFilename)):
        wfCrate = rocrate.ROCrate(self.cacheROCrateFilename)
    # Create RO-Crate using rocrate_api
    # TODO no exists the version implemented for Nextflow in rocrate_api
    else:
        # FIXME: What to do when workflow is in git repository different from GitHub??
        # FIXME: What to do when workflow is not in a git repository??
        wf_path = os.path.join(self.localWorkflow.dir, self.localWorkflow.relPath)
        wfCrate, compLang = self.materializedEngine.instance.getEmptyCrateAndComputerLanguage(
            self.localWorkflow.langVersion)
        # Browsable URL of the directory holding the workflow in the repo
        wf_url = self.repoURL.replace(".git", "/") + "tree/" + self.repoTag + "/" + os.path.dirname(
            self.localWorkflow.relPath)
        # TODO create method to create wf_url

        matWf = self.materializedEngine.workflow

        # Build the raw.githubusercontent.com URL of the workflow entry point
        parsed_repo_url = parse.urlparse(self.repoURL)
        if parsed_repo_url.netloc == 'github.com':
            parsed_repo_path = parsed_repo_url.path.split('/')
            repo_name = parsed_repo_path[2]
            if repo_name.endswith('.git'):
                repo_name = repo_name[:-4]
            wf_entrypoint_path = [
                '',  # Needed to prepend a slash
                parsed_repo_path[1],
                repo_name,
                matWf.effectiveCheckout,
                self.localWorkflow.relPath
            ]
            wf_entrypoint_url = parse.urlunparse(
                ('https', 'raw.githubusercontent.com', '/'.join(wf_entrypoint_path), '', '', ''))
        else:
            raise WFException("FIXME: Unsupported http(s) git repository {}".format(self.repoURL))

        # TODO assign something meaningful to cwl
        cwl = True

        workflow_path = Path(wf_path)
        wf_file = wfCrate.add_workflow(
            str(workflow_path), workflow_path.name, fetch_remote=False,
            main=True, lang=compLang, gen_cwl=(cwl is None)
        )
        # This is needed, as it is not automatically added when the
        # `lang` argument in workflow creation was not a string
        wfCrate.add(compLang)

        # if the source is a remote URL then add https://schema.org/codeRepository
        # property to it this can be checked by checking if the source is a URL
        # instead of a local path
        wf_file.properties()['url'] = wf_entrypoint_url
        wf_file.properties()['codeRepository'] = wf_url
        # if 'url' in wf_file.properties():
        #     wf_file['codeRepository'] = wf_file['url']

        # TODO: add extra files, like nextflow.config in the case of
        # Nextflow workflows, the diagram, an abstract CWL
        # representation of the workflow (when it is not a CWL workflow)
        # etc...
        # for file_entry in include_files:
        #     wfCrate.add_file(file_entry)

        wfCrate.isBasedOn = wf_url

    # Add inputs provenance to RO-crate
    for in_item in self.augmentedInputs:
        if isinstance(in_item, MaterializedInput):
            itemInValues = in_item.values[0]
            if isinstance(itemInValues, MaterializedContent):
                # TODO: embed metadata_array in some way
                itemInSource = itemInValues.local
                if os.path.isfile(itemInSource):
                    properties = {
                        'name': in_item.name,
                        'url': itemInValues.uri
                    }
                    wfCrate.add_file(source=itemInSource, properties=properties)
                elif os.path.isdir(itemInSource):
                    self.logger.error("FIXME: input directory / dataset handling in RO-Crate")
                else:
                    pass  # TODO raise Exception
            # TODO digest other types of inputs

    # Add outputs provenance to RO-crate
    # for out_item in self.matCheckOutputs:
    #     if isinstance(out_item, MaterializedOutput):
    #         itemOutKind = out_item.kind.value
    #         itemOutValues = out_item.values[0]
    #         itemOutSource = itemOutValues.local
    #         properties = {'name': out_item.name}
    #         if itemOutKind == "dir":
    #             if isinstance(itemOutValues, GeneratedDirectoryContent):
    #                 if os.path.isdir(itemOutSource):
    #                     dirProperties = dict.fromkeys(['values'])
    #                     dirProperties['values'] = itemOutValues.values
    #                     properties.update(dirProperties)
    #                     wfCrate.add_directory(source=itemOutSource, properties=properties)
    #             else:
    #                 pass  # TODO raise Exception
    #         elif itemOutKind == "file":
    #             if isinstance(itemOutValues, GeneratedContent):
    #                 if os.path.isfile(itemOutSource):
    #                     fileProperties = {
    #                         'uri': itemOutValues.uri
    #                     }
    #                     properties.update(fileProperties)
    #                     wfCrate.add_file(source=itemOutSource, properties=properties)
    #             else:
    #                 pass  # TODO raise Exception
    #         # elif itemOutKind == "val":
    #         else:
    #             pass  # TODO raise Exception

    # Save RO-crate as execution.crate.zip
    wfCrate.writeZip(os.path.join(self.outputsDir, "execution.crate"))
    self.logger.info("RO-Crate created: {}".format(self.outputsDir))
    # TODO error handling
def doMaterializeRepo(self, repoURL, repoTag: RepoTag = None, doUpdate: bool = True) -> Tuple[AbsPath, RepoTag]:
    """
    Clone (or update) a git repository into the workflow cache.

    :param repoURL: the URL of the git repository
    :param repoTag: optional tag, branch or commit to check out
    :param doUpdate: when the local copy already exists, pull updates
    :return: the local checkout directory and the effective commit hash
    :raises WFException: when directories cannot be created or any git
        subcommand fails
    """
    # Cache layout: cacheWorkflowDir/sha1(repoURL)/sha1(repoTag)
    repo_hashed_id = hashlib.sha1(repoURL.encode('utf-8')).hexdigest()
    repo_hashed_tag_id = hashlib.sha1(b'' if repoTag is None else repoTag.encode('utf-8')).hexdigest()

    # Assure directory exists before next step
    repo_destdir = os.path.join(self.cacheWorkflowDir, repo_hashed_id)
    if not os.path.exists(repo_destdir):
        try:
            os.makedirs(repo_destdir)
        # NOTE(review): IOError is an alias of OSError in Python 3, so
        # this also catches FileExistsError races — confirm intent
        except IOError:
            errstr = "ERROR: Unable to create intermediate directories for repo {}. ".format(repoURL)
            raise WFException(errstr)

    repo_tag_destdir = os.path.join(repo_destdir, repo_hashed_tag_id)
    # We are assuming that, if the directory does exist, it contains the repo
    doRepoUpdate = True
    if not os.path.exists(repo_tag_destdir):
        # Try cloning the repository without initial checkout
        if repoTag is not None:
            gitclone_params = [
                self.git_cmd, 'clone', '-n', '--recurse-submodules', repoURL, repo_tag_destdir
            ]

            # Now, checkout the specific commit
            gitcheckout_params = [
                self.git_cmd, 'checkout', repoTag
            ]
        else:
            # We know nothing about the tag, or checkout
            gitclone_params = [
                self.git_cmd, 'clone', '--recurse-submodules', repoURL, repo_tag_destdir
            ]

            gitcheckout_params = None
    elif doUpdate:
        # Repo already cloned: only pull (and optionally re-checkout the tag)
        gitclone_params = None
        gitcheckout_params = [
            self.git_cmd, 'pull', '--recurse-submodules'
        ]
        if repoTag is not None:
            gitcheckout_params.extend(['origin', repoTag])
    else:
        doRepoUpdate = False

    if doRepoUpdate:
        # git output is captured in temp files for error reporting
        with tempfile.NamedTemporaryFile() as git_stdout, tempfile.NamedTemporaryFile() as git_stderr:
            # First, (bare) clone
            retval = 0
            if gitclone_params is not None:
                retval = subprocess.call(gitclone_params, stdout=git_stdout, stderr=git_stderr)
            # Then, checkout (which can be optional)
            if retval == 0 and (gitcheckout_params is not None):
                retval = subprocess.Popen(gitcheckout_params, stdout=git_stdout, stderr=git_stderr,
                                          cwd=repo_tag_destdir).wait()
            # Last, submodule preparation
            if retval == 0:
                # Last, initialize submodules
                gitsubmodule_params = [
                    self.git_cmd, 'submodule', 'update', '--init', '--recursive'
                ]
                retval = subprocess.Popen(gitsubmodule_params, stdout=git_stdout, stderr=git_stderr,
                                          cwd=repo_tag_destdir).wait()

            # Proper error handling
            if retval != 0:
                # Reading the output and error for the report
                with open(git_stdout.name, "r") as c_stF:
                    git_stdout_v = c_stF.read()
                with open(git_stderr.name, "r") as c_stF:
                    git_stderr_v = c_stF.read()

                errstr = "ERROR: Unable to pull '{}' (tag '{}'). Retval {}\n======\nSTDOUT\n======\n{}\n======\nSTDERR\n======\n{}".format(
                    repoURL, repoTag, retval, git_stdout_v, git_stderr_v)
                raise WFException(errstr)

    # Last, we have to obtain the effective checkout
    gitrevparse_params = [
        self.git_cmd, 'rev-parse', '--verify', 'HEAD'
    ]
    with subprocess.Popen(gitrevparse_params, stdout=subprocess.PIPE, encoding='iso-8859-1',
                          cwd=repo_tag_destdir) as revproc:
        repo_effective_checkout = revproc.stdout.read().rstrip()

    return repo_tag_destdir, repo_effective_checkout
def getWorkflowRepoFromTRS(self, offline: bool = False) -> Tuple[WorkflowType, RepoURL, RepoTag, RelPath]:
    """
    Resolve the workflow through a GA4GH TRS endpoint.

    Fetches the TRS service metadata (trying v2 then v2 beta2),
    validates that self.id is labelled as a workflow, selects the
    requested (or highest) version and a recognized descriptor type,
    and derives where the workflow sources live. For WorkflowHub the
    associated RO-Crate is fetched; otherwise the raw TRS files are
    inspected.

    :param offline: whether remote fetches are forbidden
    :return: engine description, repo URL (or local dir), tag and relative path
    :raises WFException: on any metadata fetch or lookup inconsistency
    """
    # Now, time to check whether it is a TRSv2
    trs_endpoint_v2_meta = self.trs_endpoint + 'service-info'
    trs_endpoint_v2_beta2_meta = self.trs_endpoint + 'metadata'
    trs_endpoint_meta = None

    # Needed to store this metadata
    trsMetadataCache = os.path.join(self.metaDir, self.TRS_METADATA_FILE)

    try:
        metaContentKind , cachedTRSMetaFile , trsMetaMeta = self.cacheHandler.fetch(trs_endpoint_v2_meta, self.metaDir, offline)
        trs_endpoint_meta = trs_endpoint_v2_meta
    except WFException as wfe:
        # Fall back to the TRS v2 beta2 metadata endpoint
        try:
            metaContentKind , cachedTRSMetaFile , trsMetaMeta = self.cacheHandler.fetch(trs_endpoint_v2_beta2_meta, self.metaDir, offline)
            trs_endpoint_meta = trs_endpoint_v2_beta2_meta
        except WFException as wfebeta:
            raise WFException("Unable to fetch metadata from {} in order to identify whether it is a working GA4GH TRSv2 endpoint. Exceptions:\n{}\n{}".format(self.trs_endpoint, wfe, wfebeta))

    # Giving a friendly name
    if not os.path.exists(trsMetadataCache):
        os.symlink(os.path.basename(cachedTRSMetaFile), trsMetadataCache)

    with open(trsMetadataCache, mode="r", encoding="utf-8") as ctmf:
        self.trs_endpoint_meta = json.load(ctmf)

    # Minimal check
    trs_version = self.trs_endpoint_meta.get('api_version')
    if trs_version is None:
        trs_version = self.trs_endpoint_meta.get('type', {}).get('version')
    if trs_version is None:
        raise WFException("Unable to identify TRS version from {}".format(trs_endpoint_meta))

    # Now, check the tool does exist in the TRS, and the version
    trs_tools_url = parse.urljoin(self.trs_endpoint, self.TRS_TOOLS_PATH + parse.quote(self.id, safe=''))

    trsQueryCache = os.path.join(self.metaDir, self.TRS_QUERY_CACHE_FILE)
    _ , cachedTRSQueryFile , _ = self.cacheHandler.fetch(trs_tools_url, self.metaDir, offline)
    # Giving a friendly name
    if not os.path.exists(trsQueryCache):
        os.symlink(os.path.basename(cachedTRSQueryFile), trsQueryCache)

    with open(trsQueryCache, mode="r", encoding="utf-8") as tQ:
        rawToolDesc = tQ.read()

    # If the tool does not exist, an exception will be thrown before
    jd = json.JSONDecoder()
    toolDesc = jd.decode(rawToolDesc)

    # If the tool is not a workflow, complain
    if toolDesc.get('toolclass', {}).get('name', '') != 'Workflow':
        raise WFException(
            'Tool {} from {} is not labelled as a workflow. Raw answer:\n{}'.format(self.id, self.trs_endpoint,
                                                                                    rawToolDesc))

    possibleToolVersions = toolDesc.get('versions', [])
    if len(possibleToolVersions) == 0:
        raise WFException(
            'Version {} not found in workflow {} from {} . Raw answer:\n{}'.format(self.version_id, self.id,
                                                                                   self.trs_endpoint, rawToolDesc))

    toolVersion = None
    toolVersionId = self.version_id
    if (toolVersionId is not None) and len(toolVersionId) > 0:
        # An explicit version was requested: search by its id or name
        for possibleToolVersion in possibleToolVersions:
            if isinstance(possibleToolVersion, dict):
                possibleId = str(possibleToolVersion.get('id', ''))
                possibleName = str(possibleToolVersion.get('name', ''))
                if self.version_id in (possibleId, possibleName):
                    toolVersion = possibleToolVersion
                    break
        else:
            # for/else: reached only when no version matched
            raise WFException(
                'Version {} not found in workflow {} from {} . Raw answer:\n{}'.format(self.version_id, self.id,
                                                                                       self.trs_endpoint,
                                                                                       rawToolDesc))
    else:
        # No version requested: pick the one with the highest id
        # (lexicographic string comparison — TODO confirm this matches
        # the endpoint's versioning scheme)
        toolVersionId = ''
        for possibleToolVersion in possibleToolVersions:
            possibleToolVersionId = str(possibleToolVersion.get('id', ''))
            if len(possibleToolVersionId) > 0 and toolVersionId < possibleToolVersionId:
                toolVersion = possibleToolVersion
                toolVersionId = possibleToolVersionId

    if toolVersion is None:
        raise WFException(
            'No valid version was found in workflow {} from {} . Raw answer:\n{}'.format(self.id, self.trs_endpoint,
                                                                                         rawToolDesc))

    # The version has been found
    toolDescriptorTypes = toolVersion.get('descriptor_type', [])
    if not isinstance(toolDescriptorTypes, list):
        raise WFException(
            'Version {} of workflow {} from {} has no valid "descriptor_type" (should be a list). Raw answer:\n{}'.format(
                self.version_id, self.id, self.trs_endpoint, rawToolDesc))

    # Now, realize whether it matches
    chosenDescriptorType = self.descriptor_type
    if chosenDescriptorType is None:
        for candidateDescriptorType in self.RECOGNIZED_TRS_DESCRIPTORS.keys():
            if candidateDescriptorType in toolDescriptorTypes:
                chosenDescriptorType = candidateDescriptorType
                break
        else:
            # for/else: none of the recognized descriptor types is offered
            raise WFException(
                'Version {} of workflow {} from {} has no acknowledged "descriptor_type". Raw answer:\n{}'.format(
                    self.version_id, self.id, self.trs_endpoint, rawToolDesc))
    elif chosenDescriptorType not in toolVersion['descriptor_type']:
        raise WFException(
            'Descriptor type {} not available for version {} of workflow {} from {} . Raw answer:\n{}'.format(
                self.descriptor_type, self.version_id, self.id, self.trs_endpoint, rawToolDesc))
    elif chosenDescriptorType not in self.RECOGNIZED_TRS_DESCRIPTORS:
        raise WFException(
            'Descriptor type {} is not among the acknowledged ones by this backend. Version {} of workflow {} from {} . Raw answer:\n{}'.format(
                self.descriptor_type, self.version_id, self.id, self.trs_endpoint, rawToolDesc))

    toolFilesURL = trs_tools_url + '/versions/' + parse.quote(toolVersionId, safe='') + '/' + parse.quote(chosenDescriptorType, safe='') + '/files'

    # Detecting whether RO-Crate trick will work
    if self.trs_endpoint_meta.get('organization',{}).get('name') == 'WorkflowHub':
        self.logger.debug("WorkflowHub workflow")
        # And this is the moment where the RO-Crate must be fetched
        roCrateURL = toolFilesURL + '?' + parse.urlencode({'format': 'zip'})

        return self.getWorkflowRepoFromROCrateURL(roCrateURL,
                                                  expectedEngineDesc=self.RECOGNIZED_TRS_DESCRIPTORS[
                                                      chosenDescriptorType], offline=offline)
    else:
        self.logger.debug("TRS workflow")
        # Learning the available files and maybe
        # which is the entrypoint to the workflow
        _ , trsFilesDir , trsFilesMeta = self.cacheHandler.fetch(INTERNAL_TRS_SCHEME_PREFIX + ':' + toolFilesURL, self.cacheTRSFilesDir, offline)

        expectedEngineDesc = self.RECOGNIZED_TRS_DESCRIPTORS[chosenDescriptorType]
        remote_workflow_entrypoint = trsFilesMeta[0].metadata.get('remote_workflow_entrypoint')
        if remote_workflow_entrypoint is not None:
            # Give it a chance to identify the original repo of the workflow
            repoURL, repoTag, repoRelPath = self.guessRepoParams(remote_workflow_entrypoint, fail_ok=False)

            if repoURL is not None:
                self.logger.debug("Derived repository {} ({} , rel {}) from {}".format(repoURL, repoTag, repoRelPath, trs_tools_url))
                return expectedEngineDesc , repoURL, repoTag, repoRelPath

        workflow_entrypoint = trsFilesMeta[0].metadata.get('workflow_entrypoint')
        if workflow_entrypoint is not None:
            self.logger.debug("Using raw files from TRS tool {}".format(trs_tools_url))
            repoDir = trsFilesDir
            repoRelPath = workflow_entrypoint
            return expectedEngineDesc , repoDir, None, repoRelPath

        raise WFException("Unable to find a workflow in {}".format(trs_tools_url))
def getWorkflowRepoFromROCrateURL(self, roCrateURL, expectedEngineDesc: WorkflowType = None, offline: bool = False) -> Tuple[WorkflowType, RepoURL, RepoTag, RelPath]:
    """
    Fetch an RO-Crate from a URL and derive the workflow repo from it.

    :param roCrateURL: URL of the RO-Crate to be downloaded
    :param expectedEngineDesc: If defined, an instance of WorkflowType
    :param offline: whether remote fetches are forbidden
    :return: engine description, repo URL, repo tag and relative path
    """
    localROCrate = self.downloadROcrate(roCrateURL, offline=offline)
    self.logger.info("downloaded RO-Crate: {} -> {}".format(roCrateURL, localROCrate))

    return self.getWorkflowRepoFromROCrateFile(localROCrate, expectedEngineDesc)
def getWorkflowRepoFromROCrateFile(self, roCrateFile: AbsPath, expectedEngineDesc: WorkflowType = None) -> Tuple[WorkflowType, RepoURL, RepoTag, RelPath]:
    """
    Derive the workflow repository from a local RO-Crate file.

    Walks the crate entities to locate the main workflow entity, its
    upload URL and its programming language, matches the language
    against the recognized workflow engines, and then guesses the
    (repoURL, repoTag, repoRelPath) triplet from the workflow URL,
    falling back to the root dataset's isBasedOn.

    :param roCrateFile: local path of the RO-Crate file
    :param expectedEngineDesc: If defined, an instance of WorkflowType
    :return: engine description, repo URL, repo tag and relative path
    :raises WFException: when the language cannot be matched, does not
        agree with expectedEngineDesc, or no repository can be guessed
    """
    roCrateObj = rocrate.ROCrate(roCrateFile)

    # TODO: get roCrateObj mainEntity programming language
    # self.logger.debug(roCrateObj.root_dataset.as_jsonld())
    mainEntityProgrammingLanguageId = None
    mainEntityProgrammingLanguageUrl = None
    mainEntityIdHolder = None
    mainEntityId = None
    workflowPID = None
    workflowUploadURL = None
    workflowTypeId = None
    for e in roCrateObj.get_entities():
        if (mainEntityIdHolder is None) and e['@type'] == 'CreativeWork' and '.json' in e['@id']:
            # The crate metadata descriptor, which points at the root dataset
            mainEntityIdHolder = e.as_jsonld()['about']['@id']
        elif e['@id'] == mainEntityIdHolder:
            eAsLD = e.as_jsonld()
            mainEntityId = eAsLD['mainEntity']['@id']
            workflowPID = eAsLD.get('identifier')
        elif e['@id'] == mainEntityId:
            eAsLD = e.as_jsonld()
            workflowUploadURL = eAsLD.get('url')
            workflowTypeId = eAsLD['programmingLanguage']['@id']
        elif e['@id'] == workflowTypeId:
            # A bit dirty, but it works
            eAsLD = e.as_jsonld()
            mainEntityProgrammingLanguageId = eAsLD.get('identifier', {}).get('@id')
            mainEntityProgrammingLanguageUrl = eAsLD.get('url', {}).get('@id')

    # Now, it is time to match the language id
    engineDescById = None
    engineDescByUrl = None
    for possibleEngineDesc in self.WORKFLOW_ENGINES:
        if (engineDescById is None) and (mainEntityProgrammingLanguageId is not None):
            for pat in possibleEngineDesc.uriMatch:
                if isinstance(pat, Pattern):
                    match = pat.search(mainEntityProgrammingLanguageId)
                    if match:
                        engineDescById = possibleEngineDesc
                elif pat == mainEntityProgrammingLanguageId:
                    engineDescById = possibleEngineDesc

        if (engineDescByUrl is None) and (mainEntityProgrammingLanguageUrl == possibleEngineDesc.url):
            engineDescByUrl = possibleEngineDesc

    # Prefer the match by identifier over the match by URL
    engineDesc = None
    if engineDescById is not None:
        engineDesc = engineDescById
    elif engineDescByUrl is not None:
        engineDesc = engineDescByUrl
    else:
        raise WFException(
            'Found programming language {} (url {}) in RO-Crate manifest is not among the acknowledged ones'.format(
                mainEntityProgrammingLanguageId, mainEntityProgrammingLanguageUrl))

    if (engineDescById is not None) and (engineDescByUrl is not None) and engineDescById != engineDescByUrl:
        self.logger.warning('Found programming language {} (url {}) leads to different engines'.format(
            mainEntityProgrammingLanguageId, mainEntityProgrammingLanguageUrl))

    if (expectedEngineDesc is not None) and engineDesc != expectedEngineDesc:
        raise WFException(
            'Expected programming language {} does not match identified one {} in RO-Crate manifest'.format(
                expectedEngineDesc.engineName, engineDesc.engineName))

    # This workflow URL, in the case of github, can provide the repo,
    # the branch/tag/checkout , and the relative directory in the
    # fetched content (needed by Nextflow)

    # Some RO-Crates might have this value missing or ill-built.
    # FIX: initialize the triplet before use; previously, when
    # workflowUploadURL was None, the `repoURL is None` test below
    # raised UnboundLocalError instead of trying the isBasedOn fallback
    repoURL = None
    repoTag = None
    repoRelPath = None
    if workflowUploadURL is not None:
        repoURL, repoTag, repoRelPath = self.guessRepoParams(workflowUploadURL, fail_ok=False)

    if repoURL is None:
        # Fall back to the root dataset's isBasedOn property
        repoURL, repoTag, repoRelPath = self.guessRepoParams(roCrateObj.root_dataset['isBasedOn'], fail_ok=False)
        if repoURL is None:
            raise WFException('Unable to guess repository from RO-Crate manifest')

    # It must return four elements:
    return engineDesc, repoURL, repoTag, repoRelPath
def guessRepoParams(self, wf_url: Union[URIType, parse.ParseResult], fail_ok: bool = True) -> Tuple[RepoURL, RepoTag, RelPath]:
    """
    Try to derive (repoURL, repoTag, repoRelPath) from a workflow URL.

    Understands pip-style git+ URLs, github.com blob/tree URLs and
    raw.githubusercontent.com URLs. Unrecognized URLs either raise or
    yield (None, None, None).

    :param wf_url: the URL (or an already parsed one) to analyse
    :param fail_ok: NOTE(review): with the current `elif fail_ok: raise`
        condition, the exception for unsupported URLs fires when
        fail_ok is True and is suppressed when it is False (callers in
        this file pass fail_ok=False and then check for None) — the
        flag semantics look inverted relative to its name; confirm
        before changing, since callers depend on current behavior.
    :return: repository URL, tag/branch/checkout and relative path
        (each element may be None)
    """
    repoURL = None
    repoTag = None
    repoRelPath = None

    # Deciding which is the input
    if isinstance(wf_url, parse.ParseResult):
        parsed_wf_url = wf_url
    else:
        parsed_wf_url = parse.urlparse(wf_url)

    # These are the usual URIs which can be understood by pip
    # See https://pip.pypa.io/en/stable/cli/pip_install/#git
    if parsed_wf_url.scheme.startswith('git+') or parsed_wf_url.scheme == 'git':
        # Getting the scheme git is going to understand
        if len(parsed_wf_url.scheme) > 3:
            gitScheme = parsed_wf_url.scheme[4:]
        else:
            gitScheme = parsed_wf_url.scheme

        # Getting the tag or branch
        if '@' in parsed_wf_url.path:
            gitPath , repoTag = parsed_wf_url.path.split('@',1)
        else:
            gitPath = parsed_wf_url.path

        # Getting the repoRelPath (if available)
        if len(parsed_wf_url.fragment) > 0:
            frag_qs = parse.parse_qs(parsed_wf_url.fragment)
            subDirArr = frag_qs.get('subdirectory',[])
            if len(subDirArr) > 0:
                repoRelPath = subDirArr[0]

        # Now, reassemble the repoURL
        repoURL = parse.urlunparse((gitScheme, parsed_wf_url.netloc, gitPath, '', '', ''))

    # TODO handling other popular cases, like bitbucket
    elif parsed_wf_url.netloc == 'github.com':
        wf_path = parsed_wf_url.path.split('/')

        if len(wf_path) >= 3:
            repoGitPath = wf_path[:3]
            if not repoGitPath[-1].endswith('.git'):
                repoGitPath[-1] += '.git'

            # Rebuilding repo git path
            repoURL = parse.urlunparse(
                (parsed_wf_url.scheme, parsed_wf_url.netloc, '/'.join(repoGitPath), '', '', ''))

            # And now, guessing the tag and the relative path
            if len(wf_path) >= 5 and (wf_path[3] in ('blob', 'tree')):
                repoTag = wf_path[4]

                if len(wf_path) >= 6:
                    repoRelPath = '/'.join(wf_path[5:])
    elif parsed_wf_url.netloc == 'raw.githubusercontent.com':
        wf_path = parsed_wf_url.path.split('/')
        if len(wf_path) >= 3:
            # Rebuilding it
            repoGitPath = wf_path[:3]
            repoGitPath[-1] += '.git'

            # Rebuilding repo git path
            repoURL = parse.urlunparse(
                ('https', 'github.com', '/'.join(repoGitPath), '', '', ''))

            # And now, guessing the tag/checkout and the relative path
            if len(wf_path) >= 4:
                repoTag = wf_path[3]

                if len(wf_path) >= 5:
                    repoRelPath = '/'.join(wf_path[4:])
    elif fail_ok:
        raise WFException("FIXME: Unsupported http(s) git repository {}".format(wf_url))

    self.logger.debug("From {} was derived {} {} {}".format(wf_url, repoURL, repoTag, repoRelPath))

    return repoURL, repoTag, repoRelPath
def downloadROcrate(self, roCrateURL, offline: bool = False) -> AbsPath:
    """
    Download RO-crate from WorkflowHub (https://dev.workflowhub.eu/)
    using GA4GH TRS API and save RO-Crate in the RO-Crate cache directory.

    :param roCrateURL: URL of the RO-Crate to be downloaded
    :param offline: Are we in offline mode?
    :type roCrateURL: str
    :type offline: bool
    :return: the local path of the cached RO-Crate file
    :raises WFException: when the RO-Crate cannot be fetched
    """
    try:
        roCK , roCrateFile , _ = self.cacheHandler.fetch(roCrateURL, self.cacheROCrateDir, offline)
    except Exception as e:
        raise WFException("Cannot download RO-Crate from {}, {}".format(roCrateURL, e))

    # Give the cached file a friendly, deterministic name derived from the URL
    crate_hashed_id = hashlib.sha1(roCrateURL.encode('utf-8')).hexdigest()
    cachedFilename = os.path.join(self.cacheROCrateDir, crate_hashed_id + self.DEFAULT_RO_EXTENSION)
    if not os.path.exists(cachedFilename):
        os.symlink(os.path.basename(roCrateFile),cachedFilename)
    # Remember the location so later steps can reuse the downloaded crate
    self.cacheROCrateFilename = cachedFilename

    return cachedFilename
def downloadInputFile(self, remote_file, workflowInputs_destdir: AbsPath = None,
                      contextName=None, offline: bool = False, ignoreCache:bool=False, registerInCache:bool=True) -> MaterializedContent:
    """
    Download remote file or directory / dataset.

    :param remote_file: URL or CURIE to download remote file
    :param workflowInputs_destdir: destination directory; defaults to
        the workflow inputs cache directory
    :param contextName: name of the security context to use, if any
    :param offline: whether remote fetches are forbidden
    :param ignoreCache: bypass the cache on fetch
    :param registerInCache: record the fetched content in the cache
    :type remote_file: str
    :return: a MaterializedContent describing the fetched input
    :raises RuntimeError: when remote_file is not a valid URL / CURIE
    :raises WFException: when the named security context is unknown
    """
    parsedInputURL = parse.urlparse(remote_file)
    if not all([parsedInputURL.scheme, parsedInputURL.path]):
        raise RuntimeError("Input is not a valid remote URL or CURIE source")

    prettyFilename = parsedInputURL.path.split('/')[-1]

    # Assure workflow inputs directory exists before the next step
    if workflowInputs_destdir is None:
        workflowInputs_destdir = self.cacheWorkflowInputsDir

    self.logger.info("downloading workflow input: {}".format(remote_file))

    # Security context is obtained here
    secContext = None
    if contextName is not None:
        secContext = self.creds_config.get(contextName)
        if secContext is None:
            raise WFException(
                'No security context {} is available, needed by {}'.format(contextName, remote_file))

    inputKind, cachedFilename, metadata_array = self.cacheHandler.fetch(remote_file, workflowInputs_destdir, offline, ignoreCache, registerInCache, secContext)
    self.logger.info("downloaded workflow input: {} => {}".format(remote_file, cachedFilename))

    return MaterializedContent(cachedFilename, remote_file, prettyFilename, inputKind, metadata_array)
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,006
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/container.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import tempfile
import atexit
import shutil
import abc
import logging
from typing import Dict, List, Tuple
from collections import namedtuple
from .common import *
class ContainerFactoryException(Exception):
    """
    Raised by ContainerFactory instances when container
    materialization or setup fails.
    """
    pass
class ContainerFactory(abc.ABC):
def __init__(self, cacheDir=None, local_config=None, engine_name='unset', tempDir=None):
"""
Abstract init method
"""
if local_config is None:
local_config = dict()
self.local_config = local_config
# Getting a logger focused on specific classes
self.logger = logging.getLogger(self.__class__.__name__)
# cacheDir
if cacheDir is None:
cacheDir = local_config.get('cacheDir')
if cacheDir:
os.makedirs(cacheDir, exist_ok=True)
else:
cacheDir = tempfile.mkdtemp(prefix='wfexs', suffix='backend')
# Assuring this temporal directory is removed at the end
atexit.register(shutil.rmtree, cacheDir)
if tempDir is None:
tempDir = tempfile.mkdtemp(prefix='WfExS-container', suffix='tempdir')
# Assuring this temporal directory is removed at the end
atexit.register(shutil.rmtree, tempDir)
# This directory might be needed by temporary processes, like
# image materialization in singularity or podman
self.tempDir = tempDir
# But, for materialized containers, we should use common directories
# This for the containers themselves
self.containersCacheDir = os.path.join(cacheDir, 'containers', self.__class__.__name__)
# This for the symlinks to the containers, following the engine convention
self.engineContainersSymlinkDir = os.path.join(self.containersCacheDir, engine_name)
os.makedirs(self.engineContainersSymlinkDir, exist_ok=True)
# This variable contains the dictionary of set up environment
# variables needed to run the tool with the proper setup
self._environment = dict()
# This variable contains the set of optional features
# supported by this container factory in this installation
self._features = set()
self.runtime_cmd = None
# Detecting host userns support
host_userns_supported = False
if os.path.lexists('/proc/self/ns/user'):
host_userns_supported = True
self._features.add('host_userns')
else:
self.logger.warning('Host does not support userns (needed for encrypted working directories in several container technologies)')
self.logger.debug(f'Host supports userns: {host_userns_supported}')
@classmethod
@abc.abstractmethod
def ContainerType(cls) -> ContainerType:
pass
@property
def environment(self) -> Dict[str, str]:
return self._environment
@property
def containerType(self) -> ContainerType:
return self.ContainerType()
@property
def command(self) -> str:
return self.runtime_cmd
@property
def cacheDir(self) -> AbsPath:
"""
This method returns the symlink dir instead of the cache dir
as the entries following the naming convention of the engine
are placed in the symlink dir
"""
return self.engineContainersSymlinkDir
    @abc.abstractmethod
    def materializeContainers(self, tagList: List[ContainerTaggedName], simpleFileNameMethod: ContainerFileNamingMethod, offline: bool = False) -> List[Container]:
        """
        It is assured the containers are materialized

        :param tagList: container tags to materialize
        :param simpleFileNameMethod: maps a container tag to its local file name
        :param offline: when True, no remote fetch should be attempted
        :return: the list of materialized containers
        """
        pass
def supportsFeature(self, feat : str) -> bool:
"""
Checking whether some feature is supported by this container
factory in this installation. Currently userns
"""
return feat in self._features
class NoContainerFactory(ContainerFactory):
    """
    Container factory implementing the "no container" approach, meant
    for development and for locally installed software.
    """

    @classmethod
    def ContainerType(cls) -> ContainerType:
        # This factory stands for the absence of any container technology
        return ContainerType.NoContainer

    def materializeContainers(self, tagList: List[ContainerTaggedName], simpleFileNameMethod: ContainerFileNamingMethod, offline: bool = False) -> List[Container]:
        """
        It is assured the no-containers are materialized
        i.e. it is a no-op
        """
        return []
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,007
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/cwl_engine.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import logging
import re
import shutil
import stat
import subprocess
import tempfile
import venv
from typing import Set, Tuple
import jsonpath_ng
import jsonpath_ng.ext
import yaml
from .common import *
from .engine import WORKDIR_STDOUT_FILE, WORKDIR_STDERR_FILE, STATS_DAG_DOT_FILE
from .engine import WorkflowEngine, WorkflowEngineException
# Next methods are borrowed from
# https://github.com/common-workflow-language/cwltool/blob/5bdb3d3dd47d8d1b3a1685220b4b6ce0f94c055e/cwltool/singularity.py#L83
def _normalize_image_id(string: str) -> str:
return string.replace("/", "_") + ".img"
def _normalize_sif_id(string: str) -> str:
return string.replace("/", "_") + ".sif"
class CWLWorkflowEngine(WorkflowEngine):
    """Workflow engine backend which runs CWL workflows through cwltool.

    It materializes a pinned cwltool (plus schema-salad) inside a per-version
    virtual environment, packs workflows with ``cwltool --pack``, and executes
    them through a shell invocation of the venv's cwltool.
    """
    # PyPI package names involved in the engine installation
    CWLTOOL_PYTHON_PACKAGE = 'cwltool'
    CWL_UTILS_PYTHON_PACKAGE = 'cwl-utils'
    SCHEMA_SALAD_PYTHON_PACKAGE = 'schema-salad'
    # Upstream repositories, used when installing a development snapshot
    CWL_REPO = 'https://github.com/common-workflow-language/'
    CWLTOOL_REPO = CWL_REPO + CWLTOOL_PYTHON_PACKAGE
    CWL_UTILS_REPO = CWL_REPO + CWL_UTILS_PYTHON_PACKAGE
    # Version used when neither workflow_config nor local_config pin one
    DEFAULT_CWLTOOL_VERSION = '3.1.20210628163208'
    DEVEL_CWLTOOL_PACKAGE = f'git+{CWLTOOL_REPO}.git'
    # Set this constant to something meaningful only when a hotfix
    # between releases is needed
    #DEVEL_CWLTOOL_VERSION = 'ed9dd4c3472e940a52dfe90049895f470bfd7329'
    DEVEL_CWLTOOL_VERSION = None
    DEFAULT_CWL_UTILS_VERSION = 'v0.10'
    DEFAULT_SCHEMA_SALAD_VERSION = '8.1.20210627200047'
    # Wrapper script shipped in payloadsDir, installed as the venv's `node`
    NODEJS_SINGULARITY_WRAPPER = 'nodejs_singularity_wrapper.bash'
    ENGINE_NAME = 'cwl'
    # Container technologies this engine can drive (Podman still disabled)
    SUPPORTED_CONTAINER_TYPES = {
        ContainerType.NoContainer,
        ContainerType.Singularity,
        ContainerType.Docker,
        # ContainerType.Podman,
    }
    # Subset of the above which is allowed under secure execution
    SUPPORTED_SECURE_EXEC_CONTAINER_TYPES = {
        ContainerType.NoContainer,
        ContainerType.Singularity,
        # ContainerType.Podman,
    }
    def __init__(self,
                 cacheDir=None,
                 workflow_config=None,
                 local_config=None,
                 engineTweaksDir=None,
                 cacheWorkflowDir=None,
                 cacheWorkflowInputsDir=None,
                 workDir=None,
                 outputsDir=None,
                 outputMetaDir=None,
                 intermediateDir=None,
                 tempDir=None,
                 secure_exec : bool = False,
                 allowOther : bool = False,
                 config_directory=None
                 ):
        """Set up the CWL engine on top of the generic WorkflowEngine.

        The cwltool version is resolved with this precedence: workflow-level
        config, then installation-level config, then DEFAULT_CWLTOOL_VERSION.

        NOTE(review): local_config and workflow_config are dereferenced with
        .get() below, so despite the None defaults they are assumed to be
        dicts here — presumably the superclass normalizes them; confirm.
        """
        super().__init__(cacheDir=cacheDir, workflow_config=workflow_config, local_config=local_config,
                         engineTweaksDir=engineTweaksDir, cacheWorkflowDir=cacheWorkflowDir,
                         cacheWorkflowInputsDir=cacheWorkflowInputsDir,
                         workDir=workDir, outputsDir=outputsDir, intermediateDir=intermediateDir,
                         outputMetaDir=outputMetaDir, tempDir=tempDir, secure_exec=secure_exec,
                         allowOther=allowOther, config_directory=config_directory)
        # Getting a fixed version of the engine
        toolsSect = local_config.get('tools', {})
        engineConf = toolsSect.get(self.ENGINE_NAME, {})
        workflowEngineConf = workflow_config.get(self.ENGINE_NAME, {})
        cwl_version = workflowEngineConf.get('version')
        if cwl_version is None:
            cwl_version = engineConf.get('version', self.DEFAULT_CWLTOOL_VERSION)
        # Version of cwltool this engine instance will install and run
        self.cwl_version = cwl_version
        # Setting up packed directory, where `cwltool --pack` results are cached
        self.cacheWorkflowPackDir = os.path.join(self.cacheWorkflowDir, 'wf-pack')
        os.makedirs(self.cacheWorkflowPackDir, exist_ok=True)
    @classmethod
    def WorkflowType(cls) -> WorkflowType:
        """Describe this engine for the workflow-type registry (name, URI
        patterns used to recognize CWL workflows, TRS descriptor, etc.)."""
        return WorkflowType(
            engineName=cls.ENGINE_NAME,
            name='Common Workflow Language',
            clazz=cls,
            uriMatch=[re.compile(r'^https://w3id\.org/cwl/')],
            uriTemplate=r'https://w3id.org/cwl/{}/',
            url='https://www.commonwl.org/',
            trs_descriptor='CWL',
            rocrate_programming_language='#cwl'
        )
    @classmethod
    def SupportedContainerTypes(cls) -> Set[ContainerType]:
        """Container technologies this engine can drive."""
        return cls.SUPPORTED_CONTAINER_TYPES
    @classmethod
    def SupportedSecureExecContainerTypes(cls) -> Set[ContainerType]:
        """Container technologies allowed under secure execution."""
        return cls.SUPPORTED_SECURE_EXEC_CONTAINER_TYPES
    def identifyWorkflow(self, localWf: LocalWorkflow, engineVer: EngineVersion = None) -> Tuple[EngineVersion, LocalWorkflow]:
        """
        This method should return the effective engine version needed
        to run it when this workflow engine recognizes the workflow type

        Recognition criterion: the entrypoint parses as YAML and declares
        a top-level ``cwlVersion`` key. Returns (None, None) otherwise.
        """
        cwlPath = localWf.dir
        if localWf.relPath is not None:
            cwlPath = os.path.join(cwlPath, localWf.relPath)
        # Is this a yaml?
        cwlVersion = None
        try:
            with open(cwlPath, mode="r", encoding="utf-8") as pCWL:
                wf_yaml = yaml.safe_load(pCWL)  # parse possible CWL
                cwlVersion = wf_yaml.get('cwlVersion')
        except Exception as e:
            # Not being parseable YAML just means "not recognized"
            self.logger.warning('Unable to process CWL entrypoint {} {}'.format(cwlPath, e))
        if cwlVersion is None:
            return None, None
        # Re-issue the workflow description enriched with the declared language version
        newLocalWf = LocalWorkflow(dir=localWf.dir, relPath=localWf.relPath,
                                   effectiveCheckout=localWf.effectiveCheckout, langVersion=cwlVersion)
        # TODO: Check best version of the engine
        # NOTE(review): when relPath is set, the caller-provided engineVer is
        # unconditionally overridden with self.cwl_version, which makes the
        # following `if engineVer is None` check redundant in that case.
        # Looks like the first condition was meant to be something else — confirm.
        if localWf.relPath is not None:
            engineVer = self.cwl_version
        if engineVer is None:
            engineVer = self.cwl_version
        return engineVer, newLocalWf
    def materializeEngineVersion(self, engineVersion: EngineVersion) -> Tuple[EngineVersion, EnginePath, Fingerprint]:
        """
        Method to ensure the required engine version is materialized
        It should raise an exception when the exact version is unavailable,
        and no replacement could be fetched

        Creates (once) a virtual environment keyed by engineVersion, drops a
        nodejs wrapper into its bin/, and pip-installs schema-salad plus the
        requested cwltool release (or a development snapshot when
        DEVEL_CWLTOOL_VERSION is set).

        :return: the effective engine version, the venv path and a fingerprint
            (currently an empty string — see the TODO below)
        """
        if self.engine_mode != EngineMode.Local:
            raise WorkflowEngineException(
                'Unsupported engine mode {} for {} engine'.format(self.engine_mode, self.ENGINE_NAME))
        # Choose between a released cwltool (==version) and a git snapshot (@commit)
        if self.DEVEL_CWLTOOL_VERSION is not None:
            cwltoolPackage = self.DEVEL_CWLTOOL_PACKAGE
            cwltoolMatchOp = '@'
            engineVersion = self.DEVEL_CWLTOOL_VERSION
        else:
            cwltoolPackage = self.CWLTOOL_PYTHON_PACKAGE
            cwltoolMatchOp = '=='
        # A version directory is needed
        cwl_install_dir = os.path.join(self.weCacheDir, engineVersion)
        # Creating the virtual environment needed to separate CWL code
        # from workflow execution backend
        if not os.path.isdir(cwl_install_dir):
            venv.create(cwl_install_dir, with_pip=True)
        # Let's be sure the nodejs wrapper, needed by cwltool
        # is in place
        node_wrapper_source_path = os.path.join(self.payloadsDir, self.NODEJS_SINGULARITY_WRAPPER)
        node_wrapper_inst_path = os.path.join(cwl_install_dir, 'bin', 'node')
        if not os.path.isfile(node_wrapper_inst_path):
            shutil.copy2(node_wrapper_source_path, node_wrapper_inst_path)
        # Assuring it has the permissions
        if not os.access(node_wrapper_inst_path, os.X_OK):
            # Owner read+exec only; write is deliberately not granted
            os.chmod(node_wrapper_inst_path, stat.S_IREAD | stat.S_IEXEC)
        # And the symlink from nodejs to node
        nodejs_wrapper_inst_path = os.path.join(cwl_install_dir, 'bin', 'nodejs')
        if not os.path.islink(nodejs_wrapper_inst_path):
            os.symlink('node', nodejs_wrapper_inst_path)
        # Now, time to run it
        instEnv = dict(os.environ)
        with tempfile.NamedTemporaryFile() as cwl_install_stdout:
            with tempfile.NamedTemporaryFile() as cwl_install_stderr:
                # Activate the venv in a shell, then pip-install the pinned
                # schema-salad and cwltool ({3}{4}{5} expands to e.g. cwltool==X
                # or git+...@commit, depending on the branch chosen above)
                retVal = subprocess.Popen(
                    ". '{0}'/bin/activate && pip install --upgrade pip wheel ; pip install {1}=={2} {3}{4}{5}".format(
                        cwl_install_dir,
                        self.SCHEMA_SALAD_PYTHON_PACKAGE, self.DEFAULT_SCHEMA_SALAD_VERSION,
                        cwltoolPackage, cwltoolMatchOp, engineVersion,
                        # Commented out, as WfExS is not currently using cwl-utils
                        # self.CWL_UTILS_PYTHON_PACKAGE, self.DEFAULT_CWL_UTILS_VERSION,
                    ),
                    stdout=cwl_install_stdout,
                    stderr=cwl_install_stderr,
                    cwd=cwl_install_dir,
                    shell=True,
                    env=instEnv
                ).wait()
                # Proper error handling
                if retVal != 0:
                    # Reading the output and error for the report
                    with open(cwl_install_stdout.name, "r") as c_stF:
                        cwl_install_stdout_v = c_stF.read()
                    with open(cwl_install_stderr.name, "r") as c_stF:
                        cwl_install_stderr_v = c_stF.read()
                    errstr = "Could not install CWL {} . Retval {}\n======\nSTDOUT\n======\n{}\n======\nSTDERR\n======\n{}".format(
                        engineVersion, retVal, cwl_install_stdout_v, cwl_install_stderr_v)
                    raise WorkflowEngineException(errstr)
        # TODO
        return engineVersion, cwl_install_dir, ""
def materializeWorkflow(self, matWorkflowEngine: MaterializedWorkflowEngine, offline: bool = False) -> Tuple[MaterializedWorkflowEngine, List[ContainerTaggedName]]:
"""
Method to ensure the workflow has been materialized. It returns the
localWorkflow directory, as well as the list of containers.
For Nextflow it is usually a no-op, but for CWL it requires resolution.
"""
localWf = matWorkflowEngine.workflow
localWorkflowDir = localWf.dir
if os.path.isabs(localWf.relPath):
localWorkflowFile = localWf.relPath
else:
localWorkflowFile = os.path.join(localWorkflowDir, localWf.relPath)
engineVersion = matWorkflowEngine.version
# CWLWorkflowEngine directory is needed
cwl_install_dir = matWorkflowEngine.engine_path
if not os.path.isfile(localWorkflowFile):
raise WorkflowEngineException(
'CWL workflow {} has not been materialized.'.format(localWorkflowFile))
# Extract hashes directories from localWorkflow
localWorkflowUsedHashes_head, localWorkflowUsedHashes_tail = localWorkflowDir.split("/")[-2:]
# Setting up workflow packed name
localWorkflowPackedName = (
os.path.join(localWorkflowUsedHashes_head, localWorkflowUsedHashes_tail) + ".cwl").replace("/", "_")
packedLocalWorkflowFile = os.path.join(self.cacheWorkflowPackDir, localWorkflowPackedName)
# TODO: check whether the repo is newer than the packed file
if not os.path.isfile(packedLocalWorkflowFile) or os.path.getsize(packedLocalWorkflowFile) == 0:
if offline:
raise WFException(
"Cannot allow to materialize packed CWL workflow in offline mode. Risk to access external content.")
# Execute cwltool --pack
with open(packedLocalWorkflowFile, mode='wb') as packedH:
with tempfile.NamedTemporaryFile() as cwl_pack_stderr:
# Writing straight to the file
retVal = subprocess.Popen(
". '{0}'/bin/activate && cwltool --no-doc-cache --pack {1}".format(cwl_install_dir,
localWorkflowFile),
stdout=packedH,
stderr=cwl_pack_stderr,
cwd=cwl_install_dir,
shell=True
).wait()
# Proper error handling
if retVal != 0:
# Reading the output and error for the report
with open(cwl_pack_stderr.name, "r") as c_stF:
cwl_pack_stderr_v = c_stF.read()
errstr = "Could not pack CWL running cwltool --pack {}. Retval {}\n======\nSTDERR\n======\n{}".format(
engineVersion, retVal, cwl_pack_stderr_v)
raise WorkflowEngineException(errstr)
containerTags = set()
# Getting the identifiers
cwlVersion = None
with open(packedLocalWorkflowFile, encoding='utf-8') as pLWH:
wf_yaml = yaml.safe_load(pLWH) # parse packed CWL
cwlVersion = wf_yaml.get('cwlVersion', 'v1.0')
dockerExprParser = jsonpath_ng.ext.parse('$."$graph"..requirements[?class = "DockerRequirement"][*]')
for match in dockerExprParser.find(wf_yaml):
dockerPullId = match.value.get('dockerPull')
# Fallback to dockerImageId if dockerPull was not set
# https://www.commonwl.org/v1.0/CommandLineTool.html#DockerRequirement
if dockerPullId is None:
dockerPullId = match.value.get('dockerImageId')
# TODO: treat other cases like dockerImport or dockerLoad?
containerTags.add(dockerPullId)
newLocalWf = LocalWorkflow(dir=localWf.dir, relPath=packedLocalWorkflowFile,
effectiveCheckout=localWf.effectiveCheckout, langVersion=cwlVersion)
newWfEngine = MaterializedWorkflowEngine(
instance=matWorkflowEngine.instance,
version=engineVersion,
fingerprint=matWorkflowEngine.fingerprint,
engine_path=cwl_install_dir,
workflow=newLocalWf
)
return newWfEngine, list(containerTags)
def simpleContainerFileName(self, imageUrl: URIType) -> RelPath:
"""
This method was borrowed from
https://github.com/common-workflow-language/cwltool/blob/5bdb3d3dd47d8d1b3a1685220b4b6ce0f94c055e/cwltool/singularity.py#L107
"""
# match = re.search(
# pattern=r"([a-z]*://)", string=imageUrl
# )
img_name = _normalize_image_id(imageUrl)
# candidates.append(img_name)
# sif_name = _normalize_sif_id(dockerRequirement["dockerPull"])
# candidates.append(sif_name)
return img_name
    @staticmethod
    def generateDotWorkflow(matWfEng: MaterializedWorkflowEngine, dagFile: AbsPath) -> None:
        """Render the workflow's DAG to *dagFile* using ``cwltool --print-dot``.

        :param matWfEng: the materialized engine plus the (packed) workflow
        :param dagFile: destination path for the Graphviz dot output
        :raises WorkflowEngineException: when cwltool exits with non-zero status
        """
        localWf = matWfEng.workflow
        if os.path.isabs(localWf.relPath):
            localWorkflowFile = localWf.relPath
        else:
            localWorkflowFile = os.path.join(localWf.dir, localWf.relPath)
        engineVersion = matWfEng.version
        cwl_install_dir = matWfEng.engine_path
        # Execute cwltool --print-dot
        with open(dagFile, mode='wb') as packedH:
            with tempfile.NamedTemporaryFile() as cwl_dot_stderr:
                # Writing straight to the file
                retVal = subprocess.Popen(
                    ". '{0}'/bin/activate && cwltool --print-dot {1}".format(cwl_install_dir, localWorkflowFile),
                    stdout=packedH,
                    stderr=cwl_dot_stderr,
                    cwd=cwl_install_dir,
                    shell=True
                ).wait()
                # Proper error handling
                if retVal != 0:
                    # Reading the output and error for the report
                    cwl_dot_stderr.seek(0)
                    cwl_dot_stderr_v = cwl_dot_stderr.read().decode('utf-8', errors='ignore')
                    errstr = "Could not generate CWL representation in dot format using cwltool --print-dot {}. Retval {}\n======\nSTDERR\n======\n{}".format(
                        engineVersion, retVal, cwl_dot_stderr_v)
                    raise WorkflowEngineException(errstr)
    def launchWorkflow(self, matWfEng: MaterializedWorkflowEngine, matInputs: List[MaterializedInput],
                       outputs: List[ExpectedOutput]) -> Tuple[ExitVal, List[MaterializedInput], List[MaterializedOutput]]:
        """
        Method to execute the workflow

        Steps: render the DAG, collect the packed workflow's declared inputs,
        serialize the materialized inputs to a YAML file, build the cwltool
        command line for the selected container technology, run it in the
        engine's venv, and parse cwltool's JSON stdout into outputs.
        """
        localWf = matWfEng.workflow
        if os.path.isabs(localWf.relPath):
            localWorkflowFile = localWf.relPath
        else:
            localWorkflowFile = os.path.join(localWf.dir, localWf.relPath)
        engineVersion = matWfEng.version
        dagFile = os.path.join(self.outputStatsDir, STATS_DAG_DOT_FILE)
        if os.path.exists(localWorkflowFile):
            # CWLWorkflowEngine directory is needed
            cwl_install_dir = matWfEng.engine_path
            # First, generate the graphical representation of the workflow
            self.generateDotWorkflow(matWfEng, dagFile)
            # Then, all the preparations
            cwl_dict_inputs = dict()
            with open(localWorkflowFile, "r") as cwl_file:
                cwl_yaml = yaml.safe_load(cwl_file)  # convert packed CWL to YAML
                # As the workflow has been packed, the #main element appears
                io_parser = jsonpath_ng.ext.parse('$."$graph"[?class = "Workflow"]')
                workflows = dict()
                first_workflow = None
                for match in io_parser.find(cwl_yaml):
                    wf = match.value
                    wfId = wf.get('id')
                    wfIdPrefix = '' if wfId is None else wfId + '/'
                    wf_cwl_yaml_inputs = wf.get('inputs', [])
                    wf_cwl_yaml_outputs = wf.get('outputs', [])
                    workflow = (wfId, wfIdPrefix, wf_cwl_yaml_inputs, wf_cwl_yaml_outputs)
                    workflows[wfId] = workflow
                    if first_workflow is None:
                        first_workflow = workflow
                # Now, deciding which Workflow element is the entry point:
                # prefer #main when several are present, else the first one
                workflow = None
                if first_workflow is None:
                    raise WorkflowEngineException(f"FIXME?: No workflow was found in {localWorkflowFile}")
                elif len(workflows) > 1 and '#main' in workflows:
                    # TODO: have a look at cwltool code and more workflows,
                    # to be sure this heuristic is valid
                    workflow = workflows['#main']
                else:
                    workflow = first_workflow
                wfId, wfIdPrefix, cwl_yaml_inputs, cwl_yaml_outputs = workflow
                # Setting packed CWL inputs (id, type)
                for cwl_yaml_input in cwl_yaml_inputs:  # clean string of packed CWL inputs
                    cwl_yaml_input_id = str(cwl_yaml_input['id'])
                    # Strip the workflow-id prefix (or leading '#') to get the plain input name
                    if cwl_yaml_input_id.startswith(wfIdPrefix):
                        inputId = cwl_yaml_input_id[len(wfIdPrefix):]
                    elif cwl_yaml_input_id[0] == '#':
                        inputId = cwl_yaml_input_id[1:]
                    else:
                        inputId = cwl_yaml_input_id
                    if inputId not in cwl_dict_inputs:
                        cwl_dict_inputs[inputId] = cwl_yaml_input
            # TODO change the hardcoded filename
            inputsFileName = "inputdeclarations.yaml"
            yamlFile = os.path.join(self.workDir, inputsFileName)
            try:
                # Create YAML file
                augmentedInputs = self.createYAMLFile(matInputs, cwl_dict_inputs, yamlFile)
                # NOTE(review): if yamlFile does not exist here the method
                # falls through and implicitly returns None — confirm intent
                if os.path.isfile(yamlFile):
                    # Execute workflow
                    stdoutFilename = os.path.join(self.outputMetaDir, WORKDIR_STDOUT_FILE)
                    stderrFilename = os.path.join(self.outputMetaDir, WORKDIR_STDERR_FILE)
                    # "ab+" appends, so content already in these log files is preserved
                    with open(stdoutFilename, mode="ab+") as cwl_yaml_stdout:
                        with open(stderrFilename, mode="ab+") as cwl_yaml_stderr:
                            intermediateDir = self.intermediateDir + "/"
                            outputDir = self.outputsDir + "/"
                            # This is needed to isolate execution environment
                            # and teach cwltool where to find the cached images
                            instEnv = dict()
                            # These variables are needed to have the installation working
                            # so external commands like singularity or docker can be found
                            for envKey in ('LD_LIBRARY_PATH','PATH'):
                                valToSet = os.environ.get(envKey)
                                if valToSet is not None:
                                    instEnv[envKey] = valToSet
                            instEnv.update(self.container_factory.environment)
                            # Map logger verbosity onto cwltool's flags
                            debugFlag = ''
                            if self.logger.getEffectiveLevel() <= logging.DEBUG:
                                debugFlag = '--debug'
                            elif self.logger.getEffectiveLevel() <= logging.INFO:
                                debugFlag = '--verbose'
                            if self.container_factory.containerType == ContainerType.Singularity:
                                cmdTemplate = "cwltool --outdir {0} {4} --strict --no-doc-cache --disable-pull --singularity --tmp-outdir-prefix={1} --tmpdir-prefix={1} {2} {3}"
                                instEnv['CWL_SINGULARITY_CACHE'] = self.container_factory.cacheDir
                                instEnv['SINGULARITY_CONTAIN'] = '1'
                                if self.writable_containers:
                                    instEnv['SINGULARITY_WRITABLE'] = '1'
                            elif self.container_factory.containerType == ContainerType.Docker:
                                cmdTemplate = "cwltool --outdir {0} {4} --strict --no-doc-cache --disable-pull --tmp-outdir-prefix={1} --tmpdir-prefix={1} {2} {3}"
                            elif self.container_factory.containerType == ContainerType.Podman:
                                # NOTE(review): the base ContainerFactory registers the
                                # feature as 'host_userns', not 'userns' — unless the
                                # Podman factory adds 'userns' itself, this check is
                                # always False; confirm against podman_container.py
                                if self.container_factory.supportsFeature('userns'):
                                    instEnv['PODMAN_USERNS'] = 'keep-id'
                                cmdTemplate = "cwltool --outdir {0} {4} --strict --no-doc-cache --disable-pull '--user-space-docker-cmd=" + self.container_factory.command + "' --tmp-outdir-prefix={1} --tmpdir-prefix={1} {2} {3}"
                            elif self.container_factory.containerType == ContainerType.NoContainer:
                                cmdTemplate = "cwltool --outdir {0} {4} --strict --no-doc-cache --no-container --tmp-outdir-prefix={1} --tmpdir-prefix={1} {2} {3}"
                            else:
                                raise WorkflowEngineException("FATAL ERROR: Unsupported container factory {}".format(
                                    self.container_factory.ContainerType()))
                            cmd = cmdTemplate.format(outputDir, intermediateDir, localWorkflowFile, yamlFile, debugFlag)
                            self.logger.debug("Command => {}".format(cmd))
                            retVal = subprocess.Popen(". '{0}'/bin/activate && {1}".format(cwl_install_dir, cmd),
                                                      stdout=cwl_yaml_stdout,
                                                      stderr=cwl_yaml_stderr,
                                                      cwd=self.workDir,
                                                      shell=True,
                                                      env=instEnv
                                                      ).wait()
                            # cwltool prints the outputs mapping as JSON on stdout
                            cwl_yaml_stdout.seek(0)
                            cwl_yaml_stdout_v = cwl_yaml_stdout.read().decode('utf-8', 'ignore')
                            # Proper error handling
                            try:
                                outputsMapping = json.loads(cwl_yaml_stdout_v)
                                cwl_yaml_stderr_v = ''
                            except json.JSONDecodeError as e:
                                outputsMapping = None
                                cwl_yaml_stderr_v = "Output cwltool JSON decode error: {}".format(e.msg)
                            # NOTE(review): only exit values above 125 (shell /
                            # signal level failures) are treated as fatal here;
                            # lower non-zero values are passed through — confirm
                            if retVal > 125:
                                # Reading the error for the report
                                cwl_yaml_stderr.seek(0)
                                cwl_yaml_stderr_v += cwl_yaml_stderr.read().decode('utf-8', 'ignore')
                                errstr = "[CWL] Failed running cwltool {}. Retval {}\n======\nSTDOUT\n======\n{}\n======\nSTDERR\n======\n{}".format(
                                    engineVersion, retVal, cwl_yaml_stdout_v, cwl_yaml_stderr_v)
                                raise WorkflowEngineException(errstr)
                            # Reading the output for the report
                            matOutputs = self.identifyMaterializedOutputs(outputs, self.outputsDir, outputsMapping)
                            # FIXME: create augmentedInputs properly
                            return retVal, matInputs, matOutputs
            except WorkflowEngineException as wfex:
                raise wfex
            except Exception as error:
                raise WorkflowEngineException(
                    "ERROR: cannot execute the workflow {}, {}".format(localWorkflowFile, error)
                )
        else:
            raise WorkflowEngineException(
                'CWL workflow {} has not been successfully materialized and packed for their execution'.format(
                    localWorkflowFile)
            )
def createYAMLFile(self, matInputs, cwlInputs, filename):
"""
Method to create a YAML file that describes the execution inputs of the workflow
needed for their execution. Return parsed inputs.
"""
try:
execInputs = self.executionInputs(matInputs, cwlInputs)
if len(execInputs) != 0:
with open(filename, mode="w+", encoding="utf-8") as yaml_file:
yaml.dump(execInputs, yaml_file, allow_unicode=True, default_flow_style=False, sort_keys=False)
return execInputs
else:
raise WorkflowEngineException(
"Dict of execution inputs is empty")
except IOError as error:
raise WorkflowEngineException(
"ERROR: cannot create YAML file {}, {}".format(filename, error))
    def executionInputs(self, matInputs: List[MaterializedInput], cwlInputs):
        """
        Setting execution inputs needed to execute the workflow

        Each materialized value is matched, in order, against the candidate
        CWL types declared for its input; the first candidate that accepts
        the value wins (the inner for/else raises when none does).

        :param matInputs: the materialized inputs to translate
        :param cwlInputs: mapping of input name to its packed CWL declaration
        :return: dict suitable for dumping as the cwltool job order file
        :raises WorkflowEngineException: on empty inputs, undeclared input
            names, ill-formed array types or unmatchable values
        """
        if len(matInputs) == 0:  # Is list of materialized inputs empty?
            raise WorkflowEngineException("FATAL ERROR: Execution with no inputs")
        if len(cwlInputs) == 0:  # Is list of declared inputs empty?
            raise WorkflowEngineException("FATAL ERROR: Workflow with no declared inputs")
        execInputs = dict()
        for matInput in matInputs:
            if isinstance(matInput, MaterializedInput):  # input is a MaterializedInput
                # numberOfInputs = len(matInput.values) # number of inputs inside a MaterializedInput
                for input_value in matInput.values:
                    name = matInput.name
                    value_types = cwlInputs.get(name, {}).get('type')
                    if value_types is None:
                        raise WorkflowEngineException("ERROR: input {} not available in workflow".format(name))
                    # Normalize the declaration to a list of candidate types
                    if not isinstance(value_types, list):
                        value_types = [ value_types ]
                    value = input_value
                    for value_type in value_types:
                        # Normalize each candidate to a dict with a 'type' key
                        classType = None
                        if isinstance(value_type, str):
                            classType = value_type
                            value_type = {
                                'type': classType
                            }
                        elif isinstance(value_type, dict):
                            classType = value_type['type']
                        else:
                            self.logger.debug("FIXME? value_type of class {}".format(value_type.__class__.__name__))
                            continue
                        isArray = False
                        if classType == 'null':
                            # 'null' only accepts a missing value
                            if value is not None:
                                continue
                        elif classType == 'array':
                            isArray = True
                            classType = value_type.get('items')
                            if classType is None:
                                raise WorkflowEngineException(
                                    "ERROR: Ill formed array input type for {} in workflow definition: {}".format(
                                        name, value_type))
                        #else: # the other types are managed below
                        if isinstance(value, MaterializedContent):  # value of an input contains MaterializedContent
                            if value.kind in (ContentKind.Directory, ContentKind.File):
                                if not os.path.exists(value.local):
                                    self.logger.warning("Input {} is not materialized".format(name))
                                value_local = value.local
                                if isArray:
                                    execInputs.setdefault(name, []).append({"class": classType, "location": value_local})
                                elif name in execInputs:
                                    raise WorkflowEngineException(
                                        "ERROR: Input {} is not array, but it received more than one value".format(name))
                                else:
                                    execInputs[name] = {"class": classType, "location": value_local}
                            else:  # The error now is managed outside
                                # FIXME: do something better for other kinds
                                #
                                # raise WorkflowEngineException(
                                #     "ERROR: Input {} has values of type {} this code does not know how to handle".format(
                                #         name, value.kind))
                                continue
                        elif isArray:
                            # FIXME: apply additional validations
                            execInputs.setdefault(name, []).append(value)
                        else:
                            # FIXME: apply additional validations
                            execInputs[name] = value
                        # A candidate type accepted the value: stop trying
                        break
                    else:
                        # If we reach this, no value was set up
                        raise WorkflowEngineException(
                            "ERROR: Input {} has value types {} for value of type {}, and this code does not know how to handle it (check types)".format(
                                name, value_types, value.kind))
        return execInputs
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,008
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/fetchers/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import http.client
import io
import os
import paramiko
import paramiko.pkey
from paramiko.config import SSH_PORT as DEFAULT_SSH_PORT
import shutil
import stat
from typing import Any, List, Optional, Tuple, Union
from urllib import request, parse
import urllib.error
from ..common import *
from ..utils.ftp_downloader import FTPDownloader
def fetchClassicURL(remote_file:URIType, cachedFilename:Union[AbsPath, io.BytesIO], secContext:Optional[SecurityContextConfig]=None) -> Tuple[Union[URIType, ContentKind], List[URIWithMetadata]]:
    """
    Method to fetch contents from http, https and ftp

    :param remote_file: the URL to fetch
    :param cachedFilename: destination path, or an already open binary stream
    :param secContext: optional security context providing username/password
        (injected into the URL), an HTTP method override and extra headers
    :return: the kind of fetched content (always a file) and the URI metadata
    :raises WFException: on HTTP errors, chaining the original HTTPError
    """
    headers = {}
    method = None
    orig_remote_file = remote_file
    if isinstance(secContext, dict):
        username = secContext.get('username')
        password = secContext.get('password')
        if username is not None:
            if password is None:
                password = ''
            # Time to set up user and password in URL
            parsedInputURL = parse.urlparse(remote_file)
            netloc = parse.quote(username, safe='') + ':' + parse.quote(password,
                                                                        safe='') + '@' + parsedInputURL.hostname
            if parsedInputURL.port is not None:
                netloc += ':' + str(parsedInputURL.port)
            # Now the credentials are properly set up
            remote_file = parse.urlunparse((parsedInputURL.scheme, netloc, parsedInputURL.path,
                                            parsedInputURL.params, parsedInputURL.query, parsedInputURL.fragment))
        method = secContext.get('method')
        headers = secContext.get('headers', {})
    # Preparing where it is going to be written
    if isinstance(cachedFilename, (io.TextIOBase, io.BufferedIOBase, io.RawIOBase, io.IOBase)):
        download_file = cachedFilename
    else:
        download_file = open(cachedFilename, 'wb')
    uri_with_metadata = None
    try:
        req_remote = request.Request(remote_file, headers=headers, method=method)
        with request.urlopen(req_remote) as url_response:
            uri_with_metadata = URIWithMetadata(url_response.url, dict(url_response.headers.items()))
            # NOTE(review): after an IncompleteRead the response object may be
            # exhausted; this best-effort retry relies on the next copyfileobj
            # returning immediately — confirm against http.client semantics
            while True:
                try:
                    # Try getting it
                    shutil.copyfileobj(url_response, download_file)
                except http.client.IncompleteRead as icread:
                    download_file.write(icread.partial)
                    # Restarting the copy
                    continue
                break
    except urllib.error.HTTPError as he:
        # FIX: chain the original HTTPError so the cause is not lost
        raise WFException("Error fetching {} : {} {}".format(orig_remote_file, he.code, he.reason)) from he
    finally:
        # Closing files opened by this code
        if download_file != cachedFilename:
            download_file.close()
    return ContentKind.File, [ uri_with_metadata ]
def fetchFTPURL(remote_file:URIType, cachedFilename:AbsPath, secContext:Optional[SecurityContextConfig]=None) -> Tuple[Union[URIType, ContentKind], List[URIWithMetadata]]:
    """
    Method to fetch contents from ftp

    :param remote_file: the ftp:// URL to fetch
    :param cachedFilename: destination path for the fetched content
    :param secContext: optional security context providing username/password
    :return: the kind of fetched content (file or directory) and URI metadata
    """
    parsed = parse.urlparse(remote_file)
    conn_params = {
        'HOST': parsed.hostname,
    }
    if parsed.port is not None:
        conn_params['PORT'] = parsed.port
    if isinstance(secContext, dict):
        conn_params['USER'] = secContext.get('username')
        conn_params['PASSWORD'] = secContext.get('password')
    downloader = FTPDownloader(**conn_params)
    fetched = downloader.download(download_path=parsed.path, upload_path=cachedFilename)
    # A list result means a whole directory was mirrored
    kind = ContentKind.Directory if isinstance(fetched, list) else ContentKind.File
    return kind, [ URIWithMetadata(remote_file, {}) ]
def sftpCopy(sftp:paramiko.SFTPClient, sshPath, localPath, sshStat=None) -> Tuple[Union[int,bool], ContentKind]:
    """Recursively copy a remote file or directory tree through SFTP.

    :param sftp: an already connected SFTP client
    :param sshPath: remote path to copy from
    :param localPath: local destination path
    :param sshStat: optional pre-fetched stat of sshPath (used on recursion)
    :return: (number of files copied, ContentKind), or (False, None) when the
        remote entry is neither a regular file nor a directory
    """
    if sshStat is None:
        sshStat = sftp.stat(sshPath)
    # Trios of (remote path, remote stat, local path) to transfer
    transTrios = []
    recur = []
    kind = None
    if stat.S_ISREG(sshStat.st_mode):
        transTrios.append((sshPath, sshStat, localPath))
        kind = ContentKind.File
    elif stat.S_ISDIR(sshStat.st_mode):
        # Recursive
        os.makedirs(localPath, exist_ok=True)
        recur = []
        # List of remote files
        for filename in sftp.listdir(sshPath):
            rPath = os.path.join(sshPath, filename)
            lPath = os.path.join(localPath, filename)
            rStat = sftp.stat(rPath)
            if stat.S_ISREG(rStat.st_mode):
                transTrios.append((rPath, rStat, lPath))
            elif stat.S_ISDIR(rStat.st_mode):
                recur.append((rPath, rStat, lPath))
        kind = ContentKind.Directory
    else:
        return False, None
    # Now, transfer these
    numCopied = 0
    for remotePath, rStat, filename in transTrios:
        sftp.get(remotePath, filename)
        # Preserve remote access/modification times on the local copy
        os.utime(filename, (rStat.st_atime, rStat.st_mtime))
        numCopied += 1
    # And recurse on these
    for rDir, rStat, lDir in recur:
        # FIX: sftpCopy returns a (count, kind) tuple; the previous code added
        # the whole tuple to numCopied, raising TypeError on any subdirectory
        subCopied, _ = sftpCopy(sftp, rDir, lDir, sshStat=rStat)
        numCopied += subCopied
    return numCopied, kind
# TODO: test this codepath
def fetchSSHURL(remote_file:URIType, cachedFilename:AbsPath, secContext:Optional[SecurityContextConfig]=None) -> Tuple[Union[URIType, ContentKind], List[URIWithMetadata]]:
    """
    Method to fetch contents from ssh / sftp servers

    :param remote_file: the ssh:// or sftp:// URI to fetch
    :param cachedFilename: Destination filename for the fetched content
    :param secContext: The security context containing the credentials
    :raises WFException: when no usable credentials are available
    :return: the detected content kind plus the metadata of the fetched URI
    """
    # Sanitizing possible ill-formed inputs
    if not isinstance(secContext, dict):
        secContext = {}
    parsedInputURL = parse.urlparse(remote_file)

    # Although username and password could be obtained from URL, they are
    # intentionally ignored in favour of the security context
    username = secContext.get('username')
    password = secContext.get('password')
    sshKey = secContext.get('key')
    # A username plus either a password or a private key is mandatory
    if (username is None) or ((password is None) and (sshKey is None)):
        raise WFException("Cannot download content from {} without credentials".format(remote_file))

    connBlock = {
        'username': username,
    }
    if sshKey is not None:
        # NOTE(review): paramiko.pkey.PKey is an abstract base class; building
        # it directly from raw key data looks suspicious — confirm whether a
        # concrete class (e.g. RSAKey.from_private_key) was intended here
        pKey = paramiko.pkey.PKey(data=sshKey)
        connBlock['pkey'] = pKey
    else:
        connBlock['password'] = password
    sshHost = parsedInputURL.hostname
    # Fall back to the default SSH port when the URI does not carry one
    sshPort = parsedInputURL.port if parsedInputURL.port is not None else DEFAULT_SSH_PORT
    sshPath = parsedInputURL.path

    t = None
    try:
        t = paramiko.Transport((sshHost, sshPort))
        # Performance reasons!
        # t.window_size = 134217727
        # t.use_compression()
        t.connect(**connBlock)
        sftp = paramiko.SFTPClient.from_transport(t)
        # The copied-file count is discarded; only the content kind is reported
        _ , kind = sftpCopy(sftp,sshPath,cachedFilename)
        return kind, [ URIWithMetadata(remote_file, {}) ]
    finally:
        # Closing the SFTP connection
        if t is not None:
            t.close()
def fetchFile(remote_file:URIType, cachedFilename:AbsPath, secContext:Optional[SecurityContextConfig]=None) -> Tuple[Union[URIType, ContentKind], List[URIWithMetadata]]:
    """
    Copy local contents referenced through a file:// URI.

    :param remote_file: the file:// URI pointing to local content
    :param cachedFilename: Destination filename for the fetched content
    :param secContext: The security context (unused for local content)
    :raises WFException: when the path is missing, or neither file nor directory
    :return: the detected content kind plus the metadata of the fetched URI
    """
    localPath = parse.urlparse(remote_file).path
    if not os.path.exists(localPath):
        raise WFException("Local path {} is not available".format(localPath))

    if os.path.isdir(localPath):
        shutil.copytree(localPath, cachedFilename)
        kind = ContentKind.Directory
    elif os.path.isfile(localPath):
        shutil.copy2(localPath, cachedFilename)
        kind = ContentKind.File
    else:
        raise WFException("Local path {} is neither a file nor a directory".format(localPath))

    return kind, [ URIWithMetadata(remote_file, {}) ]
# Default mapping from URI scheme to its fetch handler; every handler
# shares the (remote_file, cachedFilename, secContext) signature
DEFAULT_SCHEME_HANDLERS = {
    'http': fetchClassicURL,
    'https': fetchClassicURL,
    'ftp': fetchFTPURL,
    'sftp': fetchSSHURL,
    'ssh': fetchSSHURL,
    'file': fetchFile,
}
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,009
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/fetchers/trs_files.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import io
import json
from typing import List, Optional, Tuple
from urllib import request, parse
import urllib.error
from . import fetchClassicURL
from ..common import *
INTERNAL_TRS_SCHEME_PREFIX = 'wfexs.trs.files'
TRS_FILES_SUFFIX = '/files'
TRS_DESCRIPTOR_INFIX = '/descriptor/'
def fetchTRSFiles(remote_file:URIType, cachedFilename:AbsPath, secContext:Optional[SecurityContextConfig]=None) -> Tuple[Union[URIType, ContentKind], List[URIWithMetadata]]:
    """
    Method to download contents from TRS files related to a tool

    :param remote_file: the wfexs.trs.files pseudo-URI whose path embeds the TRS tool URL
    :param cachedFilename: Destination filename for the fetched content
    :param secContext: The security context containing the credentials
    :raises WFException: when the metadata cannot be fetched or no file is listed
    :return: always ContentKind.Directory plus the accumulated metadata
    """
    parsedInputURL = parse.urlparse(remote_file)
    embedded_remote_file = parsedInputURL.path

    # BUGFIX: the descriptor base URL must be derived from the tool URL
    # *without* the '/files' suffix. The previous code unconditionally
    # stripped len(TRS_FILES_SUFFIX) characters, corrupting the URL when
    # the input did not already end with '/files'.
    if embedded_remote_file.endswith(TRS_FILES_SUFFIX):
        metadata_url = embedded_remote_file
        tool_base_url = embedded_remote_file[0:-len(TRS_FILES_SUFFIX)]
    else:
        metadata_url = embedded_remote_file + TRS_FILES_SUFFIX
        tool_base_url = embedded_remote_file
    descriptor_base_url = tool_base_url + TRS_DESCRIPTOR_INFIX

    # Top-level metadata entry, filled in while the files are visited
    topMeta = {
        'fetched': metadata_url,
        'workflow_entrypoint': None,
        'remote_workflow_entrypoint': None
    }
    metadata_array = [
        URIWithMetadata(remote_file, topMeta)
    ]
    try:
        metaio = io.BytesIO()
        _ , metametaio = fetchClassicURL(metadata_url, metaio)
        metadata = json.loads(metaio.getvalue().decode('utf-8'))
        metadata_array.extend(metametaio)
    except urllib.error.HTTPError as he:
        raise WFException("Error fetching or processing TRS files metadata for {} : {} {}".format(remote_file, he.code, he.reason)) from he

    os.makedirs(cachedFilename, exist_ok=True)
    absdirs = set()
    emptyWorkflow = True
    for file_desc in metadata:
        file_rel_path = file_desc.get('path')
        if file_rel_path is not None:
            emptyWorkflow = False
            file_url = descriptor_base_url + file_rel_path
            absfile = os.path.join(cachedFilename, file_rel_path)

            # Intermediate path creation
            reldir = os.path.dirname(file_rel_path)
            if len(reldir) > 0:
                absdir = os.path.join(cachedFilename, reldir)
                if absdir not in absdirs:
                    absdirs.add(absdir)
                    os.makedirs(absdir, exist_ok=True)

            # Each descriptor may be fetched twice: once for its metadata ...
            if file_desc.get('file_type') == 'PRIMARY_DESCRIPTOR':
                descriptorMeta = io.BytesIO()
                _ , metaprimary = fetchClassicURL(file_url, descriptorMeta)
                metadata_array.extend(metaprimary)
                # This metadata can help a lot to get the workflow repo
                metadataPD = json.loads(descriptorMeta.getvalue().decode('utf-8'))
                topMeta['workflow_entrypoint'] = file_rel_path
                topMeta['remote_workflow_entrypoint'] = metadataPD.get('url')

            # ... and once for the raw content (in case no workflow repo is identified)
            _ , metaelem = fetchClassicURL(file_url, absfile, {'headers': { 'Accept': 'text/plain' } })
            metadata_array.extend(metaelem)

    if emptyWorkflow:
        raise WFException("Error processing TRS files for {} : no file was found.\n{}".format(remote_file, metadata))

    return ContentKind.Directory, metadata_array
# Scheme handlers exported by this module; the key is the internal
# WfExS pseudo-scheme declared above (INTERNAL_TRS_SCHEME_PREFIX)
SCHEME_HANDLERS = {
    INTERNAL_TRS_SCHEME_PREFIX: fetchTRSFiles,
}
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,010
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/singularity_container.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import os
import os.path
import re
import shutil
import subprocess
import tempfile
from urllib import parse
import uuid
from typing import Dict, List, Tuple
from .common import *
from .container import ContainerFactory, ContainerFactoryException
from .utils.docker import DockerHelper
class SingularityContainerFactory(ContainerFactory):
    """
    Container factory which materializes container tags as Singularity
    images, caching both the image and a small JSON metadata file
    (registry, repo, alias and docker content digest) next to it.
    """
    # Suffix appended to a container image path to name its metadata file
    META_JSON_POSTFIX = '_meta.json'

    def __init__(self, cacheDir=None, local_config=None, engine_name='unset', tempDir=None):
        """
        Set up the singularity command, its cache environment, and probe
        whether user namespaces are supported by the installation.
        """
        super().__init__(cacheDir=cacheDir, local_config=local_config, engine_name=engine_name, tempDir=tempDir)
        self.runtime_cmd = local_config.get('tools', {}).get('singularityCommand', DEFAULT_SINGULARITY_CMD)

        # This is needed due a bug in singularity 3.6, where
        # singularity pull --disable-cache does not create a container
        singularityCacheDir = os.path.join(self.containersCacheDir, '.singularity')
        os.makedirs(singularityCacheDir, exist_ok=True)
        self._environment.update({
            'SINGULARITY_TMPDIR': self.tempDir,
            'SINGULARITY_CACHEDIR': singularityCacheDir,
        })

        # Now, detect userns feature using some ideas from
        # https://github.com/hpcng/singularity/issues/1445#issuecomment-381588444
        userns_supported = False
        if self.supportsFeature('host_userns'):
            matEnv = dict(os.environ)
            matEnv.update(self.environment)
            with tempfile.NamedTemporaryFile() as s_out, tempfile.NamedTemporaryFile() as s_err:
                s_retval = subprocess.Popen(
                    [self.runtime_cmd, 'exec', '--userns', '/etc', 'true'],
                    env=matEnv,
                    stdout=s_out,
                    stderr=s_err
                ).wait()

                # The command always fails.
                # We only need to find 'Failed to create user namespace'
                # in order to discard this feature
                with open(s_err.name,"r") as c_stF:
                    s_err_v = c_stF.read()
                if 'Failed to create user namespace' not in s_err_v:
                    userns_supported = True
                    self._features.add('userns')

        self.logger.debug(f'Singularity supports userns: {userns_supported}')
        if not userns_supported:
            self.logger.warning('Singularity does not support userns (needed for encrypted working directories)')

    @classmethod
    def ContainerType(cls) -> ContainerType:
        """Return the container type served by this factory."""
        return ContainerType.Singularity

    def materializeContainers(self, tagList: List[ContainerTaggedName], simpleFileNameMethod: ContainerFileNamingMethod, offline: bool = False) -> List[Container]:
        """
        It is assured the containers are materialized

        :param tagList: container tags to materialize
        :param simpleFileNameMethod: maps a tag to a filesystem-safe filename
        :param offline: when True, fail instead of downloading anything
        :raises ContainerFactoryException: on download failures, cache
            collisions, or when a download is needed in offline mode
        :return: one Container instance per input tag
        """
        containersList = []
        matEnv = dict(os.environ)
        matEnv.update(self.environment)
        dhelp = DockerHelper()
        for tag in tagList:
            # It is not an absolute URL, we are prepending the docker://
            parsedTag = parse.urlparse(tag)
            singTag = 'docker://' + tag if parsedTag.scheme == '' else tag
            containerFilename = simpleFileNameMethod(tag)
            localContainerPath = os.path.join(self.engineContainersSymlinkDir,containerFilename)
            localContainerPathMeta = localContainerPath + self.META_JSON_POSTFIX
            self.logger.info("downloading container: {} => {}".format(tag, localContainerPath))

            # First, let's materialize the container image
            imageSignature = None
            tmpContainerPath = None
            tmpContainerPathMeta = None
            if os.path.isfile(localContainerPathMeta):
                # Cached metadata is reused as-is
                with open(localContainerPathMeta, mode="r", encoding="utf8") as tcpm:
                    metadata = json.load(tcpm)
                    registryServer = metadata['registryServer']
                    repo = metadata['repo']
                    alias = metadata['alias']
                    partial_fingerprint = metadata['dcd']
            elif offline:
                raise ContainerFactoryException("Cannot download containers metadata in offline mode from {} to {}".format(tag, localContainerPath))
            else:
                # Query the registry and store the metadata under a
                # temporary, uuid-based name until the image is settled
                tmpContainerPath = os.path.join(self.containersCacheDir,str(uuid.uuid4()))
                tmpContainerPathMeta = tmpContainerPath + self.META_JSON_POSTFIX
                self.logger.debug("downloading temporary container metadata: {} => {}".format(tag, tmpContainerPathMeta))
                with open(tmpContainerPathMeta, mode="w", encoding="utf8") as tcpm:
                    registryServer, repo, alias, partial_fingerprint = dhelp.query_tag(singTag)
                    json.dump({
                        'registryServer': registryServer,
                        'repo': repo,
                        'alias': alias,
                        'dcd': partial_fingerprint,
                    }, tcpm)

            canonicalContainerPath = None
            canonicalContainerPathMeta = None
            if not os.path.isfile(localContainerPath):
                if offline:
                    raise ContainerFactoryException("Cannot download containers in offline mode from {} to {}".format(tag, localContainerPath))
                with tempfile.NamedTemporaryFile() as s_out, tempfile.NamedTemporaryFile() as s_err:
                    if tmpContainerPath is None:
                        tmpContainerPath = os.path.join(self.containersCacheDir,str(uuid.uuid4()))
                    self.logger.debug("downloading temporary container: {} => {}".format(tag, tmpContainerPath))
                    # Singularity command line borrowed from
                    # https://github.com/nextflow-io/nextflow/blob/539a22b68c114c94eaf4a88ea8d26b7bfe2d0c39/modules/nextflow/src/main/groovy/nextflow/container/SingularityCache.groovy#L221
                    s_retval = subprocess.Popen(
                        [self.runtime_cmd, 'pull', '--name', tmpContainerPath, singTag],
                        env=matEnv,
                        stdout=s_out,
                        stderr=s_err
                    ).wait()
                    self.logger.debug("singularity pull retval: {}".format(s_retval))
                    with open(s_out.name,"r") as c_stF:
                        s_out_v = c_stF.read()
                    with open(s_err.name,"r") as c_stF:
                        s_err_v = c_stF.read()
                    self.logger.debug("singularity pull stdout: {}".format(s_out_v))
                    self.logger.debug("singularity pull stderr: {}".format(s_err_v))
                    # Reading the output and error for the report
                    if s_retval == 0:
                        if not os.path.exists(tmpContainerPath):
                            raise ContainerFactoryException("FATAL ERROR: Singularity finished properly but it did not materialize {} into {}".format(tag, tmpContainerPath))
                        imageSignature = ComputeDigestFromFile(tmpContainerPath)
                        # Some filesystems complain when filenames contain 'equal', 'slash' or 'plus' symbols
                        canonicalContainerPath = os.path.join(self.containersCacheDir, imageSignature.replace('=','~').replace('/','-').replace('+','_'))
                        if os.path.exists(canonicalContainerPath):
                            tmpSize = os.path.getsize(tmpContainerPath)
                            canonicalSize = os.path.getsize(canonicalContainerPath)
                            # Remove the temporary one
                            os.unlink(tmpContainerPath)
                            tmpContainerPath = None
                            if tmpContainerPathMeta is not None:
                                os.unlink(tmpContainerPathMeta)
                                tmpContainerPathMeta = None
                            if tmpSize != canonicalSize:
                                # If files were not the same complain
                                # This should not happen!!!!!
                                raise ContainerFactoryException("FATAL ERROR: Singularity cache collision for {}, with differing sizes ({} local, {} remote {})".format(imageSignature,canonicalSize,tmpSize,tag))
                        else:
                            shutil.move(tmpContainerPath, canonicalContainerPath)
                            tmpContainerPath = None
                        # Now, create the relative symbolic link
                        if os.path.lexists(localContainerPath):
                            os.unlink(localContainerPath)
                        os.symlink(os.path.relpath(canonicalContainerPath,self.engineContainersSymlinkDir),localContainerPath)
                    else:
                        errstr = """Could not materialize singularity image {}. Retval {}
======
STDOUT
======
{}
======
STDERR
======
{}""".format(singTag, s_retval, s_out_v, s_err_v)
                        # Best-effort cleanup of the partial download
                        if os.path.exists(tmpContainerPath):
                            try:
                                os.unlink(tmpContainerPath)
                            except:
                                pass
                        raise ContainerFactoryException(errstr)

            # Only metadata was generated
            if tmpContainerPathMeta is not None:
                if canonicalContainerPath is None:
                    canonicalContainerPath = os.path.normpath(os.path.join(self.engineContainersSymlinkDir, os.readlink(localContainerPath)))
                canonicalContainerPathMeta = canonicalContainerPath + self.META_JSON_POSTFIX
                shutil.move(tmpContainerPathMeta, canonicalContainerPathMeta)
            if canonicalContainerPathMeta is not None:
                if os.path.lexists(localContainerPathMeta):
                    os.unlink(localContainerPathMeta)
                os.symlink(os.path.relpath(canonicalContainerPathMeta,self.engineContainersSymlinkDir),localContainerPathMeta)

            # Then, compute the signature
            # NOTE(review): the freshly-pulled path uses ComputeDigestFromFile's
            # default representation while this fallback passes repMethod=nihDigest;
            # confirm both produce the same digest representation
            if imageSignature is None:
                imageSignature = ComputeDigestFromFile(localContainerPath, repMethod=nihDigest)
            containersList.append(
                Container(
                    origTaggedName=tag,
                    taggedName=singTag,
                    signature=imageSignature,
                    fingerprint=repo + '@' + partial_fingerprint,
                    type=self.containerType,
                    localPath=localContainerPath
                )
            )
        return containersList
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,011
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/encrypted_fs.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import subprocess
import tempfile
from .common import *
# This is needed to support different FUSE encryption filesystem implementations
class EncryptedFSType(enum.Enum):
    """Supported FUSE-based encrypted filesystem implementations."""
    EncFS = 'encfs'
    GoCryptFS = 'gocryptfs'
# Implementation chosen when the configuration does not name one
DEFAULT_ENCRYPTED_FS_TYPE = EncryptedFSType.EncFS
# Default executable name for each implementation, used when no explicit
# command path is provided by the local configuration
DEFAULT_ENCRYPTED_FS_CMD = {
    EncryptedFSType.EncFS: 'encfs',
    EncryptedFSType.GoCryptFS: 'gocryptfs',
}
# Idle timeout, in minutes
DEFAULT_ENCRYPTED_FS_IDLE_TIMEOUT = 5
def _mountEncFS(encfs_cmd, encfs_idleMinutes, uniqueEncWorkDir, uniqueWorkDir, uniqueRawWorkDir, clearPass:str, allowOther:bool = False):
    """
    Initialize (when needed) and mount an encfs encrypted filesystem.

    :param encfs_cmd: path to the encfs executable
    :param encfs_idleMinutes: idle minutes before automatic unmount
    :param uniqueEncWorkDir: directory holding the encrypted payload
    :param uniqueWorkDir: mount point exposing the decrypted view
    :param uniqueRawWorkDir: working directory for the subprocess call
    :param clearPass: passphrase, fed through stdin
    :param allowOther: when True, other users may access the mount point
    :raises WFException: when the init/mount command fails
    """
    with tempfile.NamedTemporaryFile() as encfs_init_stdout, tempfile.NamedTemporaryFile() as encfs_init_stderr:
        encfsCommand = [
            encfs_cmd,
            '-i', str(encfs_idleMinutes),
            '--stdinpass',
            '--standard',
            uniqueEncWorkDir,
            uniqueWorkDir,
        ]
        # This parameter can be a security hole
        if allowOther:
            encfsCommand += ['--', '-o', 'allow_other']

        efs = subprocess.Popen(
            encfsCommand,
            stdin=subprocess.PIPE,
            stdout=encfs_init_stdout,
            stderr=encfs_init_stderr,
            cwd=uniqueRawWorkDir,
        )
        # The passphrase travels through stdin, never the command line
        efs.communicate(input=clearPass.encode('utf-8'))
        retval = efs.wait()

        # Reading the output and error for the report
        if retval != 0:
            with open(encfs_init_stdout.name, "r") as stdout_fh:
                encfs_init_stdout_v = stdout_fh.read()
            with open(encfs_init_stderr.name, "r") as stderr_fh:
                encfs_init_stderr_v = stderr_fh.read()
            errstr = "Could not init/mount encfs (retval {})\nCommand: {}\n======\nSTDOUT\n======\n{}\n======\nSTDERR\n======\n{}".format(retval, ' '.join(encfsCommand), encfs_init_stdout_v, encfs_init_stderr_v)
            raise WFException(errstr)
def _mountGoCryptFS(gocryptfs_cmd, gocryptfs_idleMinutes, uniqueEncWorkDir, uniqueWorkDir, uniqueRawWorkDir, clearPass:str, allowOther:bool = False):
    """
    Initialize (when needed) and mount a gocryptfs encrypted filesystem.

    :param gocryptfs_cmd: path to the gocryptfs executable
    :param gocryptfs_idleMinutes: idle minutes before automatic unmount
    :param uniqueEncWorkDir: directory holding the encrypted payload
    :param uniqueWorkDir: mount point exposing the decrypted view
    :param uniqueRawWorkDir: working directory for the subprocess calls
    :param clearPass: passphrase, fed through stdin
    :param allowOther: when True, other users may access the mount point
    :raises WFException: when either initialization or mount fails
    """
    with tempfile.NamedTemporaryFile() as gocryptfs_init_stdout, tempfile.NamedTemporaryFile() as gocryptfs_init_stderr:
        # First, detect whether there is an already created filesystem
        gocryptfsInfo = [
            gocryptfs_cmd,
            '-info',
            uniqueEncWorkDir
        ]
        retval = subprocess.call(
            gocryptfsInfo,
            stdin=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            cwd=uniqueRawWorkDir,
        )
        if retval != 0:
            # Let's try creating it!
            gocryptfsInit = [
                gocryptfs_cmd,
                '-init',
                uniqueEncWorkDir
            ]
            gocryptfsCommand = gocryptfsInit
            efs = subprocess.Popen(
                gocryptfsInit,
                stdin=subprocess.PIPE,
                stdout=gocryptfs_init_stdout,
                stderr=gocryptfs_init_stderr,
                cwd=uniqueRawWorkDir,
            )
            efs.communicate(input=clearPass.encode('utf-8'))
            retval = efs.wait()
        if retval == 0:
            # And now, let's mount it
            gocryptfsMount = [
                gocryptfs_cmd,
                '-i',str(gocryptfs_idleMinutes)+'m',
            ]
            if allowOther:
                # This parameter can be a security hole
                gocryptfsMount.append('-allow_other')
            gocryptfsMount.extend([
                uniqueEncWorkDir,
                uniqueWorkDir
            ])
            gocryptfsCommand = gocryptfsMount
            efs = subprocess.Popen(
                gocryptfsMount,
                stdin=subprocess.PIPE,
                stdout=gocryptfs_init_stdout,
                # BUGFIX: stderr was redirected to the *stdout* capture file,
                # so the STDERR section of the failure report was always empty
                # (both the init branch above and _mountEncFS use the stderr file)
                stderr=gocryptfs_init_stderr,
                cwd=uniqueRawWorkDir,
            )
            efs.communicate(input=clearPass.encode('utf-8'))
            retval = efs.wait()
        # Reading the output and error for the report
        if retval != 0:
            with open(gocryptfs_init_stdout.name,"r") as c_stF:
                encfs_init_stdout_v = c_stF.read()
            with open(gocryptfs_init_stderr.name,"r") as c_stF:
                encfs_init_stderr_v = c_stF.read()
            errstr = "Could not init/mount gocryptfs (retval {})\nCommand: {}\n======\nSTDOUT\n======\n{}\n======\nSTDERR\n======\n{}".format(retval,' '.join(gocryptfsCommand),encfs_init_stdout_v,encfs_init_stderr_v)
            raise WFException(errstr)
# Dispatch table from encrypted filesystem type to its mount helper; all
# helpers share the same signature and raise WFException on failure
ENCRYPTED_FS_MOUNT_IMPLEMENTATIONS = {
    EncryptedFSType.EncFS: _mountEncFS,
    EncryptedFSType.GoCryptFS: _mountGoCryptFS,
}
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,012
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/engine.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import sys
import tempfile
import atexit
import shutil
import abc
import enum
import glob
import logging
from .common import *
from typing import Any, Dict, List, Set, Tuple
from collections import namedtuple
from .container import Container, ContainerFactory, NoContainerFactory
from .singularity_container import SingularityContainerFactory
from .docker_container import DockerContainerFactory
from .podman_container import PodmanContainerFactory
from rocrate.rocrate import ROCrate
from rocrate.model.computerlanguage import ComputerLanguage
# Constants
WORKDIR_INPUTS_RELDIR = 'inputs'
WORKDIR_INTERMEDIATE_RELDIR = 'intermediate'
WORKDIR_META_RELDIR = 'meta'
WORKDIR_STATS_RELDIR = 'stats'
WORKDIR_OUTPUTS_RELDIR = 'outputs'
WORKDIR_ENGINE_TWEAKS_RELDIR = 'engineTweaks'
WORKDIR_STDOUT_FILE = 'stdout.txt'
WORKDIR_STDERR_FILE = 'stderr.txt'
WORKDIR_WORKFLOW_META_FILE = 'workflow_meta.yaml'
WORKDIR_SECURITY_CONTEXT_FILE = 'credentials.yaml'
WORKDIR_MARSHALLED_STAGE_FILE = 'stage-state.yaml'
WORKDIR_MARSHALLED_EXECUTE_FILE = 'execution-state.yaml'
WORKDIR_MARSHALLED_EXPORT_FILE = 'export-state.yaml'
WORKDIR_PASSPHRASE_FILE = '.passphrase'
STATS_DAG_DOT_FILE = 'dag.dot'
class WorkflowEngineException(Exception):
    """
    Exceptions fired by instances of WorkflowEngine
    and its subclasses (engine setup and container selection errors)
    """
    pass
# Known container factory implementations, scanned in order when looking
# for the one whose ContainerType() matches the configured container type
CONTAINER_FACTORY_CLASSES = [
    SingularityContainerFactory,
    DockerContainerFactory,
    PodmanContainerFactory,
    NoContainerFactory,
]
class WorkflowEngine(AbstractWorkflowEngineType):
    def __init__(self,
            cacheDir=None,
            workflow_config=None,
            local_config=None,
            engineTweaksDir=None,
            cacheWorkflowDir=None,
            cacheWorkflowInputsDir=None,
            workDir=None,
            outputsDir=None,
            outputMetaDir=None,
            intermediateDir=None,
            tempDir=None,
            secure_exec : bool = False,
            allowOther : bool = False,
            config_directory=None
        ):
        """
        Abstract init method

        :param cacheDir:
        :param workflow_config:
            This one may be needed to identify container overrides
            or specific engine versions
        :param local_config:
        :param engineTweaksDir:
        :param cacheWorkflowDir:
        :param cacheWorkflowInputsDir:
        :param workDir:
        :param outputsDir:
        :param outputMetaDir:
        :param intermediateDir:
        :param tempDir:
        :param secure_exec:
        :param allowOther: passed through to the container factory checks
        :param config_directory: base for resolving relative cacheDir paths
        """
        if local_config is None:
            local_config = dict()
        if workflow_config is None:
            workflow_config = dict()
        self.local_config = local_config

        if config_directory is None:
            config_directory = os.getcwd()
        self.config_directory = config_directory

        # Getting a logger focused on specific classes
        self.logger = logging.getLogger(self.__class__.__name__)

        # This one may be needed to identify container overrides
        # or specific engine versions
        self.workflow_config = workflow_config

        # cacheDir
        if cacheDir is None:
            cacheDir = local_config.get('cacheDir')
        if cacheDir is None:
            cacheDir = tempfile.mkdtemp(prefix='WfExS', suffix='backend')
            # Assuring this temporal directory is removed at the end
            atexit.register(shutil.rmtree, cacheDir)
        else:
            if not os.path.isabs(cacheDir):
                cacheDir = os.path.normpath(os.path.join(config_directory,cacheDir))
            # Be sure the directory exists
            os.makedirs(cacheDir, exist_ok=True)

        # We are using as our own caching directory one located at the
        # generic caching directory, with the name of the class
        # This directory will hold software installations, for instance
        self.weCacheDir = os.path.join(cacheDir, self.__class__.__name__)

        # Needed for those cases where alternate version of the workflow is generated
        if cacheWorkflowDir is None:
            cacheWorkflowDir = os.path.join(cacheDir, 'wf-cache')
        os.makedirs(cacheWorkflowDir, exist_ok=True)
        self.cacheWorkflowDir = cacheWorkflowDir

        # Needed for those cases where there is a shared cache
        if cacheWorkflowInputsDir is None:
            cacheWorkflowInputsDir = os.path.join(cacheDir, 'wf-inputs')
        os.makedirs(cacheWorkflowInputsDir, exist_ok=True)
        self.cacheWorkflowInputsDir = cacheWorkflowInputsDir

        # Setting up working directories, one per instance
        if workDir is None:
            workDir = tempfile.mkdtemp(prefix='WfExS-exec', suffix='workdir')
            # Assuring this temporal directory is removed at the end
            atexit.register(shutil.rmtree, workDir)
        self.workDir = workDir

        # This directory should hold intermediate workflow steps results
        if intermediateDir is None:
            intermediateDir = os.path.join(workDir, WORKDIR_INTERMEDIATE_RELDIR)
        os.makedirs(intermediateDir, exist_ok=True)
        self.intermediateDir = intermediateDir

        # This directory will hold the final workflow results, which could
        # be either symbolic links to the intermediate results directory
        # or newly generated content
        if outputsDir is None:
            outputsDir = os.path.join(workDir, WORKDIR_OUTPUTS_RELDIR)
        os.makedirs(outputsDir, exist_ok=True)
        self.outputsDir = outputsDir

        # This directory will hold diverse metadata, like execution metadata
        # or newly generated content
        if outputMetaDir is None:
            outputMetaDir = os.path.join(workDir, WORKDIR_META_RELDIR, WORKDIR_OUTPUTS_RELDIR)
        os.makedirs(outputMetaDir, exist_ok=True)
        self.outputMetaDir = outputMetaDir

        # This directory will hold stats metadata, as well as the dot representation
        # of the workflow execution
        outputStatsDir = os.path.join(outputMetaDir,WORKDIR_STATS_RELDIR)
        os.makedirs(outputStatsDir, exist_ok=True)
        self.outputStatsDir = outputStatsDir

        # This directory is here for those files which are created in order
        # to tweak or patch workflow executions
        # engine tweaks directory
        if engineTweaksDir is None:
            engineTweaksDir = os.path.join(workDir, WORKDIR_ENGINE_TWEAKS_RELDIR)
        os.makedirs(engineTweaksDir, exist_ok=True)
        self.engineTweaksDir = engineTweaksDir

        # This directory is here for temporary files of any program launched from
        # WfExS or the engine itself. It should be set to TMPDIR on subprocess calls
        if tempDir is None:
            tempDir = tempfile.mkdtemp(prefix='WfExS-exec', suffix='tempdir')
            # Assuring this temporal directory is removed at the end
            atexit.register(shutil.rmtree, tempDir)
        self.tempDir = tempDir

        # Setting up common properties
        self.docker_cmd = local_config.get('tools', {}).get('dockerCommand', DEFAULT_DOCKER_CMD)
        engine_mode = local_config.get('tools', {}).get('engineMode')
        if engine_mode is None:
            engine_mode = DEFAULT_ENGINE_MODE
        else:
            engine_mode = EngineMode(engine_mode)
        self.engine_mode = engine_mode

        container_type = local_config.get('tools', {}).get('containerType')
        if container_type is None:
            container_type = DEFAULT_CONTAINER_TYPE
        else:
            container_type = ContainerType(container_type)
        # Validate the requested container type before instantiating anything
        if not self.supportsContainerType(container_type):
            raise WorkflowEngineException(f"Current implementation of {self.__class__.__name__} does not support {container_type}")
        if secure_exec and not self.supportsSecureExecContainerType(container_type):
            raise WorkflowEngineException(f"Due technical limitations, secure or paranoid executions are incompatible with {container_type}")

        # Pick the first factory class serving the requested container type
        for containerFactory in CONTAINER_FACTORY_CLASSES:
            if containerFactory.ContainerType() == container_type:
                self.logger.debug(f"Container type {container_type}")
                self.container_factory = containerFactory(
                    cacheDir=cacheDir,
                    local_config=local_config,
                    engine_name=self.__class__.__name__,
                    tempDir=self.tempDir
                )
                break
        else:
            raise WorkflowEngineException("FATAL: No container factory implementation for {}".format(container_type))

        isUserNS = self.container_factory.supportsFeature('userns')
        self.logger.debug(f"Flags: secure => {secure_exec} , userns => {isUserNS} , allowOther => {allowOther}")
        # These are warnings (not hard errors): the run may still proceed
        if self.container_factory.containerType == ContainerType.Singularity and secure_exec:
            if not allowOther and not isUserNS:
                self.logger.error(f"Secure executions do not work without either enabling FUSE use_allow_other in /etc/fuse.conf or userns in {container_type} system installation")
            if not isUserNS:
                self.logger.error(f"Paranoid executions do not work without enabling userns in {container_type} system installation")

        # Locating the payloads directory, where the nodejs wrapper should be placed
        self.payloadsDir = os.path.join(os.path.dirname(__file__), 'payloads')

        # Whether the containers of each step are writable
        self.writable_containers = workflow_config.get('writable_containers', False)
        if secure_exec and self.writable_containers and self.container_factory.ContainerType() == ContainerType.Singularity:
            raise WorkflowEngineException("FATAL: secure execution and writable containers are incompatible when singularity is being used")

        self.secure_exec = secure_exec
    @classmethod
    @abc.abstractmethod
    def WorkflowType(cls) -> WorkflowType:
        """Return the WorkflowType descriptor this engine implements."""
        pass
@property
def workflowType(self) -> WorkflowType:
return self.WorkflowType()
    @classmethod
    @abc.abstractmethod
    def SupportedContainerTypes(cls) -> Set[ContainerType]:
        """Return the set of container technologies this engine can run steps with."""
        pass
    @classmethod
    @abc.abstractmethod
    def SupportedSecureExecContainerTypes(cls) -> Set[ContainerType]:
        """Return the subset of container technologies usable in secure/paranoid executions."""
        pass
def supportsContainerType(self, containerType : ContainerType) -> bool:
return containerType in self.SupportedContainerTypes()
def supportsSecureExecContainerType(self, containerType : ContainerType) -> bool:
return containerType in self.SupportedSecureExecContainerTypes()
def getEmptyCrateAndComputerLanguage(self, langVersion: WFLangVersion) -> ComputerLanguage:
"""
Due the internal synergies between an instance of ComputerLanguage
and the RO-Crate it is attached to, both of them should be created
here, just at the same time
"""
wfType = self.workflowType
crate = ROCrate()
compLang = ComputerLanguage(crate, identifier=wfType.rocrate_programming_language, properties={
"name": wfType.name,
"alternateName": wfType.trs_descriptor,
"identifier": {
"@id": wfType.uriTemplate.format(langVersion)
},
"url": {
"@id": wfType.url
},
"version": langVersion
})
return crate , compLang
    @abc.abstractmethod
    def identifyWorkflow(self, localWf: LocalWorkflow, engineVer: EngineVersion = None) -> Tuple[EngineVersion, LocalWorkflow]:
        """
        This method should return the effective engine version needed
        to run it when this workflow engine recognizes the workflow type.
        Implementations may also return an augmented LocalWorkflow instance.
        """
        pass
    @abc.abstractmethod
    def materializeEngineVersion(self, engineVersion: EngineVersion) -> Tuple[EngineVersion, EnginePath, Fingerprint]:
        """
        Method to ensure the required engine version is materialized.

        It should raise an exception when the exact version is unavailable,
        and no replacement could be fetched.
        """
        pass
def materializeEngine(self, localWf: LocalWorkflow,
engineVersion: EngineVersion = None) -> MaterializedWorkflowEngine:
"""
Method to ensure the required engine version is materialized
It should raise an exception when the exact version is unavailable,
and no replacement could be fetched
"""
# This method can be forced to materialize an specific engine version
if engineVersion is None:
# The identification could return an augmented LocalWorkflow instance
engineVersion, localWf = self.identifyWorkflow(localWf, engineVersion)
if engineVersion is None:
return None
# This is needed for those cases where there is no exact match
# on the available engine version
engineVersion, enginePath, engineFingerprint = self.materializeEngineVersion(engineVersion)
return MaterializedWorkflowEngine(instance=self,
version=engineVersion,
fingerprint=engineFingerprint,
engine_path=enginePath,
workflow=localWf
)
    @abc.abstractmethod
    def materializeWorkflow(self, matWorfklowEngine: MaterializedWorkflowEngine, offline: bool = False) -> Tuple[MaterializedWorkflowEngine, List[ContainerTaggedName]]:
        """
        Method to ensure the workflow has been materialized. It returns the
        localWorkflow directory, as well as the list of containers.

        For Nextflow it is usually a no-op, but for CWL it requires resolution.
        """
        pass
    @abc.abstractmethod
    def simpleContainerFileName(self, imageUrl: URIType) -> RelPath:
        """
        This method must be implemented to tell which names the workflow engine
        expects on its container cache directories when an image is locally
        materialized (currently only useful for Singularity).
        """
        pass
def materializeContainers(self, listOfContainerTags: List[ContainerTaggedName], offline: bool = False) -> List[Container]:
return self.container_factory.materializeContainers(listOfContainerTags, self.simpleContainerFileName, offline=offline)
    @abc.abstractmethod
    def launchWorkflow(self, matWfEng: MaterializedWorkflowEngine, inputs: List[MaterializedInput],
                       outputs: List[ExpectedOutput]) -> Tuple[ExitVal, List[MaterializedInput], List[MaterializedOutput]]:
        """Run the materialized workflow; returns (exit value, augmented inputs, materialized outputs)."""
        pass
@classmethod
def ExecuteWorkflow(cls, matWfEng: MaterializedWorkflowEngine, inputs: List[MaterializedInput],
outputs: List[ExpectedOutput]) -> Tuple[ExitVal, List[MaterializedInput], List[MaterializedOutput]]:
exitVal, augmentedInputs, matOutputs = matWfEng.instance.launchWorkflow(matWfEng, inputs, outputs)
return exitVal, augmentedInputs, matOutputs
@classmethod
def MaterializeWorkflow(cls, matWfEng: MaterializedWorkflowEngine, offline: bool = False) -> Tuple[MaterializedWorkflowEngine, List[Container]]:
matWfEng, listOfContainerTags = matWfEng.instance.materializeWorkflow(matWfEng, offline=offline)
listOfContainers = matWfEng.instance.materializeContainers(listOfContainerTags, offline=offline)
return matWfEng, listOfContainers
GuessedCardinalityMapping = {
False: (0, 1),
True: (0, sys.maxsize),
}
GuessedOutputKindMapping = {
GeneratedDirectoryContent.__class__.__name__: ContentKind.Directory,
GeneratedContent.__class__.__name__: ContentKind.File,
}
    def identifyMaterializedOutputs(self, expectedOutputs:List[ExpectedOutput], outputsDir:AbsPath, outputsMapping:Mapping[SymbolicOutputName,Any]=None) -> List[MaterializedOutput]:
        """
        Identify outputs by either file glob descriptions or matching with a mapping.

        :param expectedOutputs: declared outputs; when empty, outputs are guessed
            either from the engine's outputs mapping or by scanning outputsDir
        :param outputsDir: directory where the execution left its outputs
        :param outputsMapping: engine-provided mapping from output name to value
            (anything that is not a dict is treated as empty)
        :returns: the list of MaterializedOutput instances
        """
        if not isinstance(outputsMapping, dict):
            outputsMapping = {}
        matOutputs = []
        # This branch is only applied when no outputs section is specified
        if len(expectedOutputs) == 0:
            if len(outputsMapping) == 0:
                # Engines like Nextflow: guess outputs by scanning the directory
                iEntry = 0
                for entry in os.scandir(outputsDir):
                    matValues = None
                    # We are avoiding to enter in loops around '.' and '..'
                    if entry.is_file():
                        matValues = [
                            GeneratedContent(
                                local=entry.path,
                                signature=ComputeDigestFromFile(entry.path, repMethod=nihDigest)
                            )
                        ]
                        guessedOutputKind = ContentKind.File
                    elif entry.is_dir(follow_symlinks=False):
                        matValues = [ GetGeneratedDirectoryContent(entry.path) ]
                        guessedOutputKind = ContentKind.Directory
                    if matValues is not None:
                        # Synthesize a stable-ish name for the unnamed output
                        outputName = 'unnamed_output_{}'.format(iEntry)
                        iEntry += 1
                        matOutput = MaterializedOutput(
                            name=outputName,
                            kind=guessedOutputKind,
                            expectedCardinality=self.GuessedCardinalityMapping[False],
                            values=matValues
                        )
                        matOutputs.append(matOutput)
            else:
                # Engines like CWL: trust the engine-provided outputs mapping
                for outputName, outputVal in outputsMapping.items():
                    matValues = CWLDesc2Content(outputVal, self.logger)
                    # The kind is guessed from the class of the first value
                    matValueClassName = matValues[0].__class__.__name__
                    guessedOutputKind = self.GuessedOutputKindMapping.get(matValueClassName)
                    if guessedOutputKind is None:
                        self.logger.error("FIXME: Define mapping for {}".format(matValueClassName))
                    matOutput = MaterializedOutput(
                        name=outputName,
                        kind=guessedOutputKind,
                        expectedCardinality=self.GuessedCardinalityMapping.get(len(matValues) > 1),
                        values=matValues
                    )
                    matOutputs.append(matOutput)
        # This loop is only applied when the expected outputs are specified
        for expectedOutput in expectedOutputs:
            # A minimum cardinality above zero means the output is mandatory
            cannotBeEmpty = expectedOutput.cardinality[0] != 0
            matValues = []
            if expectedOutput.glob is not None:
                # Directory outputs keep only directories; everything else keeps files
                filterMethod = None
                if expectedOutput.kind == ContentKind.Directory:
                    filterMethod = os.path.isdir
                else:
                    filterMethod = os.path.isfile
                matchedPaths = []
                for matchingPath in glob.iglob(os.path.join(outputsDir,expectedOutput.glob),recursive=True):
                    # Getting what it is only interesting for this
                    if filterMethod(matchingPath):
                        matchedPaths.append(matchingPath)
                if len(matchedPaths) == 0 and cannotBeEmpty:
                    self.logger.warning("Output {} got no path for pattern {}".format(expectedOutput.name, expectedOutput.glob))
                for matchedPath in matchedPaths:
                    theContent = None
                    if expectedOutput.kind == ContentKind.Directory:
                        theContent = GetGeneratedDirectoryContent(
                            matchedPath,
                            uri=None, # TODO: generate URIs when it is advised
                            preferredFilename=expectedOutput.preferredFilename
                        )
                    elif expectedOutput.kind == ContentKind.File:
                        theContent = GeneratedContent(
                            local=matchedPath,
                            uri=None, # TODO: generate URIs when it is advised
                            signature=ComputeDigestFromFile(matchedPath, repMethod=nihDigest),
                            preferredFilename=expectedOutput.preferredFilename
                        )
                    else:
                        # Reading the value from a file, as the glob is telling that
                        with open(matchedPath, mode='r', encoding='utf-8', errors='ignore') as mP:
                            theContent = mP.read()
                    matValues.append(theContent)
            else:
                # No glob: the value must come from the outputs mapping
                outputVal = outputsMapping.get(expectedOutput.name)
                if (outputVal is None) and cannotBeEmpty:
                    self.logger.warning("Output {} got no match from the outputs mapping".format(expectedOutput.name))
                matValues = CWLDesc2Content(outputVal, self.logger, expectedOutput)
            matOutput = MaterializedOutput(
                name=expectedOutput.name,
                kind=expectedOutput.kind,
                expectedCardinality=expectedOutput.cardinality,
                values=matValues
            )
            matOutputs.append(matOutput)
        return matOutputs
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,013
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/utils/docker.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
from typing import NamedTuple, Optional, Tuple
import urllib.parse
from dxf import DXF, _schema2_mimetype as DockerManifestV2MIMEType
# Needed for proper error handling
import requests
class DockerHelperException(Exception):
    """Raised when a Docker registry query or tag parsing operation fails."""
class Credentials(NamedTuple):
    """Registry credentials: the registry domain plus the username/password pair."""
    domain: str
    username: str
    password: str
# This is needed to obtain the remote repo digest
class DXFFat(DXF):
# See https://docs.docker.com/registry/spec/manifest-v2-2/ for
# "fat" manifest description
FAT_schema2_mimetype = 'application/vnd.docker.distribution.manifest.list.v2+json'
def get_fat_manifest_and_response(self, alias, http_method='get'):
"""
Request the "fat" manifest for an alias, which returns the list
of all the available architectures, and returns the manifest and
the response.
:param alias: Alias name.
:type alias: str
:rtype: tuple
:returns: Tuple containing the "fat" manifest as a string (JSON)
and the `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`_
"""
try:
headersFATV2 = {
'Accept': self.FAT_schema2_mimetype
}
r = self._request(http_method, 'manifests/' + alias, headers=headersFATV2)
except requests.exceptions.HTTPError as he:
if he.response.status_code != 404:
raise he
headersV2 = {
'Accept': DockerManifestV2MIMEType
}
r = self._request(http_method, 'manifests/' + alias, headers=headersV2)
return r.content.decode('utf-8'), r
def get_fat_manifest_and_dcd(self, alias, http_method='get'):
"""
Request the "fat" manifest for an alias, which returns the list
of all the available architectures, and returns the manifest and
the response.
:param alias: Alias name.
:type alias: str
:rtype: tuple
:returns: Tuple containing the "fat" manifest as a string (JSON)
and the dcd
"""
fat_manifest, r = self.get_fat_manifest_and_response(alias, http_method=http_method)
return fat_manifest, r.headers.get('Docker-Content-Digest')
def get_fat_manifest(self, alias):
"""
Get the "fat" manifest for an alias
:param alias: Alias name.
:type alias: str
:rtype: str
:returns: The "fat" manifest as string (JSON)
"""
fat_manifest, _ = self.get_fat_manifest_and_response(alias)
return fat_manifest
def _get_fat_dcd(self, alias):
"""
Get the Docker-Content-Digest header for the "fat manifest"
of an alias.
:param alias: Alias name.
:type alias: str
:rtype: str
:returns: DCD header for the alias.
"""
# https://docs.docker.com/registry/spec/api/#deleting-an-image
# Note When deleting a manifest from a registry version 2.3 or later,
# the following header must be used when HEAD or GET-ing the manifest
# to obtain the correct digest to delete:
# Accept: application/vnd.docker.distribution.manifest.v2+json
_ , fat_dcd = self.get_fat_manifest_and_dcd(alias, http_method='head')
return fat_dcd
class DockerHelper(abc.ABC):
    """Queries Docker registries for tag fingerprints, with optional credentials."""
    # Domain used when a tag carries no registry host
    DEFAULT_DOCKER_REGISTRY = 'docker.io'
    # Actual API endpoint behind docker.io
    DOCKER_REGISTRY = 'registry-1.docker.io'
    # Alias used when a tag carries no explicit version/digest
    DEFAULT_ALIAS = 'latest'
    def __init__(self):
        # Default credentials are no credentials
        self.creds = {
            None: Credentials(None, None, None)
        }
        # These credentials are used only when querying
        self.choose_domain()
    def add_creds(self, username : str, password : str, domain : Optional[str] = None):
        """Register credentials for a registry domain (None = default entry)."""
        self.creds[domain] = Credentials(domain=domain, username=username, password=password)
    def choose_domain(self, domain_name : Optional[str] = None):
        """Select the active credentials; unknown domains fall back to the default."""
        if domain_name not in self.creds:
            domain_name = None
        self.domain = self.creds[domain_name]
    def _auth(self, dxf, response):
        """Helper method for DXF machinery"""
        dxf.authenticate(self.domain.username, self.domain.password, actions=['pull'], response=response)
    def query_tag(self, tag : str) -> Tuple[str, str, str, str]:
        """
        Resolve a Docker tag into (registry server, repo, alias, fingerprint).

        :param tag: a plain ("ubuntu:20.04") or docker:// prefixed tag
        :raises DockerHelperException: when the tag cannot be parsed or the
            registry query fails
        """
        parsedTag = urllib.parse.urlparse(tag)
        if parsedTag.scheme == '':
            # Bare tags are normalized to the docker:// scheme before parsing
            docker_tag = 'docker://' + tag
            parsedTag = urllib.parse.urlparse(docker_tag)
        else:
            docker_tag = tag
        if parsedTag.scheme != 'docker':
            raise DockerHelperException(f'Unable to parse {tag} as a Docker tag')
        # Deciding the partial repo and alias
        # A digest separator ('@sha256:') takes precedence over a plain ':'
        splitPos = parsedTag.path.find('@sha256:')
        if splitPos == -1:
            splitPos = parsedTag.path.find(':')
        if splitPos != -1:
            repo = parsedTag.path[0:splitPos]
            alias = parsedTag.path[splitPos+1:]
        else:
            repo = parsedTag.path
            alias = self.DEFAULT_ALIAS
        # Deciding the registry server and finishing adjustment of repo
        # A netloc without dots is treated as the first repo path component
        registry = None
        if '.' not in parsedTag.netloc:
            registry = self.DEFAULT_DOCKER_REGISTRY
            repo = parsedTag.netloc + repo
        else:
            registry = parsedTag.netloc
            repo = repo[1:]
        # Last repo adjustment, in case it is a 'library' one
        if '/' not in repo:
            repo = 'library/' + repo
        registryServer = registry
        if registry == self.DEFAULT_DOCKER_REGISTRY:
            registryServer = self.DOCKER_REGISTRY
        # Connecting to the registry
        dxffat = DXFFat(registryServer, repo, self._auth)
        try:
            # This is needed for the cases of compatibility "FAT" manifest
            manifest_str , partial_fingerprint = dxffat.get_fat_manifest_and_dcd(alias)
            manifest = json.loads(manifest_str)
            if manifest.get('schemaVersion',1) == 1:
                # Schema v1 manifests need the plain DCD instead
                partial_fingerprint = dxffat._get_dcd(alias)
        except Exception as e:
            raise DockerHelperException(f"Unable to obtain fingerprint from {tag}. Reason {e}")
        return registryServer, repo, alias, partial_fingerprint
#print(dxf.list_aliases())
#
#dxfq = DXF('quay.io', 'biocontainers/samtools', auth)
#
#print(dxfq.list_aliases())
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,014
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/utils/marshalling_handling.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from ..common import *
from functools import partial
import abc
import collections.abc
# This method was inspired by https://stackoverflow.com/a/52989965
def marshall_namedtuple(obj):
    """
    Recursively translate namedtuples (and objects exposing ``_marshall``)
    into plain dictionaries, descending into mappings and iterables.
    Atomic values are returned untouched.
    """
    # Objects which know how to marshall themselves go first
    if hasattr(obj, '_marshall'):
        return marshall_namedtuple(obj._marshall())
    # namedtuple: turn it into a dict, remembering the original type name
    if isinstance(obj, tuple) and hasattr(obj, '_fields'):
        marshalled = {f: marshall_namedtuple(v) for f, v in zip(obj._fields, obj)}
        marshalled['_type'] = obj.__class__.__name__
        return marshalled
    # Mappings are rebuilt with the very same mapping type
    if isinstance(obj, (collections.abc.Mapping, dict)):
        return type(obj)((k, marshall_namedtuple(v)) for k, v in obj.items())
    # Other iterables (but not strings) are rebuilt element by element
    if isinstance(obj, collections.abc.Iterable) and not isinstance(obj, str):
        return type(obj)(marshall_namedtuple(v) for v in obj)
    # Instances of abstract classes are summarized by their class name
    if isinstance(obj, abc.ABC):
        return {
            '_instance_of': obj.__class__.__name__
        }
    # Classes themselves are summarized by name
    if isinstance(obj, abc.ABCMeta):
        return {
            '_class': obj.__name__
        }
    return obj
def unmarshall_namedtuple(obj, myglobals = None):
    """
    Recursively translate dictionaries produced by :func:`marshall_namedtuple`
    back into namedtuples (and class references), descending into iterables.

    :param myglobals: namespace used to resolve '_type' / '_class' names;
        defaults to this module's globals
    """
    if isinstance(obj, (collections.abc.Mapping, dict)):
        # Originally a class: resolve it by name and return the class itself
        if '_class' in obj:
            lookup = globals() if myglobals is None else myglobals
            return lookup[obj['_class']]
        if '_type' in obj:
            # Originally a namedtuple: resolve its type and rebuild it
            remaining = obj.copy()
            type_name = remaining.pop('_type')
            lookup = globals() if myglobals is None else myglobals
            clazz = lookup[type_name]
        else:
            # Plain mapping: rebuild with the very same mapping type
            remaining = obj
            clazz = type(obj)
        rebuilt = {k: unmarshall_namedtuple(v, myglobals) for k, v in remaining.items()}
        return clazz(**rebuilt)
    if isinstance(obj, collections.abc.Iterable) and not isinstance(obj, str):
        return type(obj)(unmarshall_namedtuple(item, myglobals) for item in obj)
    return obj
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,015
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/cache_handler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import hashlib
import json
import logging
import os
import os.path
import shutil
import urllib.parse
import uuid
from typing import List, Mapping
from typing import Optional, Tuple, Union
from .common import *
class SchemeHandlerCacheHandler:
    """
    Content-addressed download cache: fetched content is stored under its
    digest, while SHA1-of-URI symlinks (plus JSON metadata side files)
    point at it. Fetching is delegated to per-scheme handlers.
    """
    def __init__(self, cacheDir, schemeHandlers:Mapping[str,ProtocolFetcher]):
        # Getting a logger focused on specific classes
        self.logger = logging.getLogger(self.__class__.__name__)
        # TODO: create caching database
        self.cacheDir = cacheDir
        # Maps lowercase URI scheme -> fetcher callable
        self.schemeHandlers = {}
        self.addSchemeHandlers(schemeHandlers)
    def addSchemeHandlers(self, schemeHandlers:Mapping[str,ProtocolFetcher]) -> None:
        """Register additional scheme handlers; non-dict inputs are ignored."""
        if isinstance(schemeHandlers, dict):
            self.schemeHandlers.update(schemeHandlers)
    def fetch(self, remote_file:Union[urllib.parse.ParseResult, URIType], destdir:AbsPath, offline:bool, ignoreCache:bool=False, registerInCache:bool=True, secContext:Optional[SecurityContextConfig]=None) -> Tuple[ContentKind, AbsPath, List[URIWithMetadata]]:
        """
        Fetch remote_file into the cache at destdir, resolving URI
        indirections iteratively until actual content is obtained.

        :param remote_file: a URI string or an already parsed URL
        :param destdir: cache directory where content and 'uri_hashes' live
        :param offline: when True, raise instead of downloading
        :param ignoreCache: force a refetch even if a cache entry exists
        :param registerInCache: when False, always refetch and do not reuse entries
        :param secContext: security context forwarded to the scheme handler
        :returns: (content kind, cached path, accumulated URI metadata)
        :raises WFException: on directory creation, offline or fetch failures
        """
        # The directory with the content, whose name is based on sha256
        if not os.path.exists(destdir):
            try:
                os.makedirs(destdir)
            except IOError:
                errstr = "ERROR: Unable to create directory for workflow inputs {}.".format(destdir)
                raise WFException(errstr)
        # The directory where the symlinks derived from SHA1 obtained from URIs
        # to the content are placed
        hashDir = os.path.join(destdir,'uri_hashes')
        if not os.path.exists(hashDir):
            try:
                os.makedirs(hashDir)
            except IOError:
                errstr = "ERROR: Unable to create directory for workflow URI hashes {}.".format(hashDir)
                raise WFException(errstr)
        # This filename will only be used when content is being fetched
        tempCachedFilename = os.path.join(destdir, 'caching-' + str(uuid.uuid4()))
        # This is an iterative process, where the URI is resolved and peeled until a basic fetching protocol is reached
        inputKind = remote_file
        metadata_array = []
        while not isinstance(inputKind, ContentKind):
            the_remote_file = inputKind
            if isinstance(the_remote_file, urllib.parse.ParseResult):
                parsedInputURL = the_remote_file
                the_remote_file = urllib.parse.urlunparse(the_remote_file)
            else:
                parsedInputURL = urllib.parse.urlparse(the_remote_file)
            # Cache entry names are derived from the SHA1 of the URI itself
            input_file = hashlib.sha1(the_remote_file.encode('utf-8')).hexdigest()
            metadata_input_file = input_file + '_meta.json'
            uriCachedFilename = os.path.join(hashDir, input_file)
            uriMetaCachedFilename = os.path.join(hashDir, metadata_input_file)
            # TODO: check cached state in future database
            # Cleaning up
            if registerInCache and ignoreCache:
                # Removing the metadata
                if os.path.exists(uriMetaCachedFilename):
                    os.unlink(uriMetaCachedFilename)
                # Removing the symlink
                if os.path.exists(uriCachedFilename):
                    os.unlink(uriCachedFilename)
                # We cannot remove the content as
                # it could be referenced by other symlinks
            if not registerInCache or ignoreCache or not os.path.exists(uriCachedFilename) or not os.path.exists(uriMetaCachedFilename):
                # As this is a handler for online resources, comply with offline mode
                if offline:
                    raise WFException("Cannot download content in offline mode from {} to {}".format(remote_file, uriCachedFilename))
                # Content is fetched here
                theScheme = parsedInputURL.scheme.lower()
                schemeHandler = self.schemeHandlers.get(theScheme)
                if schemeHandler is None:
                    raise WFException('No {} scheme handler for {} (while processing {})'.format(theScheme, the_remote_file, remote_file))
                try:
                    # Content is fetched here
                    inputKind, fetched_metadata_array = schemeHandler(the_remote_file, tempCachedFilename, secContext=secContext)
                    fingerprint = None
                    if isinstance(inputKind, ContentKind):
                        # Real content was fetched: fingerprint it and
                        # sanity-check the declared kind against the filesystem
                        if os.path.isfile(tempCachedFilename): # inputKind == ContentKind.File:
                            fingerprint = ComputeDigestFromFile(tempCachedFilename, repMethod=stringifyFilenameDigest)
                            putativeInputKind = ContentKind.File
                        elif os.path.isdir(tempCachedFilename): # inputKind == ContentKind.Directory:
                            fingerprint = ComputeDigestFromDirectory(tempCachedFilename, repMethod=stringifyFilenameDigest)
                            putativeInputKind = ContentKind.Directory
                        else:
                            raise WFException("Cached {} from {} is neither file nor directory".format(tempCachedFilename, remote_file))
                        if inputKind != putativeInputKind:
                            self.logger.error("FIXME: Mismatch at {} : {} vs {}".format(remote_file, inputKind, putativeInputKind))
                    # Saving the metadata
                    with open(uriMetaCachedFilename, mode="w", encoding="utf-8") as mOut:
                        # Serializing the metadata
                        metaStructure = {
                            'metadata_array': list(map(lambda m: {'uri': m.uri, 'metadata': m.metadata}, fetched_metadata_array))
                        }
                        if fingerprint is not None:
                            metaStructure['kind'] = str(inputKind.value)
                            metaStructure['fingerprint'] = fingerprint
                        else:
                            # The handler returned another URI to resolve next
                            metaStructure['resolves_to'] = inputKind
                        json.dump(metaStructure, mOut)
                    # Now, creating the symlink
                    if fingerprint is not None:
                        finalCachedFilename = os.path.join(destdir, fingerprint)
                        if os.path.isfile(finalCachedFilename):
                            os.unlink(finalCachedFilename)
                        elif os.path.isdir(finalCachedFilename):
                            shutil.rmtree(finalCachedFilename)
                        os.rename(tempCachedFilename, finalCachedFilename)
                        next_input_file = os.path.relpath(finalCachedFilename, hashDir)
                    else:
                        next_input_file = hashlib.sha1(inputKind.encode('utf-8')).hexdigest()
                    # lexists also catches dangling symlinks
                    if os.path.lexists(uriCachedFilename):
                        os.unlink(uriCachedFilename)
                    os.symlink(next_input_file, uriCachedFilename)
                except WFException as we:
                    raise we
                except Exception as e:
                    raise WFException("Cannot download content from {} to {} (while processing {}) (temp file {}): {}".format(the_remote_file, uriCachedFilename, remote_file, tempCachedFilename, e))
            else:
                # Cache hit: recover kind and metadata from the side file
                with open(uriMetaCachedFilename, mode="r", encoding="utf-8") as mIn:
                    # Deserializing the metadata
                    metaStructure = json.load(mIn)
                    inputKind = metaStructure.get('kind')
                    if inputKind is None:
                        # Another indirection: keep resolving in the next iteration
                        inputKind = metaStructure['resolves_to']
                    else:
                        inputKind = ContentKind(inputKind)
                    finalCachedFilename = os.path.normpath(os.path.join(hashDir, os.readlink(uriCachedFilename)))
                    fetched_metadata_array = list(map(lambda m: URIWithMetadata(m['uri'],m['metadata']), metaStructure['metadata_array']))
            # Store the metadata
            metadata_array.extend(fetched_metadata_array)
        return inputKind, finalCachedFilename, metadata_array
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,016
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/nextflow_engine.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import datetime
import functools
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import yaml
from typing import Any, Dict, List, Set, Tuple
from .common import *
from .engine import WorkflowEngine, WorkflowEngineException
from .engine import WORKDIR_STDOUT_FILE, WORKDIR_STDERR_FILE, STATS_DAG_DOT_FILE
from .fetchers import fetchClassicURL
# A default name for the static bash command, used when the local
# configuration does not provide 'staticBashCommand' (looked up in PATH)
DEFAULT_STATIC_BASH_CMD = 'bash.static'
@functools.lru_cache()
def _tzstring():
try:
with open("/etc/timezone","r") as tzreader:
tzstring = tzreader.readline().rstrip()
except:
# The default for the worst case
tzstring = 'Europe/Madrid'
return tzstring
class NextflowWorkflowEngine(WorkflowEngine):
    """Workflow engine implementation driving Nextflow executions."""

    # Upstream source repository of the Nextflow engine
    NEXTFLOW_REPO = 'https://github.com/nextflow-io/nextflow'
    # Engine version used when neither workflow nor config pins one
    DEFAULT_NEXTFLOW_VERSION = '19.04.1'
    # Minimum version enforced when the container factory is Podman
    # (see __init__ and identifyWorkflow, which bump older versions to it)
    DEFAULT_NEXTFLOW_VERSION_WITH_PODMAN = '20.01.0'
    # Docker image used when the engine itself is run inside Docker
    DEFAULT_NEXTFLOW_DOCKER_IMAGE = 'nextflow/nextflow'
    # Retries for dockerized runs (see runNextflowCommandInDocker)
    DEFAULT_MAX_RETRIES = 5
    # Default cap written as executor.cpus (see launchWorkflow)
    DEFAULT_MAX_CPUS = 4
    ENGINE_NAME = 'nextflow'
    # Container technologies accepted for plain executions
    SUPPORTED_CONTAINER_TYPES = {
        ContainerType.NoContainer,
        ContainerType.Singularity,
        ContainerType.Docker,
        ContainerType.Podman,
    }
    # Container technologies accepted for secure executions
    SUPPORTED_SECURE_EXEC_CONTAINER_TYPES = {
        ContainerType.NoContainer,
        ContainerType.Singularity,
        # ContainerType.Podman,
    }
def __init__(self,
             cacheDir=None,
             workflow_config=None,
             local_config=None,
             engineTweaksDir=None,
             cacheWorkflowDir=None,
             cacheWorkflowInputsDir=None,
             workDir=None,
             outputsDir=None,
             outputMetaDir=None,
             intermediateDir=None,
             tempDir=None,
             secure_exec: bool = False,
             allowOther: bool = False,
             config_directory=None
             ):
    """Set up the Nextflow engine from local and workflow configuration.

    All directory parameters are forwarded to WorkflowEngine.__init__;
    the rest of the body resolves the tool paths and engine settings
    this class needs (Java, static bash, Nextflow version, profile).
    """
    super().__init__(cacheDir=cacheDir, workflow_config=workflow_config, local_config=local_config,
                     engineTweaksDir=engineTweaksDir, cacheWorkflowDir=cacheWorkflowDir,
                     cacheWorkflowInputsDir=cacheWorkflowInputsDir,
                     workDir=workDir, outputsDir=outputsDir, intermediateDir=intermediateDir,
                     tempDir=tempDir, outputMetaDir=outputMetaDir, secure_exec=secure_exec,
                     allowOther=allowOther, config_directory=config_directory)
    toolsSect = local_config.get('tools', {})
    # Obtaining the full path to Java
    self.java_cmd = shutil.which(toolsSect.get('javaCommand', DEFAULT_JAVA_CMD))
    # Obtaining the full path to static bash
    self.static_bash_cmd = shutil.which(toolsSect.get('staticBashCommand', DEFAULT_STATIC_BASH_CMD))
    if self.static_bash_cmd is None:
        self.logger.warning("Static bash command is not available. It could be needed for some images")
    # Deciding whether to unset JAVA_HOME: only when the resolved Java
    # lives inside this installation's own directory tree
    wfexs_dirname = os.path.dirname(os.path.abspath(sys.argv[0]))
    self.unset_java_home = os.path.commonpath([self.java_cmd, wfexs_dirname]) == wfexs_dirname
    engineConf = toolsSect.get(self.ENGINE_NAME, {})
    workflowEngineConf = workflow_config.get(self.ENGINE_NAME, {})
    self.nxf_image = engineConf.get('dockerImage', self.DEFAULT_NEXTFLOW_DOCKER_IMAGE)
    # The workflow config wins over the local engine config; either way,
    # Podman executions are bumped to the first version supporting it
    nxf_version = workflowEngineConf.get('version')
    if nxf_version is None:
        if self.container_factory.containerType == ContainerType.Podman:
            default_nextflow_version = self.DEFAULT_NEXTFLOW_VERSION_WITH_PODMAN
        else:
            default_nextflow_version = self.DEFAULT_NEXTFLOW_VERSION
        nxf_version = engineConf.get('version', default_nextflow_version)
    elif self.container_factory.containerType == ContainerType.Podman and nxf_version < self.DEFAULT_NEXTFLOW_VERSION_WITH_PODMAN:
        # NOTE(review): plain string comparison of version labels; works for
        # these dotted numeric versions but is not general semver ordering
        nxf_version = self.DEFAULT_NEXTFLOW_VERSION_WITH_PODMAN
    self.nxf_version = nxf_version
    self.max_retries = engineConf.get('maxRetries', self.DEFAULT_MAX_RETRIES)
    self.max_cpus = engineConf.get('maxProcesses', self.DEFAULT_MAX_CPUS)
    # The profile to force, in case it cannot be guessed
    self.nxf_profile = workflowEngineConf.get('profile')
    # Setting the assets directory
    self.nxf_assets = os.path.join(self.engineTweaksDir, 'assets')
    os.makedirs(self.nxf_assets, exist_ok=True)
@classmethod
def WorkflowType(cls) -> WorkflowType:
    """Describe the Nextflow workflow type this engine implements."""
    descriptor = {
        'engineName': cls.ENGINE_NAME,
        'name': 'Nextflow',
        'clazz': cls,
        'uriMatch': ['https://www.nextflow.io/'],
        'uriTemplate': 'https://www.nextflow.io/',
        'url': 'https://www.nextflow.io/',
        'trs_descriptor': 'NFL',
        'rocrate_programming_language': '#nextflow',
    }
    return WorkflowType(**descriptor)
@classmethod
def SupportedContainerTypes(cls) -> Set[ContainerType]:
    """Container technologies this engine can drive."""
    return cls.SUPPORTED_CONTAINER_TYPES
@classmethod
def SupportedSecureExecContainerTypes(cls) -> Set[ContainerType]:
    """Container technologies usable in secure executions."""
    return cls.SUPPORTED_SECURE_EXEC_CONTAINER_TYPES
def identifyWorkflow(self, localWf: LocalWorkflow, engineVer: EngineVersion = None) -> Tuple[EngineVersion, LocalWorkflow]:
    """
    This method should return the effective engine version needed
    to run it when this workflow engine recognizes the workflow type.

    Returns (None, None) when the entrypoint does not look like Nextflow.
    The candidate main script and engine version are guessed from
    nextflow.config when present.
    """
    nfPath = localWf.dir
    if localWf.relPath is not None:
        nfPath = os.path.join(nfPath, localWf.relPath)
    if os.path.isdir(nfPath):
        nfDir = nfPath
        candidateNf = None
    else:
        nfDir = os.path.dirname(nfPath)
        candidateNf = os.path.basename(nfPath)
    nfConfig = os.path.join(nfDir, 'nextflow.config')
    verPat = re.compile(r"nextflowVersion *= *['\"]!?[>=]*([^ ]+)['\"]")
    mainPat = re.compile(r"mainScript *= *['\"]([^\"]+)['\"]")
    engineVer = None
    #else:
    #    # We are deactivating the engine version capture from the config
    #    verPat = None
    if os.path.isfile(nfConfig):
        # Now, let's guess the nextflow version and mainScript.
        # Each pattern is set to None after its first match so only the
        # first occurrence in the config is honoured.
        with open(nfConfig, "r") as nc_config:
            for line in nc_config:
                if verPat is not None:
                    matched = verPat.search(line)
                    if matched:
                        engineVer = matched.group(1)
                        verPat = None
                if mainPat is not None:
                    matched = mainPat.search(line)
                    if matched:
                        putativeCandidateNf = matched.group(1)
                        if candidateNf is not None:
                            if candidateNf != putativeCandidateNf:
                                # This should be a warning
                                raise WorkflowEngineException(
                                    'Nextflow mainScript in manifest {} differs from the one requested {}'.format(
                                        putativeCandidateNf, candidateNf))
                        else:
                            candidateNf = putativeCandidateNf
                        mainPat = None
    if candidateNf is None:
        # Default case
        self.logger.debug("Default candidateNf")
        candidateNf = 'main.nf'
    entrypoint = os.path.join(nfDir, candidateNf)
    self.logger.debug("Testing entrypoint {} (dir {} candidate {})".format(entrypoint, nfDir, candidateNf))
    # Checking that the workflow entrypoint does exist
    if not os.path.isfile(entrypoint):
        raise WorkflowEngineException(
            'Could not find mainScript {} in Nextflow workflow directory {} '.format(candidateNf, nfDir))
    # Now, the moment to identify whether it is a nextflow workflow
    with open(entrypoint, mode='r', encoding='iso-8859-1') as hypNf:
        wholeNf = hypNf.read()
        # Better recognition is needed, maybe using nextflow
        for pat in ('nextflow', 'process '):
            if pat in wholeNf:
                break
        else:
            # No nextflow keyword was detected
            return None, None
    # Setting a default engineVer
    if engineVer is None:
        engineVer = self.nxf_version
    elif self.container_factory.containerType == ContainerType.Podman and engineVer < self.DEFAULT_NEXTFLOW_VERSION_WITH_PODMAN:
        # Podman needs at least DEFAULT_NEXTFLOW_VERSION_WITH_PODMAN
        engineVer = self.DEFAULT_NEXTFLOW_VERSION_WITH_PODMAN
    # The engine version should be used to create the id of the workflow language
    return engineVer, LocalWorkflow(dir=nfDir, relPath=candidateNf, effectiveCheckout=localWf.effectiveCheckout, langVersion=engineVer)
def materializeEngineVersion(self, engineVersion: EngineVersion) -> Tuple[EngineVersion, EnginePath, Fingerprint]:
    """
    Method to ensure the required engine version is materialized.

    It should raise an exception when the exact version is unavailable,
    and no replacement could be fetched.
    """
    nextflow_install_dir = os.path.join(self.weCacheDir, engineVersion)
    # Running `nextflow info` forces the installation when needed
    retval, nxf_install_stdout_v, nxf_install_stderr_v = self.runNextflowCommand(engineVersion, ['info'], nextflow_path=nextflow_install_dir)
    if retval != 0:
        errstr = "Could not install Nextflow {} . Retval {}\n======\nSTDOUT\n======\n{}\n======\nSTDERR\n======\n{}".format(engineVersion, retval, nxf_install_stdout_v, nxf_install_stderr_v)
        raise WorkflowEngineException(errstr)
    # Getting the version label
    # NOTE(review): without re.MULTILINE the '$' anchor only matches at the
    # end of the whole output, so this likely yields None on multi-line
    # `nextflow info` output — confirm intended behaviour
    verPat = re.compile(r"Version: +(.*)$")
    verMatch = verPat.search(nxf_install_stdout_v)
    engineFingerprint = verMatch.group(1) if verMatch else None
    return engineVersion, nextflow_install_dir, engineFingerprint
def runNextflowCommand(self, nextflow_version: EngineVersion, commandLine: List[str], workdir=None, nextflow_path: EnginePath = None, stdoutFilename: AbsPath = None, stderrFilename: AbsPath = None, runEnv: dict = None) -> Tuple[ExitVal, str, str]:
    """Dispatch a nextflow command line to the configured execution mode.

    Returns the tuple (exit value, captured stdout, captured stderr).
    """
    self.logger.debug('Command => nextflow ' + ' '.join(commandLine))
    if self.engine_mode == EngineMode.Local:
        return self.runLocalNextflowCommand(
            nextflow_version, commandLine, workdir,
            nextflow_install_dir=nextflow_path,
            stdoutFilename=stdoutFilename,
            stderrFilename=stderrFilename,
            runEnv=runEnv
        )
    if self.engine_mode == EngineMode.Docker:
        return self.runNextflowCommandInDocker(
            nextflow_version, commandLine, workdir,
            stdoutFilename=stdoutFilename,
            stderrFilename=stderrFilename,
            runEnv=runEnv
        )
    raise WorkflowEngineException('Unsupported engine mode {} for {} engine'.format(self.engine_mode, self.ENGINE_NAME))
def runLocalNextflowCommand(self, nextflow_version: EngineVersion, commandLine: List[str], workdir=None, nextflow_install_dir: EnginePath = None, stdoutFilename: AbsPath = None, stderrFilename: AbsPath = None, runEnv: dict = None) -> Tuple[int, str, str]:
    """Run a nextflow command using a locally installed launcher script.

    Downloads the launcher when missing, triggers its one-time
    '-download' self-install, then runs commandLine, capturing stdout and
    stderr either into the given files or into temporary ones.
    """
    if nextflow_install_dir is None:
        nextflow_install_dir = os.path.join(self.weCacheDir, nextflow_version)
    cachedScript = os.path.join(nextflow_install_dir, 'nextflow')
    if not os.path.exists(cachedScript):
        os.makedirs(nextflow_install_dir, exist_ok=True)
        nextflow_script_url = 'https://github.com/nextflow-io/nextflow/releases/download/v{0}/nextflow'.format(nextflow_version)
        self.logger.info("Downloading Nextflow {}: {} => {}".format(nextflow_version, nextflow_script_url, cachedScript))
        fetchClassicURL(nextflow_script_url, cachedScript)
    # Checking the installer has execution permissions
    if not os.access(cachedScript, os.R_OK | os.X_OK):
        os.chmod(cachedScript, 0o555)
    # Now, time to run it
    NXF_HOME = os.path.join(nextflow_install_dir, '.nextflow')
    instEnv = dict(os.environ if runEnv is None else runEnv)
    instEnv['NXF_HOME'] = NXF_HOME
    # Needed to tie Nextflow short (no network fetches at run time)
    instEnv['NXF_OFFLINE'] = 'TRUE'
    instEnv['JAVA_CMD'] = self.java_cmd
    if self.unset_java_home:
        instEnv.pop('NXF_JAVA_HOME', None)
        instEnv.pop('JAVA_HOME', None)
    instEnv['NXF_WORKDIR'] = workdir if workdir is not None else self.intermediateDir
    instEnv['NXF_ASSETS'] = self.nxf_assets
    if self.logger.getEffectiveLevel() <= logging.DEBUG:
        instEnv['NXF_DEBUG'] = '1'
        # instEnv['NXF_DEBUG'] = '2'
    #elif self.logger.getEffectiveLevel() <= logging.INFO:
    #    instEnv['NXF_DEBUG'] = '1'
    # FIXME: Should we set NXF_TEMP???
    # This is needed to have Nextflow using the cached contents
    if self.container_factory.containerType == ContainerType.Singularity:
        instEnv['NXF_SINGULARITY_CACHEDIR'] = self.container_factory.cacheDir
    # This is done only once: self-install of the launcher dependencies
    retval = 0
    nxf_run_stdout_v = None
    nxf_run_stderr_v = None
    if not os.path.isdir(NXF_HOME):
        with tempfile.NamedTemporaryFile() as nxf_install_stdout:
            with tempfile.NamedTemporaryFile() as nxf_install_stderr:
                retval = subprocess.Popen(
                    [cachedScript, '-download'],
                    stdout=nxf_install_stdout,
                    stderr=nxf_install_stderr,
                    cwd=nextflow_install_dir,
                    env=instEnv
                ).wait()
                # Reading the output and error for the report
                if retval != 0:
                    if os.path.exists(nxf_install_stdout.name):
                        with open(nxf_install_stdout.name, "r") as c_stF:
                            nxf_run_stdout_v = c_stF.read()
                    else:
                        nxf_run_stdout_v = ''
                    if os.path.exists(nxf_install_stderr.name):
                        with open(nxf_install_stderr.name, "r") as c_stF:
                            nxf_run_stderr_v = c_stF.read()
                    else:
                        nxf_run_stderr_v = ''
    # And now the command is run (only when the self-install succeeded)
    if retval == 0 and isinstance(commandLine, list) and len(commandLine) > 0:
        nxf_run_stdout = None
        nxf_run_stderr = None
        try:
            if stdoutFilename is None:
                nxf_run_stdout = tempfile.NamedTemporaryFile()
                stdoutFilename = nxf_run_stdout.name
            else:
                nxf_run_stdout = open(stdoutFilename, mode='ab+')
            if stderrFilename is None:
                nxf_run_stderr = tempfile.NamedTemporaryFile()
                stderrFilename = nxf_run_stderr.name
            else:
                nxf_run_stderr = open(stderrFilename, mode='ab+')
            retval = subprocess.Popen(
                [cachedScript, *commandLine],
                stdout=nxf_run_stdout,
                stderr=nxf_run_stderr,
                cwd=nextflow_install_dir if workdir is None else workdir,
                env=instEnv
            ).wait()
        finally:
            # Reading the output and error for the report,
            # closing the capture files in every case
            if nxf_run_stdout is not None:
                nxf_run_stdout.seek(0)
                nxf_run_stdout_v = nxf_run_stdout.read()
                nxf_run_stdout_v = nxf_run_stdout_v.decode('utf-8', 'ignore')
                nxf_run_stdout.close()
            if nxf_run_stderr is not None:
                nxf_run_stderr.seek(0)
                nxf_run_stderr_v = nxf_run_stderr.read()
                nxf_run_stderr_v = nxf_run_stderr_v.decode('utf-8', 'ignore')
                nxf_run_stderr.close()
    return retval, nxf_run_stdout_v, nxf_run_stderr_v
def runNextflowCommandInDocker(self, nextflow_version: EngineVersion, commandLine: List[str], workdir=None, stdoutFilename: AbsPath = None, stderrFilename: AbsPath = None, runEnv: dict = None) -> Tuple[ExitVal, str, str]:
    """Run a nextflow command through a dockerized Nextflow image.

    Ensures the image is locally available (pulling it when needed),
    composes the `docker run` invocation with the required environment
    and bind mounts, and retries failed executions with `-resume`.
    """
    # Now, we have to assure the nextflow image is already here
    docker_tag = self.nxf_image + ':' + nextflow_version
    checkimage_params = [
        self.docker_cmd, "images", "--format", "{{.ID}}\t{{.Tag}}", docker_tag
    ]
    retval = 0
    nxf_run_stdout_v = None
    nxf_run_stderr_v = None
    with tempfile.NamedTemporaryFile() as checkimage_stdout:
        with tempfile.NamedTemporaryFile() as checkimage_stderr:
            retval = subprocess.call(checkimage_params, stdout=checkimage_stdout, stderr=checkimage_stderr)
            if retval != 0:
                # Reading the output and error for the report
                with open(checkimage_stdout.name, "r") as c_stF:
                    nxf_run_stdout_v = c_stF.read()
                with open(checkimage_stderr.name, "r") as c_stF:
                    nxf_run_stderr_v = c_stF.read()
                errstr = "ERROR: Nextflow Engine failed while checking Nextflow image (retval {}). Tag: {}\n======\nSTDOUT\n======\n{}\n======\nSTDERR\n======\n{}".format(
                    retval, docker_tag, nxf_run_stdout_v, nxf_run_stderr_v)
                nxf_run_stderr_v = errstr
            # Empty `docker images` output means the tag is not local yet
            do_pull_image = os.path.getsize(checkimage_stdout.name) == 0
    if retval == 0 and do_pull_image:
        # The image is not here yet
        pullimage_params = [
            self.docker_cmd, "pull", docker_tag
        ]
        with tempfile.NamedTemporaryFile() as pullimage_stdout:
            with tempfile.NamedTemporaryFile() as pullimage_stderr:
                retval = subprocess.call(pullimage_params, stdout=pullimage_stdout, stderr=pullimage_stderr)
                if retval != 0:
                    # Reading the output and error for the report
                    with open(pullimage_stdout.name, "r") as c_stF:
                        nxf_run_stdout_v = c_stF.read()
                    with open(pullimage_stderr.name, "r") as c_stF:
                        nxf_run_stderr_v = c_stF.read()
                    # It failed!
                    errstr = "ERROR: Nextflow Engine failed while pulling Nextflow image (retval {}). Tag: {}\n======\nSTDOUT\n======\n{}\n======\nSTDERR\n======\n{}".format(
                        retval, docker_tag, nxf_run_stdout_v, nxf_run_stderr_v)
                    nxf_run_stderr_v = errstr
    if retval == 0 and isinstance(commandLine, list) and len(commandLine) > 0:
        # TODO: run it!!!!
        nxf_run_stdout_v = ''
        try:
            if workdir is None:
                workdir = self.workDir
            else:
                os.makedirs(workdir, exist_ok=True)
        except Exception as error:
            raise WorkflowEngineException("ERROR: Unable to create nextflow working directory. Error: " + str(error))
        # Value needed to compose the Nextflow docker call
        uid = str(os.getuid())
        gid = str(os.getgid())
        # Timezone is needed to get logs properly timed
        tzstring = _tzstring()
        # FIXME: should it be something more restrictive?
        homedir = os.path.expanduser("~")
        nextflow_install_dir = os.path.join(self.weCacheDir, nextflow_version)
        nxf_home = os.path.join(nextflow_install_dir, '.nextflow')
        nxf_assets_dir = self.nxf_assets
        try:
            # Directories required by Nextflow in a Docker
            os.makedirs(nxf_assets_dir, exist_ok=True)
        except Exception as error:
            raise WorkflowEngineException("ERROR: Unable to create nextflow assets directory. Error: " + str(error))
        # The fixed parameters
        nextflow_cmd_pre_vol = [
            self.docker_cmd, "run", "--rm", "--net", "host",
            "-e", "USER",
            "-e", "NXF_DEBUG",
            "-e", "TZ=" + tzstring,
            "-e", "HOME=" + homedir,
            "-e", "NXF_ASSETS=" + nxf_assets_dir,
            "-e", "NXF_USRMAP=" + uid,
            #"-e", "NXF_DOCKER_OPTS=-u "+uid+":"+gid+" -e HOME="+homedir+" -e TZ="+tzstring+" -v "+workdir+":"+workdir+":rw,rprivate,z -v "+project_path+":"+project_path+":rw,rprivate,z",
            "-e", "NXF_DOCKER_OPTS=-u " + uid + ":" + gid + " -e HOME=" + homedir + " -e TZ=" + tzstring + " -v " + workdir + ":" + workdir + ":rw,rprivate,z",
            "-v", "/var/run/docker.sock:/var/run/docker.sock:rw,rprivate,z"
        ]
        validation_cmd_post_vol = [
            "-w", workdir,
            docker_tag,
            "nextflow"
        ]
        validation_cmd_post_vol.extend(commandLine)
        validation_cmd_post_vol_resume = [*validation_cmd_post_vol, '-resume']
        # This one will be filled in by the volume meta declarations, used
        # to generate the volume parameters
        volumes = [
            (homedir + '/', "ro,rprivate,z"),
            # (nxf_assets_dir,"rprivate,z"),
            (workdir + '/', "rw,rprivate,z"),
            # (project_path+'/',"rw,rprivate,z"),
            # (repo_dir+'/',"ro,rprivate,z")
        ]
        # NOTE: a large block of commented-out legacy code (variable_params /
        # variable_infile_params / variable_outfile_params handling inherited
        # from a previous validation harness) used to live here; it only
        # prepared extra RO/RW volumes and --flag arguments and was never
        # executed in this version.
        #
        # Assembling the command line
        validation_params = []
        validation_params.extend(nextflow_cmd_pre_vol)
        for volume_dir, volume_mode in volumes:
            validation_params.append("-v")
            validation_params.append(volume_dir + ':' + volume_dir + ':' + volume_mode)
        validation_params_resume = [*validation_params]
        validation_params.extend(validation_cmd_post_vol)
        validation_params_resume.extend(validation_cmd_post_vol_resume)
        # Retries system was introduced because an insidious
        # bug happens sometimes
        # https://forums.docker.com/t/any-known-problems-with-symlinks-on-bind-mounts/32138
        retries = self.max_retries
        retval = -1
        validation_params_cmd = validation_params
        run_stdout = None
        run_stderr = None
        try:
            if stdoutFilename is None:
                run_stdout = tempfile.NamedTemporaryFile()
                stdoutFilename = run_stdout.name
            else:
                run_stdout = open(stdoutFilename, mode='ab+')
            if stderrFilename is None:
                run_stderr = tempfile.NamedTemporaryFile()
                stderrFilename = run_stderr.name
            else:
                run_stderr = open(stderrFilename, mode='ab+')
            # Retry with `-resume` until success or retries exhausted
            while retries > 0 and retval != 0:
                self.logger.debug('"' + '" "'.join(validation_params_cmd) + '"')
                run_stdout.flush()
                run_stderr.flush()
                retval = subprocess.call(validation_params_cmd, stdout=run_stdout, stderr=run_stderr)
                if retval != 0:
                    retries -= 1
                    self.logger.debug("\nFailed with {} , left {} tries\n".format(retval, retries))
                    validation_params_cmd = validation_params_resume
        finally:
            # Reading the output and error for the report
            if run_stdout is not None:
                run_stdout.seek(0)
                nxf_run_stdout_v = run_stdout.read()
                nxf_run_stdout_v = nxf_run_stdout_v.decode('utf-8', 'ignore')
                run_stdout.close()
            if run_stderr is not None:
                run_stderr.seek(0)
                nxf_run_stderr_v = run_stderr.read()
                nxf_run_stderr_v = nxf_run_stderr_v.decode('utf-8', 'ignore')
                run_stderr.close()
        # Last evaluation
        if retval != 0:
            # It failed!
            errstr = "ERROR: Nextflow Engine failed while executing Nextflow workflow (retval {})\n======\nSTDOUT\n======\n{}\n======\nSTDERR\n======\n{}".format(
                retval, nxf_run_stdout_v, nxf_run_stderr_v)
            nxf_run_stderr_v = errstr
    return retval, nxf_run_stdout_v, nxf_run_stderr_v
# Pattern for searching for process\..*container = ['"]([^'"]+)['"]
# in the flat config dumped by `nextflow config -flat`
ContConfigPat = re.compile(r"process\..*container = '(.+)'$", flags=re.MULTILINE)
# Pattern for searching for container ['"]([^'"]+)['"]
# directives in the main workflow script
ContScriptPat = re.compile(r"^\s*container\s+['\"]([^'\"]+)['\"]")
def materializeWorkflow(self, matWorkflowEngine: MaterializedWorkflowEngine, offline: bool = False) -> Tuple[MaterializedWorkflowEngine, List[ContainerTaggedName]]:
    """
    Method to ensure the workflow has been materialized. It returns the
    localWorkflow directory, as well as the list of containers.

    For Nextflow it is usually a no-op, but for CWL it requires resolution.
    The container tags are harvested both from the flat config dump and
    from `container` directives in the main script.
    """
    # Default nextflow profile is 'standard'
    # parse
    # nextflow config -flat
    localWf = matWorkflowEngine.workflow
    nxf_params = [
        'config',
        '-flat'
    ]
    if self.nxf_profile is not None:
        nxf_params.extend(['-profile', self.nxf_profile])
    nxf_params.append(localWf.dir)
    flat_retval, flat_stdout, flat_stderr = self.runNextflowCommand(
        matWorkflowEngine.version,
        nxf_params,
        workdir=localWf.dir,
        nextflow_path=matWorkflowEngine.engine_path
    )
    if flat_retval != 0:
        errstr = """Could not obtain the flat workflow config Nextflow (fingerprint {}) . Retval {}
======
STDOUT
======
{}
======
STDERR
======
{}""".format(matWorkflowEngine.fingerprint, flat_retval, flat_stdout, flat_stderr)
        raise WorkflowEngineException(errstr)
    # searching for process\..*container = ['"]([^'"]+)['"]
    containerTags = set()
    for contMatch in self.ContConfigPat.finditer(flat_stdout):
        containerTags.add(contMatch.group(1))
    # and main workflow for
    # container ['"]([^'"]+)['"]
    wfEntrypoint = localWf.relPath if os.path.isabs(localWf.relPath) else os.path.join(localWf.dir, localWf.relPath)
    with open(wfEntrypoint, encoding='utf-8') as wfH:
        for line in wfH:
            contMatch = self.ContScriptPat.search(line)
            if contMatch:
                containerTags.add(contMatch.group(1))
    return matWorkflowEngine, list(containerTags)
def simpleContainerFileName(self, imageUrl: URIType) -> RelPath:
    """
    This method was borrowed from
    https://github.com/nextflow-io/nextflow/blob/539a22b68c114c94eaf4a88ea8d26b7bfe2d0c39/modules/nextflow/src/main/groovy/nextflow/container/SingularityCache.groovy#L80
    and translated to Python
    """
    # Drop the scheme prefix (e.g. 'docker://'), when there is one
    _, scheme_sep, remainder = imageUrl.partition('://')
    flatName = remainder if scheme_sep else imageUrl
    # Decide the extension, normalizing embedded '.sif' markers
    if '.sif:' in flatName:
        flatName = flatName.replace('.sif:', '-')
        suffix = '.sif'
    elif flatName.endswith('.sif'):
        flatName = flatName[:-4]
        suffix = '.sif'
    else:
        suffix = '.img'
    # Path and tag separators become dashes in the cached file name
    flatName = flatName.replace(':', '-').replace('/', '-')
    return flatName + suffix
def structureAsNXFParams(self, matInputs: List[MaterializedInput]):
    """Rebuild the nested params dict Nextflow expects.

    Dotted input names become nested dictionaries; materialized file or
    directory contents are replaced by their local paths. Single-element
    value lists are unwrapped to a scalar.
    """
    nxpParams = {}
    for matInput in matInputs:
        node = nxpParams
        splittedPath = matInput.name.split('.')
        # Walk/create the intermediate levels of the dotted name
        for step in splittedPath[:-1]:
            node = node.setdefault(step, {})
        nxfValues = []
        for value in matInput.values:
            if isinstance(value, MaterializedContent):
                if value.kind in (ContentKind.Directory, ContentKind.File):
                    if not os.path.exists(value.local):
                        self.logger.warning("Input {} has values which are not materialized".format(matInput.name))
                    nxfValues.append(value.local)
                else:
                    raise WorkflowEngineException(
                        "ERROR: Input {} has values of type {} this code does not know how to handle".format(matInput.name, value.kind))
            else:
                nxfValues.append(value)
        node[splittedPath[-1]] = nxfValues if len(nxfValues) != 1 else nxfValues[0]
    return nxpParams
def augmentNextflowInputs(self, matHash: Mapping[SymbolicParamName, MaterializedInput], allExecutionParams: Mapping[str, Any], prefix='') -> List[MaterializedInput]:
    """
    Generate additional MaterializedInput for the implicit params.

    Recurses over the params JSON dumped by the trojan script, reusing
    already-materialized inputs (looked up by their dotted name) and
    creating fresh ones for params only known at execution time.
    """
    augmentedInputs = []
    for key, val in allExecutionParams.items():
        linearKey = prefix + key
        if isinstance(val, dict):
            # Nested param group: recurse with the extended dotted prefix
            newAugmentedInputs = self.augmentNextflowInputs(matHash, val, prefix=linearKey + '.')
            augmentedInputs.extend(newAugmentedInputs)
        else:
            augmentedInput = matHash.get(linearKey)
            if augmentedInput is None:
                # Time to create a new materialized input
                theValues = val if isinstance(val, list) else [val]
                # NOTE(review): name=key drops the dotted prefix for nested
                # params, while the lookup above used linearKey — confirm
                # whether linearKey was intended here
                augmentedInput = MaterializedInput(name=key, values=theValues)
            augmentedInputs.append(augmentedInput)
    return augmentedInputs
def launchWorkflow(self, matWfEng: MaterializedWorkflowEngine, matInputs: List[MaterializedInput], outputs: List[ExpectedOutput]) -> Tuple[ExitVal, List[MaterializedInput], List[MaterializedOutput]]:
    """Launch the materialized Nextflow workflow with the given inputs.

    Writes a forced engine configuration (container backend, stats
    reports, cpu cap), patches a copy of the workflow with a small
    Groovy "trojan" that dumps the effective params for provenance,
    runs it, and returns (exit value, augmented inputs, materialized
    outputs).

    Raises WorkflowEngineException when there are no inputs or the
    input declarations file cannot be written.
    """
    if len(matInputs) == 0:  # Is list of materialized inputs empty?
        raise WorkflowEngineException("FATAL ERROR: Execution with no inputs")
    localWf = matWfEng.workflow
    outputStatsDir = self.outputStatsDir
    timelineFile = os.path.join(outputStatsDir, 'timeline.html')
    reportFile = os.path.join(outputStatsDir, 'report.html')
    traceFile = os.path.join(outputStatsDir, 'trace.tsv')
    dagFile = os.path.join(outputStatsDir, STATS_DAG_DOT_FILE)
    # Custom variables setup: environment plus per-backend run options
    runEnv = dict(os.environ)
    optBash = None
    optWritable = None
    runEnv.update(self.container_factory.environment)
    if self.container_factory.containerType == ContainerType.Singularity:
        # Bind a static bash into images which lack one, when available
        if self.static_bash_cmd is not None:
            optBash = f"-B {self.static_bash_cmd}:/bin/bash"
        else:
            optBash = ""
        if self.writable_containers:
            optWritable = "--writable-tmpfs"
        elif self.container_factory.supportsFeature('userns'):
            optWritable = "--userns"
        else:
            optWritable = "--pid"
    elif self.container_factory.containerType == ContainerType.Podman:
        if self.container_factory.supportsFeature('userns'):
            optWritable = "--userns=keep-id"
        else:
            optWritable = ""
    # The forced configuration overrides whatever the workflow declares
    forceParamsConfFile = os.path.join(self.engineTweaksDir, 'force-params.config')
    with open(forceParamsConfFile, mode="w", encoding="utf-8") as fPC:
        if self.container_factory.containerType == ContainerType.Singularity:
            print(
                f"""docker.enabled = false
podman.enabled = false
singularity.enabled = true
singularity.envWhitelist = '{','.join(self.container_factory.environment.keys())}'
singularity.runOptions = '-B {self.cacheWorkflowInputsDir}:{self.cacheWorkflowInputsDir}:ro {optWritable} {optBash}'
singularity.autoMounts = true
""", file=fPC)
        elif self.container_factory.containerType == ContainerType.Docker:
            print(
                f"""singularity.enabled = false
podman.enabled = false
docker.enabled = true
docker.envWhitelist = '{','.join(self.container_factory.environment.keys())}'
docker.runOptions = '-v {self.cacheWorkflowInputsDir}:{self.cacheWorkflowInputsDir}:ro,Z -e TZ="{_tzstring()}"'
docker.fixOwnership = true
""", file=fPC)
        elif self.container_factory.containerType == ContainerType.Podman:
            print(
                f"""singularity.enabled = false
docker.enabled = false
podman.enabled = true
podman.runOptions = '-v {self.cacheWorkflowInputsDir}:{self.cacheWorkflowInputsDir}:ro,Z {optWritable} -e TZ="{_tzstring()}"'
""", file=fPC)
        elif self.container_factory.containerType == ContainerType.NoContainer:
            print(
                f"""docker.enabled = false
singularity.enabled = false
podman.enabled = false
""", file=fPC)
        # Trace fields are detailed at
        # https://www.nextflow.io/docs/latest/tracing.html#trace-fields
        print(
            f"""timeline {{
enabled = true
file = "{timelineFile}"
}}
report {{
enabled = true
file = "{reportFile}"
}}
trace {{
enabled = true
file = "{traceFile}"
fields = 'task_id,process,tag,name,status,exit,module,container,cpus,time,disk,memory,attempt,submit,start,complete,duration,realtime,%cpu,%mem,rss,vmem,peak_rss,peak_vmem,rchar,wchar,syscr,syscw,read_bytes,write_bytes,env,script,error_action'
raw = true
sep = '\0\t\0'
}}
dag {{
enabled = true
file = "{dagFile}"
}}
""", file=fPC)
        if self.max_cpus is not None:
            print(
                f"""
executor.cpus={self.max_cpus}
""", file=fPC)
    # Building the NXF trojan horse in order to obtain a full list of
    # input parameters, for provenance purposes
    trojanDir = os.path.join(self.engineTweaksDir, 'nxf_trojan')
    if os.path.exists(trojanDir):
        shutil.rmtree(trojanDir)
    shutil.copytree(localWf.dir, trojanDir)
    allParamsFile = os.path.join(self.outputMetaDir, 'all-params.json')
    with open(os.path.join(trojanDir, localWf.relPath), mode='a+', encoding='utf-8') as tH:
        print("""
import groovy.json.JsonOutput
def wfexs_allParams()
{{
new File('{0}').write(JsonOutput.toJson(params))
}}
wfexs_allParams()
""".format(allParamsFile), file=tH)
    relInputsFileName = "inputdeclarations.yaml"
    inputsFileName = os.path.join(self.workDir, relInputsFileName)
    # Nextflow consumes the structured params through a YAML params file
    nxpParams = self.structureAsNXFParams(matInputs)
    if len(nxpParams) != 0:
        try:
            with open(inputsFileName, mode="w+", encoding="utf-8") as yF:
                yaml.dump(nxpParams, yF)
        except IOError as error:
            raise WorkflowEngineException(
                "ERROR: cannot create input declarations file {}, {}".format(inputsFileName, error))
    else:
        raise WorkflowEngineException("No parameter was specified! Bailing out")
    runName = 'WfExS-run_' + datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
    nxf_params = [
        '-log', os.path.join(outputStatsDir, 'log.txt'),
        '-c', forceParamsConfFile,
        'run',
        '-name', runName,
        '-offline',
        '-w', self.intermediateDir,
        '-with-dag', dagFile,
        '-with-report', reportFile,
        '-with-timeline', timelineFile,
        '-with-trace', traceFile,
        '-params-file', inputsFileName,
    ]
    if self.nxf_profile is not None:
        nxf_params.extend(['-profile', self.nxf_profile])
    # Using the patched workflow instead of
    # the original one
    nxf_params.append(trojanDir)
    # nxf_params.append(localWf.dir)
    stdoutFilename = os.path.join(self.outputMetaDir, WORKDIR_STDOUT_FILE)
    stderrFilename = os.path.join(self.outputMetaDir, WORKDIR_STDERR_FILE)
    launch_retval, launch_stdout, launch_stderr = self.runNextflowCommand(
        matWfEng.version,
        nxf_params,
        workdir=self.outputsDir,
        nextflow_path=matWfEng.engine_path,
        stdoutFilename=stdoutFilename,
        stderrFilename=stderrFilename,
        runEnv=runEnv
    )
    self.logger.debug(launch_retval)
    self.logger.debug(launch_stdout)
    self.logger.debug(launch_stderr)
    # Creating the augmented inputs from the params dumped by the trojan
    if os.path.isfile(allParamsFile):
        matHash = {}
        for matInput in matInputs:
            matHash[matInput.name] = matInput
        with open(allParamsFile, mode="r", encoding="utf-8") as aPF:
            allExecutionParams = json.load(aPF)
        augmentedInputs = self.augmentNextflowInputs(matHash, allExecutionParams)
    else:
        # The trojan did not run (e.g. the execution failed early)
        augmentedInputs = matInputs
    # Creating the materialized outputs
    # (fix: removed the dead, misspelled local 'matOuputs = []' which
    # shadow-typo'd matOutputs and was never used)
    matOutputs = self.identifyMaterializedOutputs(outputs, self.outputsDir)
    return launch_retval, augmentedInputs, matOutputs
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,017
|
stain/WfExS-backend
|
refs/heads/main
|
/setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Packaging script for the WfExS (Workflow Execution Service) backend."""

import re
import os
import sys

import setuptools

# Make sure version / author / license metadata is read from the checked-out
# copy of the package sitting next to this script, not from a previously
# installed release of the library.
setup_dir = os.path.dirname(__file__)
sys.path.insert(0, setup_dir)

from wfexs_backend import __version__ as wfexs_backend_version
from wfexs_backend import __author__ as wfexs_backend_author
from wfexs_backend import __license__ as wfexs_backend_license

# The README doubles as the PyPI long description
with open("README.md", "r") as readme_fh:
    long_description = readme_fh.read()

# Build install_requires out of requirements.txt, reducing VCS URLs which
# carry an "#egg=name" fragment down to the bare project name
egg_pattern = re.compile(r"#[^#]*egg=([^=&]+)")
with open('requirements.txt') as req_fh:
    requirements = []
    for req_line in req_fh.read().splitlines():
        egg_match = egg_pattern.search(req_line)
        requirements.append(req_line if egg_match is None else egg_match.group(1))

# Non-Python assets which have to travel inside the sdist / wheel
package_data = {
    'wfexs_backend': [
        'payloads/*.bash',
        'schemas/*.json',
    ],
}

setuptools.setup(
    name="wfexs_backend",
    version=wfexs_backend_version,
    scripts=["WfExS-backend.py", "WfExS-config-replicator.py"],
    package_data=package_data,
    author=wfexs_backend_author,
    author_email="lrodrin@users.noreply.github.com, jose.m.fernandez@bsc.es",
    license=wfexs_backend_license,
    description="Workflow Execution Service backend",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/inab/WfExS-backend",
    packages=setuptools.find_packages(),
    install_requires=requirements,
    classifiers=[
        "Programming Language :: Python :: 3",
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
)
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,018
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/podman_container.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import json
import subprocess
import tempfile
from typing import Dict, List, Tuple
from .common import *
from .container import ContainerFactory, ContainerFactoryException
# URI prefix marking an explicit docker registry reference
DOCKER_PROTO = 'docker://'

class PodmanContainerFactory(ContainerFactory):
    """Materializes container images through the ``podman`` CLI.

    Images are referenced with docker-style tagged names; the local podman
    storage is redirected (via XDG_DATA_HOME) into the managed cache dir.
    """

    def __init__(self, cacheDir=None, local_config=None, engine_name='unset', tempDir=None):
        super().__init__(cacheDir=cacheDir, local_config=local_config, engine_name=engine_name, tempDir=tempDir)
        self.runtime_cmd = local_config.get('tools', {}).get('podmanCommand', DEFAULT_PODMAN_CMD)

        # Podman keeps its image store below XDG_DATA_HOME, so point it at
        # the managed containers cache directory
        self._environment.update({
            'XDG_DATA_HOME': self.containersCacheDir,
        })

        # Now, detect whether userns could work
        userns_supported = False
        if self.supportsFeature('host_userns'):
            userns_supported = True
            self._features.add('userns')

        self.logger.debug(f'Podman supports userns: {userns_supported}')

    @classmethod
    def ContainerType(cls) -> ContainerType:
        return ContainerType.Podman

    def _inspect(self, dockerTag : ContainerTaggedName, matEnv) -> Tuple[int, str, str]:
        """Run ``podman inspect`` on the tag.

        :param dockerTag: tagged image name (without the docker:// prefix)
        :param matEnv: materialized environment for the subprocess
        :return: (exit value, decoded stdout, decoded stderr)
        """
        with tempfile.NamedTemporaryFile() as d_out, tempfile.NamedTemporaryFile() as d_err:
            self.logger.debug(f"querying podman container {dockerTag}")
            d_retval = subprocess.Popen(
                [self.runtime_cmd, 'inspect', dockerTag],
                env=matEnv,
                stdout=d_out,
                stderr=d_err
            ).wait()

            self.logger.debug(f"podman inspect {dockerTag} retval: {d_retval}")

            with open(d_out.name, mode="rb") as c_stF:
                # BUGFIX: 'continue' is not a registered codecs error handler,
                # so the first undecodable byte used to raise LookupError;
                # 'ignore' keeps the original best-effort intent.
                # (The return annotation was also fixed: this is str, not bytes.)
                d_out_v = c_stF.read().decode('utf-8', errors='ignore')
            with open(d_err.name, mode="r") as c_stF:
                d_err_v = c_stF.read()

            self.logger.debug(f"podman inspect stdout: {d_out_v}")
            self.logger.debug(f"podman inspect stderr: {d_err_v}")

            return d_retval , d_out_v , d_err_v

    def _pull(self, dockerTag : ContainerTaggedName, matEnv) -> Tuple[int, str, str]:
        """Run ``podman pull`` on the tag.

        :param dockerTag: tagged image name (usually docker://-prefixed)
        :param matEnv: materialized environment for the subprocess
        :return: (exit value, stdout, stderr)
        """
        with tempfile.NamedTemporaryFile() as d_out, tempfile.NamedTemporaryFile() as d_err:
            self.logger.debug(f"pulling podman container {dockerTag}")
            d_retval = subprocess.Popen(
                [self.runtime_cmd, 'pull', dockerTag],
                env=matEnv,
                stdout=d_out,
                stderr=d_err
            ).wait()

            self.logger.debug(f"podman pull {dockerTag} retval: {d_retval}")

            with open(d_out.name, mode="r") as c_stF:
                d_out_v = c_stF.read()
            with open(d_err.name, "r") as c_stF:
                d_err_v = c_stF.read()

            self.logger.debug(f"podman pull stdout: {d_out_v}")
            self.logger.debug(f"podman pull stderr: {d_err_v}")

            return d_retval , d_out_v , d_err_v

    def materializeContainers(self, tagList: List[ContainerTaggedName], simpleFileNameMethod: ContainerFileNamingMethod, offline: bool = False) -> List[Container]:
        """
        It is assured the containers are materialized

        :param tagList: the tagged container names to materialize
        :param simpleFileNameMethod: naming convention (unused by podman,
        kept for interface compatibility with other factories)
        :param offline: accepted for interface compatibility; podman pulls
        are still attempted when the image is missing locally
        :return: one Container instance per input tag
        :raises ContainerFactoryException: when an image cannot be pulled
        or its inspect output cannot be parsed
        """
        containersList = []

        matEnv = dict(os.environ)
        matEnv.update(self.environment)
        for tag in tagList:
            # It is an absolute URL, we are removing the docker://
            if tag.startswith(DOCKER_PROTO):
                dockerTag = tag[len(DOCKER_PROTO):]
                podmanPullTag = tag
            else:
                dockerTag = tag
                podmanPullTag = DOCKER_PROTO + tag

            self.logger.info(f"downloading podman container: {tag}")
            d_retval, d_out_v, d_err_v = self._inspect(dockerTag, matEnv)

            # Time to pull the image
            if d_retval != 0:
                d_retval, d_out_v, d_err_v = self._pull(podmanPullTag, matEnv)
                if d_retval == 0:
                    # Second try: inspect again, now that the pull succeeded
                    d_retval, d_out_v, d_err_v = self._inspect(dockerTag, matEnv)

            if d_retval != 0:
                errstr = """Could not materialize podman image {}. Retval {}
======
STDOUT
======
{}
======
STDERR
======
{}""".format(podmanPullTag, d_retval, d_out_v, d_err_v)
                raise ContainerFactoryException(errstr)

            # Parsing the output from podman inspect (a JSON array of manifests)
            try:
                manifests = json.loads(d_out_v)
                manifest = manifests[0]
            except Exception as e:
                raise ContainerFactoryException(f"FATAL ERROR: Podman finished properly but it did not properly materialize {tag}: {e}")

            # Then, compute the signature
            tagId = manifest['Id']
            fingerprint = None
            if len(manifest['RepoDigests']) > 0:
                fingerprint = manifest['RepoDigests'][0]

            containersList.append(
                Container(
                    origTaggedName=tag,
                    taggedName=dockerTag,
                    signature=tagId,
                    fingerprint=fingerprint,
                    type=self.containerType
                )
            )

        return containersList
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,019
|
stain/WfExS-backend
|
refs/heads/main
|
/WfExS-backend.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import atexit
import logging
import os
import sys
import shutil
import tempfile
import enum
import yaml
# We have preference for the C based loader and dumper, but the code
# should fallback to default implementations when C ones are not present
try:
from yaml import CLoader as YAMLLoader, CDumper as YAMLDumper
except ImportError:
from yaml import Loader as YAMLLoader, Dumper as YAMLDumper
from wfexs_backend.workflow import WF
from wfexs_backend import get_WfExS_version
# Adapted from https://gist.github.com/ptmcg/23ba6e42d51711da44ba1216c53af4ea
# in order to show the value instead of the class name
class ArgTypeMixin(enum.Enum):
    """Mixin which lets an Enum be used directly as an argparse ``type=``.

    Adapted from https://gist.github.com/ptmcg/23ba6e42d51711da44ba1216c53af4ea
    in order to show the member value (instead of the class name) in help
    messages and error reports.
    """

    @classmethod
    def argtype(cls, s: str) -> enum.Enum:
        """Map a command-line string onto the matching enum member."""
        try:
            return cls(s)
        except ValueError:
            # BUGFIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt / SystemExit; only a failed value lookup
            # (ValueError from Enum.__call__) means "not a valid choice"
            raise argparse.ArgumentTypeError(
                f"{s!r} is not a valid {cls.__name__}")

    def __str__(self):
        # Render the member as its user-facing value
        return str(self.value)
class WfExS_Commands(ArgTypeMixin, enum.Enum):
    """Sub-commands accepted by the WfExS-backend command line.

    Each member's value is the literal string typed by the user;
    ArgTypeMixin supplies the argparse parsing / rendering glue.
    """
    Init = 'init'
    ConfigValidate = 'config-validate'
    Stage = 'stage'
    MountWorkDir = 'mount-workdir'
    ExportStage = 'export-stage'
    OfflineExecute = 'offline-execute'
    Execute = 'execute'
    ExportResults = 'export-results'
    ExportCrate = 'export-crate'
# Default local configuration filename, looked up in the current working
# directory when -L / --local-config is not given
DEFAULT_LOCAL_CONFIG_RELNAME = 'wfexs_config.yml'
LOGGING_FORMAT = '%(asctime)-15s - [%(levelname)s] %(message)s'

if __name__ == "__main__":
    # Compose the version banner, optionally including the git checkout hint
    wfexs_version = get_WfExS_version()
    if wfexs_version[1] is None:
        verstr = wfexs_version[0]
    else:
        verstr = "{0[0]} ({0[1]})".format(wfexs_version)

    defaultLocalConfigFilename = os.path.join(os.getcwd(), DEFAULT_LOCAL_CONFIG_RELNAME)
    ap = argparse.ArgumentParser(description="WfExS (workflow execution service) backend "+verstr)
    ap.add_argument('--log-file', dest="logFilename", help='Store messages in a file instead of using standard error and standard output')
    ap.add_argument('-q', '--quiet', dest='logLevel', action='store_const', const=logging.WARNING, help='Only show engine warnings and errors')
    ap.add_argument('-v', '--verbose', dest='logLevel', action='store_const', const=logging.INFO, help='Show verbose (informational) messages')
    ap.add_argument('-d', '--debug', dest='logLevel', action='store_const', const=logging.DEBUG, help='Show debug messages (use with care, as it can disclose passphrases and passwords)')
    ap.add_argument('-L', '--local-config', dest="localConfigFilename", default=defaultLocalConfigFilename, help="Local installation configuration file")
    ap.add_argument('--cache-dir', dest="cacheDir", help="Caching directory")
    ap.add_argument('-W', '--workflow-config', dest="workflowConfigFilename",
                    help="Configuration file, describing workflow and inputs")
    ap.add_argument('-Z', '--creds-config', dest="securityContextsConfigFilename",
                    help="Configuration file, describing security contexts, which hold credentials and similar")
    ap.add_argument('-J', '--staged-job-dir', dest='workflowWorkingDirectory',
                    help="Already staged job directory (to be used with {})".format(str(WfExS_Commands.OfflineExecute)))
    ap.add_argument('--full', dest='doMaterializedROCrate', action='store_true',
                    help="Should the RO-Crate contain a copy of the inputs (and outputs)? (to be used with {})".format(' or '.join(map(lambda command: str(command), (WfExS_Commands.ExportStage, WfExS_Commands.ExportCrate)))))
    ap.add_argument('command', help='Command to run', nargs='?', type=WfExS_Commands.argtype, choices=WfExS_Commands, default=WfExS_Commands.ConfigValidate)
    ap.add_argument('-V', '--version', action='version', version='%(prog)s version ' + verstr)
    args = ap.parse_args()

    # Setting up the log
    loggingConf = {
        'format': LOGGING_FORMAT,
    }
    logLevel = logging.INFO
    if args.logLevel:
        logLevel = args.logLevel
    loggingConf['level'] = logLevel

    if args.logFilename is not None:
        loggingConf['filename'] = args.logFilename
        # loggingConf['encoding'] = 'utf-8'

    logging.basicConfig(**loggingConf)

    # First, try loading the configuration file
    localConfigFilename = args.localConfigFilename
    if localConfigFilename and os.path.exists(localConfigFilename):
        with open(localConfigFilename, mode="r", encoding="utf-8") as cf:
            local_config = yaml.load(cf, Loader=YAMLLoader)
    else:
        local_config = {}
        if localConfigFilename and not os.path.exists(localConfigFilename):
            print("[WARNING] Configuration file {} does not exist".format(localConfigFilename), file=sys.stderr)

    if args.cacheDir:
        # BUGFIX: the CLI cache directory used to be stored under the key
        # 'cache-directory', which nothing below reads; the key actually
        # consumed a few lines later is 'cacheDir', so --cache-dir was
        # being silently ignored
        local_config['cacheDir'] = args.cacheDir

    # In any case, assuring the cache directory does exist
    cacheDir = local_config.get('cacheDir')
    if cacheDir:
        os.makedirs(cacheDir, exist_ok=True)
    else:
        cacheDir = tempfile.mkdtemp(prefix='wes', suffix='backend')
        local_config['cacheDir'] = cacheDir
        # Assuring this temporal directory is removed at the end
        atexit.register(shutil.rmtree, cacheDir)

    # A filename is needed later, in order to initialize installation keys
    if not localConfigFilename:
        localConfigFilename = defaultLocalConfigFilename

    # Hints for the default path for the Crypt4GH keys
    config_directory = os.path.dirname(localConfigFilename)
    config_relname = os.path.basename(localConfigFilename)

    # Initialize (and create config file)
    if args.command in (WfExS_Commands.Init, WfExS_Commands.Stage, WfExS_Commands.Execute):
        updated_config, local_config = WF.bootstrap(local_config, config_directory, key_prefix=config_relname)

        # Last, should config be saved back?
        if updated_config or not os.path.exists(localConfigFilename):
            print("* Storing updated configuration at {}".format(localConfigFilename))
            with open(localConfigFilename, mode="w", encoding="utf-8") as cf:
                yaml.dump(local_config, cf, Dumper=YAMLDumper)

        # We are finishing here!
        if args.command == WfExS_Commands.Init:
            sys.exit(0)

    # Is the work already staged?
    wfInstance = WF(local_config, config_directory)

    # This is needed to be sure the encfs instance is unmounted
    if args.command != WfExS_Commands.MountWorkDir:
        atexit.register(wfInstance.cleanup)

    if args.command in (WfExS_Commands.MountWorkDir, WfExS_Commands.ExportStage, WfExS_Commands.OfflineExecute, WfExS_Commands.ExportResults, WfExS_Commands.ExportCrate):
        wfInstance.fromWorkDir(args.workflowWorkingDirectory)
    elif not args.workflowConfigFilename:
        print("[ERROR] Workflow config was not provided! Stopping.", file=sys.stderr)
        sys.exit(1)
    elif args.command == WfExS_Commands.ConfigValidate:
        retval = wfInstance.validateConfigFiles(args.workflowConfigFilename, args.securityContextsConfigFilename)
        sys.exit(retval)
    else:
        wfInstance.fromFiles(args.workflowConfigFilename, args.securityContextsConfigFilename)

    print("* Command \"{}\". Working directory will be {}".format(args.command, wfInstance.workDir), file=sys.stderr)
    sys.stderr.flush()

    # Each command is a stop along the full Execute pipeline
    if args.command in (WfExS_Commands.Stage, WfExS_Commands.Execute):
        instanceId = wfInstance.stageWorkDir()
        print("* Instance {} (to be used with -J)".format(instanceId))

    if args.command in (WfExS_Commands.ExportStage, WfExS_Commands.Execute):
        wfInstance.createStageResearchObject(args.doMaterializedROCrate)

    if args.command in (WfExS_Commands.OfflineExecute, WfExS_Commands.Execute):
        wfInstance.executeWorkflow(offline=args.command == WfExS_Commands.OfflineExecute)

    if args.command in (WfExS_Commands.ExportResults, WfExS_Commands.Execute):
        wfInstance.exportResults()

    if args.command in (WfExS_Commands.ExportCrate, WfExS_Commands.Execute):
        wfInstance.createResultsResearchObject(args.doMaterializedROCrate)
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,020
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/fetchers/pride.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import io
import json
from typing import List, Optional, Tuple
from urllib import request, parse
import urllib.error
from . import fetchClassicURL
from ..common import *
# REST endpoint resolving a PRIDE project id into its metadata record
PRIDE_PROJECTS_REST='https://www.ebi.ac.uk/pride/ws/archive/v2/projects/'

def fetchPRIDEProject(remote_file:URIType, cachedFilename:AbsPath, secContext:Optional[SecurityContextConfig]=None) -> Tuple[Union[URIType, ContentKind], List[URIWithMetadata]]:
    """
    Method to resolve contents from PRIDE project ids

    :param remote_file: the pride.project CURIE; its path component is the project id
    :param cachedFilename: Destination filename for the fetched content (not
    written here: this fetcher only redirects to the project's FTP dataset URL)
    :param secContext: The security context containing the credentials
    :return: the project dataset FTP URL, plus the metadata gathering trail
    :raises WFException: when the metadata cannot be fetched or lacks the
    expected datasetFtpUrl link
    """
    parsedInputURL = parse.urlparse(remote_file)
    projectId = parsedInputURL.path
    metadata_url = parse.urljoin(PRIDE_PROJECTS_REST, projectId)

    metadata_array = [
        URIWithMetadata(remote_file, {'fetched': metadata_url})
    ]
    metadata = None
    try:
        metaio = io.BytesIO()
        _ , metametaio = fetchClassicURL(metadata_url, metaio)
        metadata = json.loads(metaio.getvalue().decode('utf-8'))
        metadata_array.extend(metametaio)
    except urllib.error.HTTPError as he:
        # Chain the original HTTP error for easier debugging
        raise WFException("Error fetching PRIDE metadata for {} : {} {}".format(projectId, he.code, he.reason)) from he

    try:
        # The HAL "_links" section carries the FTP location of the dataset
        pride_project_url = metadata['_links']['datasetFtpUrl']['href']
    except Exception as e:
        # Chain the original lookup failure as well
        raise WFException("Error processing PRIDE project metadata for {} : {}".format(remote_file, e)) from e

    return pride_project_url, metadata_array
# These are schemes from identifiers.org
# Maps each URI scheme onto the fetcher callable which resolves it
SCHEME_HANDLERS = {
    'pride.project': fetchPRIDEProject,
}
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,021
|
stain/WfExS-backend
|
refs/heads/main
|
/wfexs_backend/common.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import abc
import base64
import enum
import functools
import hashlib
import os
from typing import Any, Callable, List, Mapping, NamedTuple
from typing import NewType, Optional, Pattern, Tuple, Type, Union
# Patching default context in order to load CA certificates from certifi
import certifi
import ssl
def create_augmented_context(purpose=ssl.Purpose.SERVER_AUTH, *, cafile=None, capath=None, cadata=None):
    """Drop-in replacement for ssl.create_default_context which also
    trusts certifi's CA bundle, on top of whatever the caller supplies."""
    context = ssl.create_default_context(purpose=purpose, cafile=cafile, capath=capath, cadata=cadata)
    context.load_verify_locations(cafile=certifi.where())
    return context

# Monkey-patch the process-wide default HTTPS context factory, so urllib
# (and anything else using ssl._create_default_https_context) validates
# server certificates against certifi's CA bundle as well
if ssl._create_default_https_context != create_augmented_context:
    ssl._create_default_https_context = create_augmented_context
# Default executable names of the external tools WfExS drives; each one can
# be overridden through the local configuration file
DEFAULT_GIT_CMD = 'git'
DEFAULT_DOCKER_CMD = 'docker'
DEFAULT_SINGULARITY_CMD = 'singularity'
DEFAULT_PODMAN_CMD = 'podman'
DEFAULT_JAVA_CMD = 'java'
DEFAULT_FUSERMOUNT_CMD = 'fusermount'

class EngineMode(enum.Enum):
    """How the workflow engine itself is run: natively or inside docker."""
    Local = 'local'
    Docker = 'docker'

DEFAULT_ENGINE_MODE = EngineMode.Local

# Abstraction of input params and output names
SymbolicName = NewType('SymbolicName', str)
SymbolicParamName = NewType('SymbolicParamName', SymbolicName)
SymbolicOutputName = NewType('SymbolicOutputName', SymbolicName)

# The tagged name of a container
ContainerTaggedName = NewType('ContainerTaggedName', str)

URIType = NewType('URIType', str)
# The URL of a git repository containing at least one workflow
RepoURL = NewType('RepoURL', URIType)
# The tag, branch or hash of a workflow in a git repository
RepoTag = NewType('RepoTag', str)
# This is a relative path
RelPath = NewType('RelPath', str)
# This is an absolute path
AbsPath = NewType('AbsPath', str)
# This is also an absolute path (where an engine was installed)
EnginePath = NewType('EnginePath', AbsPath)

# This is a workflow engine version
EngineVersion = NewType('EngineVersion', str)

# This is a workflow language version
WFLangVersion = NewType('WFLangVersion', str)

# This represents a fingerprint from an installation, a docker image, etc...
# It should follow next format
#   {0}={1}
# where {0} is the name of the digest (sha256, for instance)
# and {1} is the base64 encoding of the binary digest
Fingerprint = NewType('Fingerprint', str)

# Exit value from any kind of execution
ExitVal = NewType('ExitVal', int)

# Free-form mapping holding credentials / tokens used by the fetchers
SecurityContextConfig = Mapping[str, Any]

# As each workflow engine can have its own naming convention, leave them to
# provide it
ContainerFileNamingMethod = Callable[[URIType], RelPath]
## BEWARE!!!! The names of these keys MUST NOT CHANGE
## (they are persisted in marshalled state files)
class ContentKind(enum.Enum):
    """Nature of a materialized input / output: file, directory or plain value."""
    File = 'file'
    Directory = 'dir'
    Value = 'val'

class URIWithMetadata(NamedTuple):
    """
    uri: The uri
    metadata: A dictionary with the metadata associated to that URI.
    """
    uri: URIType
    metadata: Mapping[str,Any]

class MaterializedContent(NamedTuple):
    """
    local: Local absolute path of the content which was materialized. It
    can be either a path in the cached inputs directory, or an absolute
    path in the inputs directory of the execution
    uri: Either an URL or a CURIE of the content which was materialized,
    needed for the provenance
    prettyFilename: The preferred filename to use in the inputs directory
    of the execution environment
    kind: whether the materialized content is a file, directory or value
    metadata_array: the metadata trail gathered while fetching the content
    """
    local: AbsPath
    uri: URIType
    prettyFilename: RelPath
    kind: ContentKind = ContentKind.File
    metadata_array: Optional[List[URIWithMetadata]] = None

# Signature shared by every protocol fetcher:
# (remote URI, destination path, optional security context)
#   -> (resolved URI or kind of fetched content, metadata trail)
ProtocolFetcher = Callable[[URIType, AbsPath, Optional[SecurityContextConfig]], Tuple[Union[URIType, ContentKind], List[URIWithMetadata]]]

class MaterializedInput(NamedTuple):
    """
    name: Name of the input
    values: list of associated values, which can be literal ones or
    instances from MaterializedContent
    """
    name: SymbolicParamName
    values: List[Union[bool, str, int, float, MaterializedContent]]

# A filename matching pattern (glob-style)
GlobPattern = NewType('GlobPattern', str)
class ExpectedOutput(NamedTuple):
    """
    name: Name of the output. If the workflow engine allows using
    symbolic names attached to the outputs, this name must match that.
    Otherwise, a matching pattern must be defined.
    kind: The kind of output. Either an atomic value.
    preferredFilename: Relative "pretty" name which is going to be used
    to export the file to external storage.
    cardinality: Whether it is expected to be optional, a single value or
    multiple ones.
    glob: When the workflow engine does not use symbolic
    names to label the outputs, this is the filename pattern to capture the
    local path, based on the output / working directory.
    """
    name: SymbolicOutputName
    kind: ContentKind
    preferredFilename: RelPath
    cardinality: Tuple[int, int]
    glob: GlobPattern

    def _marshall(self):
        """Serialize this output declaration into a plain dict.

        The kind is stored by member *name* (e.g. 'File'), matching the
        "MUST NOT CHANGE" warning on ContentKind.
        """
        mD = {
            'c-l-a-s-s': self.kind.name,
            'cardinality': list(self.cardinality),
        }
        if self.preferredFilename is not None:
            mD['preferredName'] = self.preferredFilename
        if self.glob is not None:
            mD['glob'] = self.glob

        return mD

    @classmethod
    def _unmarshall(cls, name, obj):
        """Rebuild an ExpectedOutput from a dict produced by _marshall.

        BUGFIX (two round-trip defects):
        * _marshall stores ``self.kind.name`` ('File'), so the lookup must
          be by name (``ContentKind[...]``); the previous by-value lookup
          (``ContentKind(...)``) raised ValueError for every marshalled kind.
        * ``cardinality`` is a mandatory field, but it was never restored,
          so instantiation failed with a missing-argument TypeError.
        """
        return cls(
            name=name,
            kind=ContentKind[obj['c-l-a-s-s']] if 'c-l-a-s-s' in obj else ContentKind.File,
            preferredFilename=obj.get('preferredName'),
            cardinality=tuple(obj.get('cardinality', (0, 1))),
            glob=obj.get('glob')
        )
class GeneratedContent(NamedTuple):
    """
    local: Local absolute path of the content which was generated. It
    is an absolute path in the outputs directory of the execution.
    uri: A putative URL or a CURIE of the content which was generated,
    needed for the provenance and upload matters.
    signature: Computed checksum from the file
    preferredFilename: The preferred relative filename to use when it is
    uploaded from the computational environment
    """
    local: AbsPath
    signature: Fingerprint
    uri: URIType = None
    preferredFilename: RelPath = None

class GeneratedDirectoryContent(NamedTuple):
    """
    local: Local absolute path of the directory which was generated. It
    is an absolute path in the outputs directory of the execution.
    values: the contents of the directory
    uri: A putative URL or a CURIE of the content which was generated,
    needed for the provenance and upload matters.
    preferredFilename: The preferred relative filename to use when it is
    uploaded from the computational environment
    """
    local: AbsPath
    values: List[Any]  # It should be List[Union[GeneratedContent, GeneratedDirectoryContent]]
    uri: URIType = None
    preferredFilename: RelPath = None

class MaterializedOutput(NamedTuple):
    """
    name: Name of the output. It should be a public identifier whenever it is possible
    kind: kind of the output (file, directory or value)
    expectedCardinality: Whether it was expected to be optional, a single value or
    multiple ones.
    values: the literal or generated values attached to this output
    """
    name: SymbolicOutputName
    kind: ContentKind
    expectedCardinality: Tuple[int, int]
    values: List[Union[bool, str, int, float, GeneratedContent, GeneratedDirectoryContent]]

class LocalWorkflow(NamedTuple):
    """
    dir: The path to the directory where the checkout was applied
    relPath: Inside the checkout, the relative path to the workflow definition
    effectiveCheckout: hex hash of the materialized checkout
    langVersion: workflow language version / revision
    """
    dir: AbsPath
    relPath: RelPath
    effectiveCheckout: RepoTag
    langVersion: WFLangVersion = None

# This skeleton is here only for type mapping reasons
# (presumably so type hints can reference engines without importing the
# real engine module -- TODO confirm against engine.py)
class AbstractWorkflowEngineType(abc.ABC):
    pass
TRS_Workflow_Descriptor = str
class WorkflowType(NamedTuple):
"""
engineName: symbolic name of the engine
name: Textual representation of the workflow language
clazz: Class implementing the engine invocation
uriMatch: The URI patterns used in RO-Crate to identify the workflow type
uriTemplate: The URI template to be used when RO-Crate ComputerLanguage is generated
url: The URL used in RO-Crate to represent the workflow language
trs_descriptor: The string used in GA4GH TRSv2 specification to define this workflow type
rocrate_programming_language: Traditional internal id in RO-Crate implementations used for this workflow type (to be deprecated)
"""
engineName: str
name: str
clazz: Type[AbstractWorkflowEngineType]
uriMatch: List[Union[Pattern, URIType]]
uriTemplate: URIType
url: URIType
trs_descriptor: TRS_Workflow_Descriptor
rocrate_programming_language: str
class MaterializedWorkflowEngine(NamedTuple):
"""
instance: Instance of the workflow engine
version: Version of the engine to be used
fingerprint: Fingerprint of the engine to be used (it could be the version)
engine_path: Absolute path to the fetched engine
workflow: Instance of LocalWorkflow
"""
instance: AbstractWorkflowEngineType
version: str
fingerprint: Union[Fingerprint, str]
engine_path: EnginePath
workflow: LocalWorkflow
class ContainerType(enum.Enum):
    """Supported container technologies; 'none' disables containerization."""
    Singularity = 'singularity'
    Docker = 'docker'
    UDocker = 'udocker'
    Podman = 'podman'
    NoContainer = 'none'
DEFAULT_CONTAINER_TYPE = ContainerType.Singularity
class Container(NamedTuple):
    """A container image referenced by a workflow, possibly materialized locally.

    origTaggedName: Symbolic name or identifier of the container
        (including tag) which appears in the workflow.
    taggedName: Symbolic name or identifier of the container (including tag)
    type: Container type
    localPath: The full local path to the container file (it can be None)
    signature: Signature (aka file fingerprint) of the container
        (sha256 or similar). It could be None outside Singularity solutions.
    fingerprint: Server fingerprint of the container.
        Mainly from docker registries.
    """
    origTaggedName: str
    taggedName: URIType
    type: ContainerType
    localPath: AbsPath = None
    signature: Fingerprint = None
    fingerprint: Fingerprint = None
class WFException(Exception):
    """Base exception raised for workflow-backend errors in this module."""
    pass
# Next methods have been borrowed from FlowMaps
DEFAULT_DIGEST_ALGORITHM = 'sha256'
DEFAULT_DIGEST_BUFFER_SIZE = 65536
def stringifyDigest(digestAlgorithm, digest:bytes) -> Union[Fingerprint, bytes]:
    """Render a raw digest as '<algorithm>=<standard base64 of the digest>'."""
    encoded = str(base64.standard_b64encode(digest), 'iso-8859-1')
    return '{0}={1}'.format(digestAlgorithm, encoded)
def stringifyFilenameDigest(digestAlgorithm, digest:bytes) -> Union[Fingerprint, bytes]:
    """Render a raw digest as a filename-safe '<algorithm>~<urlsafe base64>'."""
    encoded = str(base64.urlsafe_b64encode(digest), 'iso-8859-1')
    return '{0}~{1}'.format(digestAlgorithm, encoded)
def nullProcessDigest(digestAlgorithm, digest:bytes) -> Union[Fingerprint, bytes]:
    """Identity representation: return the raw digest bytes unchanged."""
    return digest
from rfc6920.methods import generate_nih_from_digest
def nihDigest(digestAlgorithm, digest: bytes) -> Union[Fingerprint, bytes]:
    """Render the digest as an RFC 6920 'named information' (nih) value."""
    return generate_nih_from_digest(digest, algo=digestAlgorithm)
def ComputeDigestFromFileLike(filelike, digestAlgorithm=DEFAULT_DIGEST_ALGORITHM, bufferSize: int = DEFAULT_DIGEST_BUFFER_SIZE, repMethod=stringifyDigest) -> Fingerprint:
    """
    Accessory method used to compute the digest of an input file-like object.
    Reads in chunks of bufferSize bytes and renders the result with repMethod.
    """
    h = hashlib.new(digestAlgorithm)
    while True:
        chunk = filelike.read(bufferSize)
        if not chunk:
            break
        h.update(chunk)
    return repMethod(digestAlgorithm, h.digest())
@functools.lru_cache(maxsize=32)
def ComputeDigestFromFile(filename: Union[AbsPath, RelPath], digestAlgorithm=DEFAULT_DIGEST_ALGORITHM, bufferSize: int = DEFAULT_DIGEST_BUFFER_SIZE, repMethod=stringifyDigest) -> Fingerprint:
    """
    Accessory method used to compute the digest of an input file

    NOTE(review): results are memoized per argument tuple by lru_cache, so a
    file whose contents change on disk keeps returning the stale digest for
    the process lifetime — confirm callers only digest immutable files.
    """
    with open(filename, mode='rb') as f:
        return ComputeDigestFromFileLike(f, digestAlgorithm, bufferSize, repMethod)
def scantree(path):
    """Recursively yield DirEntry objects for given directory.

    Non-directory entries of ``path`` are yielded first; afterwards each
    visible subdirectory entry is yielded, followed by its own recursive
    scan.  Directories whose name starts with '.' are skipped entirely,
    which also avoids loops around '.' and '..'.
    """
    # Single pass over the directory: remember the subdirectory entries so
    # they can be emitted after the files.  The previous implementation ran
    # os.scandir twice (once for files, once for dirs), which was redundant
    # and racy when the directory changed between the two scans.
    subdirs = []
    for entry in os.scandir(path):
        if entry.is_dir(follow_symlinks=False):
            # We are avoiding to enter in loops around '.' and '..'
            if entry.name[0] != '.':
                subdirs.append(entry)
        else:
            yield entry
    # We are leaving the dirs to the end
    for entry in subdirs:
        yield entry
        yield from scantree(entry.path)
def ComputeDigestFromDirectory(dirname: Union[AbsPath, RelPath], digestAlgorithm=DEFAULT_DIGEST_ALGORITHM, bufferSize: int = DEFAULT_DIGEST_BUFFER_SIZE, repMethod=stringifyDigest) -> Fingerprint:
    """
    Accessory method used to compute the digest of an input directory,
    based on the names and digest of the files in the directory

    The per-file digests are combined in a deterministic order (sorted by
    utf-8 encoded relative path), so the result is stable across scans.
    """
    cEntries = [ ]
    # First, gather and compute all the files
    for entry in scantree(dirname):
        if entry.is_file():
            cEntries.append(
                (
                    os.path.relpath(entry.path, dirname).encode('utf-8'),
                    ComputeDigestFromFile(entry.path, repMethod=nullProcessDigest)
                )
            )
    # Second, sort by the relative path, bytes encoded in utf-8
    cEntries.sort(key=lambda e: e[0])
    # Third, digest compute
    h = hashlib.new(digestAlgorithm)
    for cRelPathB , cDigest in cEntries:
        h.update(cRelPathB)
        h.update(cDigest)
    return repMethod(digestAlgorithm, h.digest())
def GetGeneratedDirectoryContent(thePath: AbsPath, uri: URIType = None, preferredFilename: RelPath = None) -> GeneratedDirectoryContent:
    """
    Build a GeneratedDirectoryContent describing thePath: each regular file
    becomes a GeneratedContent carrying an nih digest, each subdirectory is
    described recursively.  Entries whose name starts with '.' are skipped.
    """
    theValues = []
    with os.scandir(thePath) as itEntries:
        for entry in itEntries:
            # Hidden files are skipped by default
            if not entry.name.startswith('.'):
                theValue = None
                if entry.is_file():
                    theValue = GeneratedContent(
                        local=entry.path,
                        # uri=None,
                        signature=ComputeDigestFromFile(entry.path, repMethod=nihDigest)
                    )
                elif entry.is_dir():
                    theValue = GetGeneratedDirectoryContent(entry.path)
                if theValue is not None:
                    theValues.append(theValue)
    return GeneratedDirectoryContent(
        local=thePath,
        uri=uri,
        preferredFilename=preferredFilename,
        values=theValues
    )
# Mapping from CWL output 'class' values to the internal ContentKind enum.
CWLClass2WfExS = {
    'Directory': ContentKind.Directory,
    'File': ContentKind.File
    # '???': ContentKind.Value
}
def CWLDesc2Content(cwlDescs: Union[Mapping[str, Any], List[Mapping[str, Any]]], logger, expectedOutput: ExpectedOutput = None) -> List[Union[bool, str, int, float, GeneratedContent, GeneratedDirectoryContent]]:
    """
    Translate one or several CWL output descriptions into materialized
    content records: 'Directory' entries become GeneratedDirectoryContent
    (recursing into their 'listing'), 'File' entries become GeneratedContent
    with an nih digest.  'secondaryFiles' are processed recursively and
    appended.  When expectedOutput is given and its kind does not match the
    obtained one, a warning is logged but processing continues.
    """
    matValues = []
    # Normalize to a list, so single descriptions follow the same path
    if not isinstance(cwlDescs, list):
        cwlDescs = [cwlDescs]
    for cwlDesc in cwlDescs:
        foundKind = CWLClass2WfExS.get(cwlDesc['class'])
        if (expectedOutput is not None) and foundKind != expectedOutput.kind:
            logger.warning("For output {} obtained kind does not match ({} vs {})".format(expectedOutput.name, expectedOutput.kind, foundKind))
        matValue = None
        if foundKind == ContentKind.Directory:
            theValues = CWLDesc2Content(cwlDesc['listing'], logger=logger)
            matValue = GeneratedDirectoryContent(
                local=cwlDesc['path'],
                # TODO: Generate URIs when it is advised
                # uri=None
                preferredFilename=None if expectedOutput is None else expectedOutput.preferredFilename,
                values=theValues
            )
        elif foundKind == ContentKind.File:
            matValue = GeneratedContent(
                local=cwlDesc['path'],
                signature=ComputeDigestFromFile(cwlDesc['path'], repMethod=nihDigest)
            )
        if matValue is not None:
            matValues.append(matValue)
        # What to do with auxiliary/secondary files?
        secondaryFiles = cwlDesc.get('secondaryFiles', [])
        if len(secondaryFiles) > 0:
            matValues.extend(CWLDesc2Content(secondaryFiles, logger))
    return matValues
|
{"/wfexs_backend/workflow.py": ["/wfexs_backend/common.py", "/wfexs_backend/encrypted_fs.py", "/wfexs_backend/engine.py", "/wfexs_backend/cache_handler.py", "/wfexs_backend/utils/marshalling_handling.py", "/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/fetchers/pride.py", "/wfexs_backend/fetchers/trs_files.py", "/wfexs_backend/nextflow_engine.py", "/wfexs_backend/cwl_engine.py"], "/wfexs_backend/container.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cwl_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py"], "/wfexs_backend/fetchers/__init__.py": ["/wfexs_backend/common.py"], "/wfexs_backend/fetchers/trs_files.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"], "/wfexs_backend/singularity_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/utils/docker.py"], "/wfexs_backend/encrypted_fs.py": ["/wfexs_backend/common.py"], "/wfexs_backend/engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py", "/wfexs_backend/singularity_container.py", "/wfexs_backend/podman_container.py"], "/wfexs_backend/utils/marshalling_handling.py": ["/wfexs_backend/common.py"], "/wfexs_backend/cache_handler.py": ["/wfexs_backend/common.py"], "/wfexs_backend/nextflow_engine.py": ["/wfexs_backend/common.py", "/wfexs_backend/engine.py", "/wfexs_backend/fetchers/__init__.py"], "/wfexs_backend/podman_container.py": ["/wfexs_backend/common.py", "/wfexs_backend/container.py"], "/WfExS-backend.py": ["/wfexs_backend/workflow.py"], "/wfexs_backend/fetchers/pride.py": ["/wfexs_backend/fetchers/__init__.py", "/wfexs_backend/common.py"]}
|
37,033
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/project/settings_local.py
|
# This file is useful during development and must never be checked in.
import os
# NB: do not set DEBUG here. Some settings depend on it and setting it here has
# no effect. Edit an .env file and set it there. See
# https://django-environ.readthedocs.io/en/latest/ for details.
# Declare or redeclare variables here
FOOFOO = 1
# Uncomment to use PostgreSQL as database or set in an .env file
"""
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": "skeleton",
"USER": "postgres",
"PASSWORD": "",
"HOST": "",
"PORT": "5432",
"CONN_MAX_AGE": 600
}
}
"""
# Uncomment to use memcache as caching backend or set in an .env file
"""
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
"KEY_PREFIX": "skeleton",
},
}
"""
# Uncomment if you are doing performance profiling with Django Debug Toolbar
"""
DEBUG_TOOLBAR_PANELS = [
"ddt_request_history.panels.request_history.RequestHistoryPanel",
"debug_toolbar.panels.versions.VersionsPanel",
"debug_toolbar.panels.timer.TimerPanel",
"debug_toolbar.panels.settings.SettingsPanel",
"debug_toolbar.panels.headers.HeadersPanel",
"debug_toolbar.panels.request.RequestPanel",
"debug_toolbar.panels.sql.SQLPanel",
"debug_toolbar.panels.staticfiles.StaticFilesPanel",
"debug_toolbar.panels.templates.TemplatesPanel",
"debug_toolbar.panels.cache.CachePanel",
"debug_toolbar.panels.signals.SignalsPanel",
"debug_toolbar.panels.logging.LoggingPanel",
"debug_toolbar.panels.redirects.RedirectsPanel",
]
INTERNAL_IPS = ["127.0.0.1"]
RESULTS_CACHE_SIZE = 20000
"""
# If you need to access an existing variable your code must be in configure
def configure(**kwargs):
# Uncomment if you are doing performance profiling with Django Debug Toolbar
"""
return {
"INSTALLED_APPS": kwargs["INSTALLED_APPS"] + ["debug_toolbar"],
"MIDDLEWARE_CLASSES": (
"debug_toolbar.middleware.DebugToolbarMiddleware",
) + kwargs["MIDDLEWARE_CLASSES"]
}
"""
return {}
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,034
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/skeleton/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-10-18 12:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: TrivialContent as a OneToOne
    extension of jmbo.ModelBase (multi-table inheritance)."""
    initial = True
    dependencies = [
        ('jmbo', '0003_auto_20160530_1247'),
    ]
    operations = [
        migrations.CreateModel(
            name='TrivialContent',
            fields=[
                ('modelbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='jmbo.ModelBase')),
            ],
            bases=('jmbo.modelbase',),
        ),
    ]
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,035
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/skeleton/management/commands/get_last_foundry_migration.py
|
from django.core.management.base import BaseCommand, CommandError
from south.migration.base import Migrations
class Command(BaseCommand):
    """Print the name of the latest South migration for the 'foundry' app."""
    def handle(self, *args, **options):
        migrations = Migrations('foundry')
        # NOTE(review): Python 2 print statement (South-era tooling); this
        # module cannot even be imported under Python 3 — confirm it is
        # legacy-only before porting or removing.
        print migrations[-1].name()
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,036
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/skeleton/tests/settings/111.py
|
import os
import glob
from os.path import expanduser
BASE_DIR = os.path.join(
glob.glob(os.environ["VIRTUAL_ENV"] + "/lib/*/site-packages")[0],
"skeleton"
)
SECRET_KEY = "SECRET_KEY_PLACEHOLDER"
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
# The order is important
"skeleton",
"mobius",
"jmbo",
"photologue",
"category",
"channels",
"django_comments",
"form_renderers",
"formtools",
"likes",
"link",
"listing",
"mote",
"navbuilder",
"formfactory",
"pagination",
"post",
"preferences",
"secretballot",
"simplemde",
"sites_groups",
"composer",
# TODO: Remove nested_admin once the UI is built
"nested_admin",
# Django apps can be alphabetic
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# These apps have no templates
"cache_headers",
"celery",
"crum",
"layers",
"raven.contrib.django.raven_compat",
"rest_framework",
"rest_framework_extras",
"ultracache",
"webpack_loader"
)
MIDDLEWARE_CLASSES = (
"cache_headers.middleware.CacheHeadersMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"pagination.middleware.PaginationMiddleware",
"composer.middleware.ComposerFallbackMiddleware",
"likes.middleware.SecretBallotUserIpUseragentMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"crum.CurrentRequestUserMiddleware",
)
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": False,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.template.context_processors.request",
"django.contrib.messages.context_processors.messages",
"composer.context_processors.slots",
"preferences.context_processors.preferences_cp"
],
"loaders": [
"django.template.loaders.filesystem.Loader",
"mote.loaders.app_directories.Loader",
"django.template.loaders.app_directories.Loader",
]
},
},
]
ROOT_URLCONF = "skeleton.tests.urls"
WSGI_APPLICATION = "project.wsgi.application"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "test",
"USER": "postgres",
"PASSWORD": "",
"HOST": "",
"PORT": "",
}
}
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = "/static/"
SITE_ID = 1
REST_FRAMEWORK = {
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly"
],
"DEFAULT_METADATA_CLASS": "rest_framework.metadata.SimpleMetadata",
"DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.URLPathVersioning",
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.BasicAuthentication",
"rest_framework_jwt.authentication.JSONWebTokenAuthentication",
),
}
MEDIA_ROOT = "%s/media/" % BASE_DIR
MEDIA_URL = "/media/"
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse",
}
},
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
},
},
"handlers": {
"console": {
"level": "WARN",
"class": "logging.StreamHandler",
"formatter": "verbose"
},
"sentry": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "raven.contrib.django.handlers.SentryHandler",
},
},
"loggers": {
"raven": {
"level": "ERROR",
"handlers": ["console"],
"propagate": True,
},
"sentry.errors": {
"level": "ERROR",
"handlers": ["console"],
"propagate": True,
},
"django": {
"handlers": ["console"],
"level": "WARN",
"propagate": False,
},
},
}
# Dummy cache is the default
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache",
}
}
WEBPACK_LOADER = {
    "DEFAULT": {
        "CACHE": not DEBUG,
        "BUNDLE_DIR_NAME": "skeleton/generated_statics/bundles/",
        "STATS_FILE": os.path.join(BASE_DIR, "static",
                                   "skeleton", "generated_statics",
                                   "bundles",
                                   "skeleton-website-bundlemap.json"),
        "POLL_INTERVAL": 0.1,
        "TIMEOUT": None,
        # Raw strings for the regex patterns: "\." in a plain literal only
        # works because Python passes unknown escapes through (and warns on
        # newer versions); the resulting string values are identical.
        "IGNORE": [r".+\.hot-update.js", r".+\.map"]
    }
}
# Celery runs synchronously for tests
CELERY_TASK_ALWAYS_EAGER = True
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgi_redis.RedisChannelLayer",
"CONFIG": {
"hosts": [("localhost", 6379)],
"channel_capacity": {
"daphne.response*": 2000, # Important for stability
"http.connect": 2000,
"http.request": 2000,
"http.response*": 2000,
"http.disconnect": 2000,
"websocket.receive": 2000,
"websocket.send*": 2000,
"websocket.connect": 2000,
"websocket.disconnect": 2000,
},
"group_expiry": 300 # Default 86400, but recommended to be lower
},
"ROUTING": "skeleton.channels.routing.channel_routing",
}
}
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,037
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/skeleton/tasks.py
|
from celery.decorators import periodic_task
from celery.task.schedules import crontab
@periodic_task(
    run_every=crontab(hour="*", minute="*/1"),
    ignore_result=True
)
def sample_task():
    """Demo periodic task: fires every minute and just prints a marker."""
    print("Sample task")
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,038
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/skeleton/channels/consumers.py
|
import logging
from channels.generic.websockets import WebsocketConsumer
logger = logging.getLogger(__name__)
class EchoConsumer(WebsocketConsumer):
    """
    A consumer that greets you (using your first name, if you are logged in)
    and afterwards simply echoes back what you say.
    """
    # Make the Django user available on the message in connect/receive
    channel_session_user = True
    http_user = True
    def connect(self, message, **kwargs):
        """
        The websocket connection handler.
        :param message: A channels Message
        """
        logger.debug("A websocket connection was made by {}".format(
            message.user))
        # Accept the connection
        message.reply_channel.send({"accept": True})
        name = "Anonymous" if message.user.is_anonymous else \
            message.user.first_name or message.user.username
        self.send(text="Hello, {}".format(name))
    def disconnect(self, message, **kwargs):
        """
        The websocket disconnection handler.
        :param message: A channels Message
        """
        logger.debug("{} disconnected a websocket".format(message.user))
    # NOTE(review): the `bytes` parameter shadows the builtin, but the
    # signature is dictated by the channels WebsocketConsumer API.
    def receive(self, text=None, bytes=None, **kwargs):
        """
        This function simply echoes back a message received.
        """
        logger.debug("A websocket message was received from {}".format(
            self.message.user))
        logger.info("Got message: {}".format(text))
        self.send(text=text)
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,039
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/project/celery.py
|
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
from celery.beat import Scheduler
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
app = Celery("project")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
class DummyScheduler(Scheduler):
    """A scheduler that does nothing."""
    def __init__(self, *args, **kwargs):
        super(DummyScheduler, self).__init__(*args, **kwargs)
        # Empty schedule: celery beat has no periodic entries to dispatch.
        self.schedule = {}
@app.task(bind=True)
def debug_task(self):
print("Request: {0!r}".format(self.request))
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,040
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/skeleton/channels/routing.py
|
from channels import route_class
from skeleton.channels.consumers import EchoConsumer
channel_routing = [
route_class(EchoConsumer, path=r"^/ws/echo")
]
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,041
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/setup.py
|
from setuptools import setup, find_packages
def get_long_description():
    """Concatenate README/AUTHORS/CHANGELOG into setup()'s long description.

    Files that are missing or unreadable are silently skipped, so the
    function degrades gracefully in stripped-down source checkouts.
    """
    description = ""
    for name in ("README.rst", "AUTHORS.rst", "CHANGELOG.rst"):
        # The original try/finally referenced ``fp`` even when open() itself
        # raised (NameError on the very first missing file, double-close of a
        # stale handle afterwards); ``with`` scopes and closes it correctly.
        try:
            with open(name, "r") as fp:
                description += fp.read()
        except IOError:
            pass
    return description
setup(
name="mobius-skeleton",
version="0.1",
description="Create a Mobius project environment quickly.",
long_description = get_long_description(),
author="Praekelt Consulting",
author_email="dev@praekelt.com",
license="BSD",
url="",
packages=find_packages(),
install_requires=[
# Handled by requirements file
],
include_package_data=True,
tests_require=[
"tox",
"tox-run-before"
],
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,042
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/skeleton/tests/__init__.py
|
# Ensure celery is always configured
import project
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,043
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/project/urls.py
|
from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from django.views.static import serve
import rest_framework_extras
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework import routers, serializers, viewsets
from formfactory import api as formfactory_api
from jmbo.admin import ModelBaseAdmin, ModelBaseAdminForm
from jmbo import api as jmbo_api
from listing import api as listing_api
from post import api as post_api
router = routers.DefaultRouter()
rest_framework_extras.discover(router)
rest_framework_extras.register(router)
# Register jmbo suite routers
formfactory_api.register(router)
jmbo_api.register(router)
listing_api.register(router)
post_api.register(router)
admin.autodiscover()
urlpatterns = [
url(r"^", include("skeleton.urls", namespace="skeleton")),
url(r"^mobius/", include("mobius.urls", namespace="mobius")),
url(r"^admin/", include(admin.site.urls)),
url(r"^api/(?P<version>(v1))/", include(router.urls)),
url(r"^api-auth/", include("rest_framework.urls", namespace="rest_framework")),
url(r"^api-auth/$", obtain_jwt_token, name="obtain_token"),
url(r"^auth/", include("django.contrib.auth.urls", namespace="auth")),
url(
r"^formfactory/",
include("formfactory.urls", namespace="formfactory")
),
url(r"^jmbo/", include("jmbo.urls", namespace="jmbo")),
url(r"^link/", include("link.urls", namespace="link")),
url(r"^listing/", include("listing.urls", namespace="listing")),
url(r"^mote/", include("mote.urls", namespace="mote")),
url(r"^navbuilder/", include("navbuilder.urls", namespace="navbuilder")),
url(r"^post/", include("post.urls", namespace="post")),
# Comments can't handle namespaces
url(r"^comments/", include("django_comments.urls")),
url(r"^nested_admin/", include("nested_admin.urls")),
]
if settings.DEBUG:
# Host our own media
urlpatterns += [
url(
r"^media/(?P<path>.*)$",
serve,
{"document_root": settings.MEDIA_ROOT, "show_indexes": True}
),
]
# Expose Django Debug Toolbar if we can import it
try:
import debug_toolbar
urlpatterns += [
url(r"^__debug__/", include(debug_toolbar.urls)),
]
except ImportError:
pass
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,044
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/skeleton/tests/test_channels.py
|
from channels.test import ChannelTestCase, HttpClient
class TestEchoConsumer(ChannelTestCase):
    """Exercise the EchoConsumer over the channels test client."""
    def test_channel(self):
        # An anonymous websocket connect must receive the default greeting
        client = HttpClient()
        client.send_and_consume(channel=u"websocket.connect", path="/ws/echo/")
        self.assertEqual("Hello, Anonymous", client.receive(json=False))
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,045
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/skeleton/models.py
|
from jmbo.models import ModelBase
class TrivialContent(ModelBase):
    """We need one model so South migrations can be initiated."""
    # Intentionally empty: all behavior comes from jmbo.ModelBase
    pass
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,046
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/project/settings.py
|
from project.settings_mobius import *
# Our app must be first
INSTALLED_APPS = list(INSTALLED_APPS)
INSTALLED_APPS = ["skeleton"] + INSTALLED_APPS
# Configuration for our app
SKELETON = {
}
# settings_local.py is a convenient place to do extra configuration during
# development. However, it is not the right place to set debug - use the .env
# file for that.
try:
import project.settings_local as settings_local
from project.settings_local import *
except ImportError:
pass
else:
if hasattr(settings_local, "configure"):
lcl = locals()
di = settings_local.configure(**locals())
lcl.update(**di)
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,047
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/project/asgi.py
|
import os
os.environ["DJANGO_SETTINGS_MODULE"] = "project.settings"
from channels.asgi import get_channel_layer
channel_layer = get_channel_layer()
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,048
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/skeleton/admin.py
|
from django.contrib import admin
from jmbo.admin import ModelBaseAdmin
from skeleton.models import TrivialContent
admin.site.register(TrivialContent, ModelBaseAdmin)
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.