index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
29,720
|
SungjiCho/ipsi
|
refs/heads/master
|
/suneung/urls.py
|
# URLconf for the suneung app.
from django.urls import path
from suneung import views

urlpatterns = [
    # GET /suneung/ -> class-based list view
    path('suneung/', views.SuneungList.as_view()),
]
|
{"/review/views.py": ["/university/models.py", "/review/serializers.py"], "/jeongsi/serializers.py": ["/jeongsi/models.py"], "/university/views.py": ["/university/models.py", "/university/serializers.py", "/susi/models.py", "/susi/serializers.py", "/jeongsi/models.py", "/jeongsi/serializers.py"], "/model_to_csv.py": ["/university/models.py", "/susi/models.py", "/jeongsi/models.py", "/suneung/models.py"], "/review/serializers.py": ["/university/models.py"], "/university/admin.py": ["/university/models.py"], "/suneung/admin.py": ["/suneung/models.py"], "/susi/serializers.py": ["/susi/models.py"], "/susi/models.py": ["/university/models.py"], "/university/serializers.py": ["/university/models.py", "/susi/serializers.py", "/jeongsi/serializers.py"], "/jeongsi/models.py": ["/university/models.py", "/susi/models.py"], "/csv_to_model.py": ["/university/models.py", "/susi/models.py", "/jeongsi/models.py", "/suneung/models.py"], "/jeongsi/admin.py": ["/jeongsi/models.py"], "/suneung/views.py": ["/suneung/models.py", "/suneung/serializers.py"], "/suneung/serializers.py": ["/suneung/models.py"], "/susi/admin.py": ["/susi/models.py"]}
|
29,723
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/MYS/apps.py
|
from django.apps import AppConfig
class MysConfig(AppConfig):
    """Django application configuration for the MYS app."""
    name = 'MYS'
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,724
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/index/Models/order.py
|
from django.db import models
from .models import Product
from django.contrib.auth.models import User
import datetime
class Order(models.Model):
    """A customer's order of a single product."""
    productname = models.ForeignKey(Product, on_delete=models.CASCADE)  # ordered product
    # NOTE(review): migration 0009 created `customer` as a FK to the auth user,
    # but here it is a plain username string — confirm a later migration reconciles.
    customer = models.CharField(max_length=50)
    quantity = models.IntegerField(default=1)
    price = models.IntegerField()
    # NOTE(review): field name shadows the imported `datetime` module inside the
    # class body; callable default records the (naive) order-creation date.
    datetime = models.DateField( default=datetime.datetime.now)
    address = models.CharField(max_length=200)
    mobilno = models.CharField(max_length=20)  # presumably a mobile number — verify with callers
    confirm = models.CharField( max_length=50 , default="paid")  # order status flag
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,725
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/farmproject/urls.py
|
"""farmproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from index import views
from . import settings
from django.conf.urls.static import static
# Project-level URL routing for the farm portal.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',views.firstpage),
    path('register/',views.register),
    path('login/',views.login),
    path('homepage/',views.homepage),
    # NOTE(review): 'search<id>/' uses an un-typed converter; Django treats
    # <id> as a string path converter — confirm views.search expects a string.
    path('search<id>/',views.search),
    path('cart',views.order),  # no trailing slash, unlike sibling routes — TODO confirm intentional
    path('orderlist/',views.orderlist),
    #path('receipt/',views.render_pdf_view),
    path('contactus/',views.contactus),
    path('mys/',include('MYS.urls')),
    path('logout/',views.logout),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # serve uploaded media files
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,726
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/index/migrations/0010_auto_20210509_2351.py
|
# Generated by Django 3.1.7 on 2021-05-09 18:21
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: change Order.datetime default to datetime.datetime.now."""

    dependencies = [
        ('index', '0009_order'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='datetime',
            # Callable default: evaluated at row-creation time, not import time.
            field=models.DateField(default=datetime.datetime.now),
        ),
    ]
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,727
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/index/forms.py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
""" usercreationform is inbuilt class which created basic form with validations if we want give extra fields give in fields() but
in djnago User for authentication/registration process so only these fields are given no extra fields are given"""
class myform(UserCreationForm):
    """Registration form built on Django's UserCreationForm.

    Exposes only the standard auth User fields; username/password
    validation comes from the parent form.
    """
    class Meta:
        model = User
        fields =('username','email','first_name','last_name','password1','password2')
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,728
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/index/Models/__init__.py
|
from .models import Product
from .category import Category
from .smartfarming import smartfar
from .order import Order
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,729
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/index/migrations/0005_auto_20210501_0038.py
|
# Generated by Django 3.1.7 on 2021-04-30 19:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: set max lengths on Product.climatecondition and description."""

    dependencies = [
        ('index', '0004_product_category'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='climatecondition',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='product',
            name='description',
            field=models.CharField(max_length=300),
        ),
    ]
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,730
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/index/templatetags/operations.py
|
from django import template

register = template.Library()


@register.filter(name='price_order')
def price_order(i, quan):
    """Template filter: total price for `quan` units of item `i`.

    `quan` arrives as a string from the template and is coerced to int.
    """
    return i.price * int(quan)
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,731
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/index/Models/smartfarming.py
|
from django.db import models
class smartfar(models.Model):
name=models.CharField(max_length=30)
description=models.CharField(max_length=1000)
image = models.ImageField(upload_to='uploaded/images')
def __str__(self):
return self.name
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,732
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/index/views.py
|
from django.shortcuts import render,redirect
from .forms import myform
from django.http import HttpResponse,HttpResponseRedirect
from django.contrib import messages
from .Models.models import Product
from .Models.models import Category
from .Models.order import Order
from .Models.smartfarming import smartfar
from django.contrib.auth.models import User,auth
from django.http import HttpResponse
from django.template.loader import get_template
from xhtml2pdf import pisa
from django.contrib import messages
# Create your views here.
def firstpage(request):
    """Render the public landing page."""
    return render(request,"dashboard.html")
""" it checks the method is POST or not using request.POST and myform is form from forms.py user_form which check values from
myform is valid or not if it is not post it displays blank form """
def register(request):
    """Handle user sign-up.

    On POST, validate the bound registration form and create the user,
    then redirect to the landing page; otherwise render a blank form.
    An invalid POST falls through and re-renders the bound form with errors.
    """
    if request.method == "POST":
        user_form = myform(request.POST)
        if user_form.is_valid():
            user_form.save()
            return HttpResponseRedirect("/")
    else:
        user_form = myform()
    return render(request,"registerform.html",{"user_form":user_form})
""" authenticate checks that username and password is valid or ot means it is in user.database or not. In auth.login() gives
permissions for specific user"""
def login(request):
    """Authenticate a user and start a session.

    On success the username is stored in the session (other views read
    ``request.session['uname']``) and the user is redirected to the homepage.
    On failure an error message is flashed and the login page is shown again.
    """
    if request.method == "POST":
        username = request.POST['username']
        password = request.POST['password']
        user = auth.authenticate(username=username, password=password)
        if user is not None:
            # Record the username only AFTER successful authentication; the
            # original stored it unconditionally, leaving stale session state
            # (and an apparently "logged-in" homepage) after failed attempts.
            request.session['uname'] = username
            auth.login(request, user)
            return HttpResponseRedirect("/homepage/")
        messages.error(request,"Invalid id password ")
        return redirect("/login")
    return render(request,"login.html")
def homepage(request):
    """Render the homepage for a logged-in session.

    Requires ``request.session['uname']`` (set by ``login``); anonymous
    visitors are redirected to the login page.
    """
    try:
        uname1 = request.session['uname']
    except KeyError:
        # No logged-in session. The original used a bare ``except:`` around
        # the whole body (hiding template/DB errors) plus debug prints.
        return HttpResponseRedirect("/login/")
    prod = smartfar.objects.all()
    return render(request, "homepage.html", {'uname': uname1, 'products': prod})
def search(request,id):
    """Category listing (GET) or cart hand-off (POST).

    GET: show products in category ``id``; if the category is empty, fall
    back to listing all products.
    POST: remember the chosen product id and quantity in the session and
    redirect to the cart; without a quantity, flash an error.
    """
    prod11 = Product.objects.filter(category=id)
    if request.method=="GET" :
        if prod11:
            return render(request,"pk.html",{'products':prod11})
        else:
            prod11 = Product.objects.all()
            return render(request,"pk.html",{'products':prod11})
    else:
        productid=request.POST.get('demo')
        quantity=request.POST.get('quantity')
        if quantity:
            request.session['productid']=productid
            request.session['quantity']=quantity
            return HttpResponseRedirect("/cart")
        else:
            messages.error(request,"please enter quantity")
            # NOTE(review): '/search3' looks like a hard-coded category id;
            # probably intended to return to the current category — confirm.
            return redirect('/search3')
def order(request):
    """Cart and checkout view.

    GET: render the cart for the product stashed in the session.
    POST: persist an Order built from session state plus POSTed fields,
    then stream a PDF receipt generated from the receipt template.
    """
    if request.method=="POST":
        pkid=request.session['productid']
        #name1=request.POST.get('name1')
        #request.session['name1']=name1
        #print(request.session['name1'])
        # Build the order from session (product/quantity/username) and
        # POSTed price/address/mobile number.
        order = Order(productname=Product(id=pkid),customer=request.session['uname'],quantity=request.session['quantity'],price=request.POST.get('price'),address=request.POST.get('address'),mobilno=request.POST.get('mobno'))
        order.save()
        # Re-fetch the just-saved order (newest matching row) for the receipt.
        list1=Order.objects.filter(productname=request.session['productid'],customer=request.session['uname']).order_by('-id')[:1];
        name=User.objects.filter(username=request.session['uname'])
        print(list1)
        template_path = 'receipt.html'
        context = {'myvar': list1,'name':name}
        # Create a Django response object with PDF content type so the
        # browser offers the receipt as a download.
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'attachment; filename="receipt.pdf"'
        # Render the receipt template to HTML.
        template = get_template(template_path)
        html = template.render(context)
        # Convert the HTML into the PDF response via xhtml2pdf.
        pisa_status = pisa.CreatePDF(
            html, dest=response)
        # On conversion failure, show the raw HTML for debugging.
        if pisa_status.err:
            return HttpResponse('We had some errors <pre>' + html + '</pre>')
        return response
    else:
        context={}
        prodid=request.session['productid']
        #context['name']=User.objects.filter(username=request.session['uname'])
        context['filteredproduct'] = Product.objects.filter(id=prodid)
        context['quan']=request.session['quantity']
        return render(request,"cartsection.html",context)
def orderlist(request):
    """Render the logged-in user's orders, newest first."""
    orders = Order.objects.filter(
        customer=request.session['uname']
    ).order_by('-id')
    return render(request, "orderlist.html", {'list1': orders})
"""
def render_pdf_view(request):
list1=Order.objects.filter(productname=request.session['productid'],customer=request.session['uname']).order_by('-id')[:1];
name=User.objects.filter(username=request.session['uname'])
print(list1)
template_path = 'receipt.html'
context = {'myvar': list1,'name':name}
# Create a Django response object, and specify content_type as pdf
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename="report.pdf"'
# find the template and render it.
template = get_template(template_path)
html = template.render(context)
# create a pdf
pisa_status = pisa.CreatePDF(
html, dest=response)
# if error then show some funy view
if pisa_status.err:
return HttpResponse('We had some errors <pre>' + html + '</pre>')
return redirect('/homepage')
"""
def contactus(request):
    """Render the static contact page."""
    return render(request,"contact.html")
def logout(request):
    """Clear cart/session keys, log the user out, and return to the landing page."""
    for key in ('uname', 'productid', 'quantity'):
        # Keys may be absent (e.g. the user never added to the cart); pop with
        # a default replaces the original bare ``except:`` that hid all errors.
        request.session.pop(key, None)
    auth.logout(request)
    return redirect("/")
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,733
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/index/admin.py
|
from django.contrib import admin
# Register your models here.
from .Models.models import Product
from .Models.category import Category
from .Models.smartfarming import smartfar
from .Models.order import Order
class AdminProduct(admin.ModelAdmin):
    """Admin changelist for Product: show name and category columns."""
    list_display=['productname','category']


class AdminCategory(admin.ModelAdmin):
    """Admin changelist for Category: show the name column."""
    list_display=['name']


# Register the app's models with the admin site.
admin.site.register(Product, AdminProduct)
admin.site.register(Category,AdminCategory)
admin.site.register(smartfar)
admin.site.register(Order)
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,734
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/index/migrations/0009_order.py
|
# Generated by Django 3.1.7 on 2021-05-09 18:15
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: create the Order model (FKs to Product and the auth user).

    NOTE(review): here ``customer`` is a ForeignKey to the user model, while
    the current Order model declares it as a CharField — confirm a later
    migration reconciles the two.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('index', '0008_smartfar_image'),
    ]

    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(default=1)),
                ('price', models.IntegerField()),
                # Callable default: order date at creation time (naive).
                ('datetime', models.DateField(default=datetime.datetime.today)),
                ('address', models.CharField(max_length=200)),
                ('mobilno', models.CharField(max_length=20)),
                ('confirm', models.CharField(default='paid', max_length=50)),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('productname', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='index.product')),
            ],
        ),
    ]
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,735
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/index/Models/models.py
|
from django.db import models
from .category import Category
# Create your models here.
class Product(models.Model):
    """A sellable farm product belonging to a Category."""
    productname = models.CharField(max_length=30)
    description = models.CharField(max_length=300)
    category = models.ForeignKey(Category , on_delete=models.CASCADE)
    climatecondition = models.CharField(max_length=100)
    weight = models.CharField(max_length=30)  # free-text weight, not numeric
    price = models.IntegerField()
    image = models.ImageField(upload_to='uploaded/images')

    def __str__(self):
        return self.productname
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,736
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/MYS/urls.py
|
from django.urls import path
from MYS import views
# URLconf for the MYS app (mounted at /mys/ by the project URLconf).
urlpatterns=[
    path('',views.display),
]
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,737
|
siddhantkudal/efarmingportal.github.io
|
refs/heads/main
|
/MYS/views.py
|
from django.shortcuts import render
from .models import mys
import datetime
# Create your views here.
def display(request):
    """Show every mys record together with the current server time."""
    records = mys.objects.all()
    now = datetime.datetime.now()
    return render(request, "mys.html", {'pdlist': records, 'time': now})
|
{"/index/Models/order.py": ["/index/Models/models.py"], "/index/Models/__init__.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"], "/index/views.py": ["/index/forms.py", "/index/Models/models.py", "/index/Models/order.py", "/index/Models/smartfarming.py"], "/index/admin.py": ["/index/Models/models.py", "/index/Models/smartfarming.py", "/index/Models/order.py"]}
|
29,740
|
fattybobcat/foodgram-project
|
refs/heads/master
|
/recipes/views.py
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.paginator import Paginator
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import View
from api.models import Follow
from foodgram.settings import COUNT_RECIPE
from .auxiliary import get_ingredients, tag_collect
from .form import RecipeForm
from .models import IngredientAmount, Recipe
def index(request):
    """Main page: paginated recipes, optionally narrowed by tag filters."""
    tags, tags_filter = tag_collect(request)
    if tags_filter:
        # distinct() guards against duplicate rows from the tag join.
        recipe_list = Recipe.objects.filter(tags_filter).distinct()
    else:
        recipe_list = Recipe.objects.all()
    paginator = Paginator(recipe_list, COUNT_RECIPE)
    page_number = request.GET.get("page")
    page = paginator.get_page(page_number)
    return render(request,
                  "index.html",
                  {"tags": tags,
                   "page": page,
                   "paginator": paginator, }
                  )
@login_required
def new_recipe(request):
    """Create a new recipe.

    GET renders an empty form. POST requires at least one tag (checked via
    the raw form-data keys) before validating; on success the recipe, its
    ingredient amounts, and the m2m tag links are saved and the user is
    redirected to the recipe page.
    """
    headline = "Создание рецепта"
    button = "Создать рецепт"
    form = RecipeForm(request.POST or None, files=request.FILES or None)
    ingredients_names = get_ingredients(request)
    if request.method == "POST":
        keys_form = [*form.data.keys()]
        if 'tags' in keys_form:
            if form.is_valid():
                # Save the recipe first so ingredient rows can reference it.
                recipe = form.save(commit=False)
                recipe.author = request.user
                recipe.save()
                for key in ingredients_names:
                    IngredientAmount.add_ingredient(
                        IngredientAmount,
                        recipe.id,
                        key,
                        ingredients_names[key][0]
                    )
                form.save_m2m()
                return redirect('recipe_single', recipe_id=recipe.id)
        # No tag chosen (or invalid form): re-render with the tag error.
        error_tag = "Выберите один из предложенных 'тегов'"
        return render(request,
                      "formRecipe.html",
                      {"form": form,
                       "headline": headline,
                       "button": button,
                       "error_tag": error_tag,
                       }
                      )
    return render(request,
                  "formRecipe.html",
                  {"form": form,
                   "headline": headline,
                   "button": button,
                   }
                  )
class EditRecipe(View):
    """Edit form for an existing recipe; only the author may edit."""

    def get(self, request, recipe_id):
        """Show the pre-filled edit form; non-authors go back to the index."""
        headline = "Редактирование рецепта"
        recipe = get_object_or_404(Recipe, id=recipe_id)
        ingredients = recipe.amounts.all()
        if request.user != recipe.author:
            return redirect('index')
        form = RecipeForm(request.POST or None,
                          files=request.FILES or None,
                          instance=recipe)
        return render(request,
                      "editRecipe.html",
                      context={'form': form,
                               'headline': headline,
                               'recipe': recipe,
                               'ingredients': ingredients}
                      )

    def post(self, request, recipe_id):
        """Apply the edit: rebuild ingredient amounts, save form and tags.

        Requires at least one tag in the raw form data; without one the edit
        form is re-rendered with an error. An invalid form falls back to the
        recipe's single page.
        """
        headline = "Редактирование рецепта"
        ingredients_names = get_ingredients(request)
        recipe = get_object_or_404(Recipe, id=recipe_id)
        ingredients = recipe.amounts.all()
        form = RecipeForm(request.POST or None,
                          files=request.FILES or None,
                          instance=recipe,
                          )
        if request.user != recipe.author:
            return redirect('index')
        if form.is_valid():
            keys_form = [*form.data.keys()]
            if 'tags' in keys_form:
                # Replace the recipe's ingredient set wholesale.
                IngredientAmount.objects.filter(recipe=recipe).delete()
                recipe = form.save(commit=False)
                recipe.author = request.user
                recipe.save()
                form.save()
                for key in ingredients_names:
                    IngredientAmount.add_ingredient(
                        IngredientAmount,
                        recipe.id,
                        key,
                        ingredients_names[key][0]
                    )
                form.save_m2m()
                return redirect('recipe_single', recipe_id=recipe_id)
            error_tag = "Выберите один из предложенных 'тегов'"
            return render(request,
                          "editRecipe.html",
                          context={"form": form,
                                   "headline": headline,
                                   "recipe": recipe,
                                   "ingredients": ingredients,
                                   "error_tag": error_tag,
                                   }
                          )
        return render(request,
                      "singlePage.html",
                      {'id': recipe.id,
                       'headline': headline,
                       'recipe': recipe,
                       'ingredients': ingredients_names,
                       }
                      )
@login_required
def recipe_delete(request, recipe_id):
    """Delete a recipe when the requester is its author.

    The confirmation page is rendered regardless of whether the delete
    actually happened (non-authors just see the same page).
    """
    recipe = get_object_or_404(Recipe, id=recipe_id)
    if request.user == recipe.author:
        recipe.delete()
    return render(request, 'deleteRecipeDone.html')
def recipe_single(request, recipe_id):
    """Detail page for one recipe with its ingredient amounts."""
    recipe = get_object_or_404(Recipe, id=recipe_id)
    context = {
        "recipe": recipe,
        "ingredients": recipe.amounts.all(),
    }
    return render(request, "singlePage.html", context)
def profile(request, username):
    """Author page: the author's recipes, paginated and tag-filterable.

    ``not_follow`` hides the follow button when users view their own page.
    """
    username = get_object_or_404(User, username=username)
    not_follow = False
    if username.username == request.user.username:
        not_follow = True
    tags, tags_filter = tag_collect(request)
    if tags_filter:
        recipes = Recipe.objects.filter(
            tags_filter
        ).filter(
            author=username
        ).distinct()
    else:
        recipes = Recipe.objects.filter(author=username)
    paginator = Paginator(recipes, COUNT_RECIPE)
    page_number = request.GET.get('page')
    page = paginator.get_page(page_number)
    return render(request,
                  'pageAuthor.html',
                  {'recipes': recipes,
                   'page': page,
                   'paginator': paginator,
                   'username1': username,
                   "tags": tags,
                   "not_follow": not_follow,
                   }
                  )
@login_required()
def shopping_list(request):
    """Render the current user's shopping list (wishlisted recipes)."""
    shop_list = Recipe.objects.filter(
        wishlist_recipe__user__id=request.user.id).all()
    # Leftover debug print of the count removed.
    shop_list_count = shop_list.count()
    return render(request,
                  "shopList.html",
                  {"shop_list": shop_list,
                   "shop_list_count": shop_list_count,
                   }
                  )
def download_wishlist(request):
    """Return the user's aggregated shopping list as a downloadable text file.

    Sums ingredient amounts across all wishlisted recipes and emits one
    "<title> - <amount> <dimension>" line per distinct ingredient.
    """
    recipes_shop_list = Recipe.objects.filter(
        wishlist_recipe__user__id=request.user.id).all()
    ingredient_list = IngredientAmount.objects.filter(
        recipe__in=recipes_shop_list)
    ingredients = {}
    for item in ingredient_list:
        # Accumulate per-ingredient totals (replaces the `in dict.keys()`
        # membership test and manual if/else accumulation).
        ingredients[item.ingredient] = (
            ingredients.get(item.ingredient, 0) + item.amount
        )
    summary = [
        '{} - {} {} \n'.format(ing.title, amount, ing.dimension)
        for ing, amount in ingredients.items()
    ]
    # NOTE(review): this content type looks malformed — likely meant
    # 'text/plain; charset=utf-8'. Kept byte-identical to preserve behavior.
    response = HttpResponse(
        summary, content_type='application/text charset=utf-8'
    )
    response['Content-Disposition'] = 'attachment; filename="ShoppingList.txt"'
    return response
def follow_index(request):
    """Paginated list of the current user's subscriptions."""
    follow_list = Follow.objects.filter(
        user__id=request.user.id).all()
    paginator = Paginator(follow_list, COUNT_RECIPE)
    page_number = request.GET.get('page')
    page = paginator.get_page(page_number)
    return render(request,
                  'followPage.html',
                  {'follow_list': follow_list,
                   'page': page,
                   'paginator': paginator, }
                  )
def favorite(request):
    """Paginated list of the user's favorite recipes, tag-filterable."""
    tags, tags_filter = tag_collect(request)
    if tags_filter:
        recipe_list = Recipe.objects.filter(
            tags_filter).filter(
            favorite_recipe__user__id=request.user.id).distinct()
    else:
        recipe_list = Recipe.objects.filter(
            favorite_recipe__user__id=request.user.id).all()
    # Use the shared COUNT_RECIPE page size; a hard-coded 6 here was
    # inconsistent with every other paginated view in this module.
    paginator = Paginator(recipe_list, COUNT_RECIPE)
    page_number = request.GET.get('page')
    page = paginator.get_page(page_number)
    return render(request,
                  'favoriteRecipes.html',
                  {'recipe_list': recipe_list,
                   'page': page,
                   'paginator': paginator,
                   "tags": tags,
                   }
                  )
def about(request):
    """Static 'about' page."""
    return render(request, "about.html")


def tech(request):
    """Static 'technologies' page."""
    return render(request, "tech.html")


def page_not_found(request, exception):
    """Custom 404 handler; echoes the requested path into the template."""
    return render(request, "404.html", {"path": request.path}, status=404)


def server_error(request):
    """Custom 500 handler."""
    return render(request, "500.html", status=500)
|
{"/recipes/views.py": ["/api/models.py", "/foodgram/settings.py", "/recipes/auxiliary.py", "/recipes/models.py"], "/api/views.py": ["/recipes/models.py", "/api/models.py"], "/recipes/templatetags/recipes_tag_filter.py": ["/api/models.py", "/recipes/models.py"], "/api/models.py": ["/recipes/models.py"], "/recipes/auxiliary.py": ["/recipes/models.py"], "/recipes/admin.py": ["/recipes/models.py"]}
|
29,741
|
fattybobcat/foodgram-project
|
refs/heads/master
|
/api/views.py
|
import json
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.views import View
from recipes.models import Ingredient, Recipe
from .models import FavoriteRecipe, Follow, Wishlist
def ingredient_hints(request):
    """JSON autocomplete: ingredients whose title starts with ``?query=``.

    Defaults the missing parameter to "" — the original called ``.lower()``
    on ``None`` and crashed with AttributeError when ``query`` was absent.
    """
    text = request.GET.get("query", "").lower()
    ing_list = Ingredient.objects.filter(title__istartswith=text
                                         ).order_by('title')
    result = [
        {"title": item.title, "dimension": item.dimension} for item in ing_list
    ]
    return JsonResponse(result, safe=False)
class BaseView(View):
    """Shared add/remove logic for the favorite / follow / wishlist APIs.

    Subclasses set ``model`` (the relation model to create/delete),
    ``model_get`` (the model of the referenced object) and ``fields``
    (the kwarg name under which the referenced object is stored), and
    supply their own ``filter_kwargs`` before delegating here.
    """
    model = None          # relation model operated on (e.g. FavoriteRecipe)
    item_id = None        # id extracted from the JSON request body
    model_get = None      # model of the referenced object (Recipe / User)
    item_get = None       # the fetched referenced object
    # NOTE(review): mutable class-level default shared by all instances until
    # a subclass's post/delete overwrites it — confirm this is intended.
    filter_kwargs = {"key": "value"}
    fields = (None,)

    def post(self, request, filter_kwargs):
        """Create the relation from a JSON body ``{"id": <target id>}``.

        Responds ``{"success": true}`` when newly created, ``false`` when it
        already existed, and 400 when no id was supplied.
        """
        req = json.loads(request.body)
        self.item_id = req.get("id", None)
        if self.item_id:
            self.item_get = get_object_or_404(self.model_get, id=self.item_id)
            # Overwrite the placeholder target the subclass put in
            # filter_kwargs with the freshly fetched object.
            self.filter_kwargs[self.fields[0]] = self.item_get
            obj, created = self.model.objects.get_or_create(
                **self.filter_kwargs
            )
            if created:
                return JsonResponse({"success": True})
            return JsonResponse({"success": False})
        return JsonResponse({"success": False}, status=400)

    def delete(self, request, id):
        """Delete the relation matched by ``self.filter_kwargs``.

        NOTE(review): subclasses pass their filter dict in the ``id``
        positional slot, and ``id`` itself is unused here — confirm intent.
        """
        self.item_get = get_object_or_404(
            self.model, **self.filter_kwargs
        )
        self.item_get.delete()
        return JsonResponse({"success": True})
class FavoriteApi(LoginRequiredMixin, BaseView):
    """Add/remove a recipe in the current user's favorites."""
    model = FavoriteRecipe
    model_get = Recipe
    fields = ("recipe",)

    def post(self, request):
        # ``self.item_get`` is still None here; BaseView.post replaces the
        # "recipe" entry with the object fetched from the JSON body.
        self.filter_kwargs = {"user": request.user,
                              "recipe": self.item_get, }
        return super(FavoriteApi, self).post(request, self.filter_kwargs)

    def delete(self, request, id):
        self.filter_kwargs = {"user": request.user,
                              "recipe": id, }
        return super(FavoriteApi, self).delete(request, self.filter_kwargs)
class SubscriptionApi(LoginRequiredMixin, BaseView):
    """Follow/unfollow an author for the current user."""
    model = Follow
    model_get = User
    fields = ("author",)

    def post(self, request):
        # "author" is a placeholder; BaseView.post fills in the fetched User.
        self.filter_kwargs = {"user": request.user,
                              "author": self.item_get, }
        return super(SubscriptionApi, self).post(request, self.filter_kwargs)

    def delete(self, request, id):
        self.filter_kwargs = {"user": request.user,
                              "author": id, }
        return super(SubscriptionApi, self).delete(request, self.filter_kwargs)
class WishlistApi(BaseView):
    """Add/remove a recipe in the current user's shopping wishlist.

    NOTE(review): unlike its siblings this class omits LoginRequiredMixin —
    confirm whether anonymous wishlist access is intentional.
    """
    model = Wishlist
    model_get = Recipe
    fields = ("recipe",)

    def post(self, request):
        # "recipe" is a placeholder; BaseView.post fills in the fetched Recipe.
        self.filter_kwargs = {"user": request.user,
                              "recipe": self.item_get, }
        return super(WishlistApi, self).post(request, self.filter_kwargs)

    def delete(self, request, id):
        self.filter_kwargs = {"user": request.user,
                              "recipe": id, }
        return super(WishlistApi, self).delete(request, self.filter_kwargs)
|
{"/recipes/views.py": ["/api/models.py", "/foodgram/settings.py", "/recipes/auxiliary.py", "/recipes/models.py"], "/api/views.py": ["/recipes/models.py", "/api/models.py"], "/recipes/templatetags/recipes_tag_filter.py": ["/api/models.py", "/recipes/models.py"], "/api/models.py": ["/recipes/models.py"], "/recipes/auxiliary.py": ["/recipes/models.py"], "/recipes/admin.py": ["/recipes/models.py"]}
|
29,742
|
fattybobcat/foodgram-project
|
refs/heads/master
|
/recipes/templatetags/recipes_tag_filter.py
|
from django import template
from api.models import FavoriteRecipe, Follow, Wishlist
from recipes.models import TAG_CHOICES
register = template.Library()
@register.filter
def get_recipe_tag(tags_list):
    """Render the <li> badge markup for each meal tag present in `tags_list`."""
    badges = (
        ("breakfast", '<li class="card__item">'
                      '<span class="badge badge_style_orange">'
                      'Завтрак</span></li>'),
        ("lunch", '<li class="card__item">'
                  '<span class="badge badge_style_green">'
                  'Обед</span></li>'),
        ("dinner", '<li class="card__item">'
                   '<span class="badge badge_style_purple">'
                   'Ужин</span></li>'),
    )
    return "".join(html for key, html in badges if key in tags_list)
@register.filter
def get_description_new_lines(description_recipe):
    """Wrap each line of the recipe description in a styled <p> element.

    Uses ``str.join`` over a generator instead of the original
    ``for i in range(len(...))`` loop with quadratic string ``+=``.
    """
    return "".join(
        f'<p class=" single-card__section-text">{line}</p>'
        for line in description_recipe.split('\n')
    )
@register.filter
def get_is_favorite(recipe, user):
    """True if `user` has favorited `recipe`."""
    return FavoriteRecipe.objects.filter(user=user, recipe=recipe).exists()


@register.filter
def get_is_follow(recipe, user):
    """True if `user` follows the author of `recipe`."""
    return Follow.objects.filter(user=user, author=recipe.author).exists()


@register.filter
def get_is_follow2(author, user):
    """True if `user` follows `author` directly."""
    return Follow.objects.filter(user=user, author=author).exists()


@register.filter
def is_shop(recipe, user):
    """True if `recipe` is in `user`'s wishlist."""
    return Wishlist.objects.filter(user=user, recipe=recipe).exists()
@register.filter
def get_tags(request, tag):
    """Toggle `tag` inside the '__'-separated tag list from the query string.

    Returns the new tag-list string for building a filter link; when no
    ``tag`` GET parameter exists yet, returns `tag` itself.
    """
    if "tag" in request.GET:
        tag_list = request.GET["tag"]
        tag_list = tag_list.split("__")
        if tag not in tag_list:
            tag_list.append(tag)
        else:
            tag_list.remove(tag)
        # Drop the empty fragment a leading/trailing separator leaves behind.
        if "" in tag_list:
            tag_list.remove("")
        result = "__".join(tag_list)
        return result
    return tag
@register.simple_tag
def set_tags(request, tags, value):
    """Build GET parameters reflecting the selected tag filters.

    Toggles *value*: removes it when already present in the query
    string, otherwise adds it while keeping the other active *tags*.
    """
    request_object = request.GET.copy()
    if request.GET.get(value):
        request_object.pop(value)
    elif value in tags:
        # *value* is active but stored collectively: re-add every other
        # active tag, dropping *value* itself.
        for tag in tags:
            if tag != value:
                request_object[tag] = "tag"
    else:
        request_object[value] = "tag"
    return request_object.urlencode()
@register.simple_tag
def set_page(request, value):
    """Build GET parameters with the page number set to *value*,
    preserving all other current query-string parameters."""
    request_object = request.GET.copy()
    request_object["page"] = value
    return request_object.urlencode()
@register.filter
def get_tag_value(tag):
    """Return the human-readable (Russian) label of a tag key."""
    return dict(TAG_CHOICES)[tag]
|
{"/recipes/views.py": ["/api/models.py", "/foodgram/settings.py", "/recipes/auxiliary.py", "/recipes/models.py"], "/api/views.py": ["/recipes/models.py", "/api/models.py"], "/recipes/templatetags/recipes_tag_filter.py": ["/api/models.py", "/recipes/models.py"], "/api/models.py": ["/recipes/models.py"], "/recipes/auxiliary.py": ["/recipes/models.py"], "/recipes/admin.py": ["/recipes/models.py"]}
|
29,743
|
fattybobcat/foodgram-project
|
refs/heads/master
|
/recipes/models.py
|
from django.contrib.auth import get_user_model
from django.core.validators import MinValueValidator
from django.db import models
from multiselectfield import MultiSelectField
User = get_user_model()
TAG_CHOICES = [
("breakfast", "Завтрак"),
("lunch", "Обед"),
("dinner", "Ужин"),
]
class Ingredient(models.Model):
    """Reference ingredient: a name plus its measurement unit."""
    # Display name, e.g. "мука".
    title = models.CharField(max_length=300,
                             verbose_name="Название ингредиента",
                             )
    # Measurement unit, e.g. "г" or "шт".
    dimension = models.CharField(max_length=30,
                                 verbose_name="Единица измерения",
                                 )
    class Meta:
        verbose_name = "Ингредиент"
        verbose_name_plural = "Ингредиенты"
    def __str__(self):
        return str(self.title)
class IngredientAmount(models.Model):
    """Through model linking a Recipe to an Ingredient with a quantity."""
    amount = models.PositiveIntegerField(default=1,
                                         verbose_name="Количество",
                                         )
    ingredient = models.ForeignKey(Ingredient,
                                   on_delete=models.CASCADE,
                                   related_name="amounts"
                                   )
    # String reference because Recipe is declared below this class.
    recipe = models.ForeignKey('Recipe',
                               on_delete=models.CASCADE,
                               related_name="amounts"
                               )
    def add_ingredient(self, recipe_id, title, amount):
        # NOTE(review): ``self.objects`` — Django model managers are not
        # accessible from instances; this reads as if it were meant to be
        # a @classmethod (``cls.objects``). Verify how callers invoke it.
        # Returns None silently when amount is not positive.
        if int(amount) > 0:
            ingredient, create = Ingredient.objects.get_or_create(title=title)
            return self.objects.get_or_create(recipe_id=recipe_id,
                                              ingredient=ingredient,
                                              amount=amount)
class Recipe(models.Model):
    """A user-submitted recipe with tags, cooking time and ingredients."""
    author = models.ForeignKey(User,
                               on_delete=models.CASCADE,
                               related_name="recipes",
                               verbose_name="Автор",
                               )
    title = models.CharField(max_length=300,
                             verbose_name="Название рецепта",
                             )
    description = models.TextField(max_length=4000,
                                   verbose_name="Описание"
                                   )
    pub_date = models.DateTimeField("Дата добавления",
                                    auto_now_add=True,
                                    db_index=True
                                    )
    image = models.ImageField(upload_to="recipes/",
                              blank=True,
                              null=True,
                              verbose_name="Изображение",
                              )
    # Zero or more of TAG_CHOICES, stored comma-separated.
    tags = MultiSelectField(choices=TAG_CHOICES,
                            blank=True,
                            null=True,
                            verbose_name="Теги",
                            )
    time = models.PositiveIntegerField(validators=[MinValueValidator(1)],
                                       verbose_name="Время приготовления")
    ingredients = models.ManyToManyField(Ingredient,
                                         through=IngredientAmount,
                                         through_fields=("recipe",
                                                         "ingredient"
                                                         ),
                                         verbose_name="Список ингредиентов",
                                         )
    def __str__(self):
        return str(self.title)
    class Meta:
        ordering = ["-pub_date"]
        verbose_name = "Рецепт"
        verbose_name_plural = "Рецепты"
    def get_ingredients(self):
        """Newline-joined ingredient titles (used as an admin column)."""
        # Bug fix: the M2M field is named ``ingredients`` (plural) —
        # ``self.ingredient`` raised AttributeError when this ran.
        return "\n".join(
            self.ingredients.all().values_list("title", flat=True))
    get_ingredients.short_description = "Ингредиенты"
|
{"/recipes/views.py": ["/api/models.py", "/foodgram/settings.py", "/recipes/auxiliary.py", "/recipes/models.py"], "/api/views.py": ["/recipes/models.py", "/api/models.py"], "/recipes/templatetags/recipes_tag_filter.py": ["/api/models.py", "/recipes/models.py"], "/api/models.py": ["/recipes/models.py"], "/recipes/auxiliary.py": ["/recipes/models.py"], "/recipes/admin.py": ["/recipes/models.py"]}
|
29,744
|
fattybobcat/foodgram-project
|
refs/heads/master
|
/api/models.py
|
from django.contrib.auth import get_user_model
from django.db import models
from recipes.models import Recipe
User = get_user_model()
class Follow(models.Model):
    """Subscription: *user* follows *author*'s recipes (unique per pair)."""
    user = models.ForeignKey(User,
                             on_delete=models.CASCADE,
                             null=True,
                             related_name="follower",
                             verbose_name="Пользователь",
                             )
    author = models.ForeignKey(User,
                               on_delete=models.CASCADE,
                               null=True,
                               related_name="following",
                               verbose_name="Автор",
                               )
    class Meta:
        # A user may subscribe to a given author only once.
        constraints = [
            models.UniqueConstraint(
                fields=['user', 'author'],
                name='unique_subscription'
            ),
        ]
        verbose_name = "Подписка"
        verbose_name_plural = "Подписки"
    def __str__(self):
        return f'User: {self.user}, author: {self.author}'
class FavoriteRecipe(models.Model):
    """A recipe marked as a favorite by a user."""
    user = models.ForeignKey(User,
                             on_delete=models.CASCADE,
                             null=True,
                             related_name="favoriter",
                             verbose_name="Пользователь",
                             )
    recipe = models.ForeignKey(Recipe,
                               on_delete=models.CASCADE,
                               related_name="favorite_recipe",
                               verbose_name="Избранный рецепт",
                               )
    class Meta:
        # NOTE(review): unlike Follow, there is no unique (user, recipe)
        # constraint, so duplicate favorites are possible — verify intent.
        verbose_name = "Избранный рецепт"
        verbose_name_plural = "Избранные рецепты"
class Wishlist(models.Model):
    """A recipe added to a user's shopping (purchase) list."""
    user = models.ForeignKey(User,
                             on_delete=models.CASCADE,
                             related_name="wishlist_subscriber",
                             verbose_name="Пользователь",
                             )
    recipe = models.ForeignKey(Recipe,
                               on_delete=models.CASCADE,
                               related_name="wishlist_recipe",
                               verbose_name="Список для покупок ",
                               )
    class Meta:
        verbose_name = "Список"
        verbose_name_plural = "Списки"
|
{"/recipes/views.py": ["/api/models.py", "/foodgram/settings.py", "/recipes/auxiliary.py", "/recipes/models.py"], "/api/views.py": ["/recipes/models.py", "/api/models.py"], "/recipes/templatetags/recipes_tag_filter.py": ["/api/models.py", "/recipes/models.py"], "/api/models.py": ["/recipes/models.py"], "/recipes/auxiliary.py": ["/recipes/models.py"], "/recipes/admin.py": ["/recipes/models.py"]}
|
29,745
|
fattybobcat/foodgram-project
|
refs/heads/master
|
/recipes/auxiliary.py
|
from django.db.models import Q
from .models import TAG_CHOICES
def get_ingredients(request):
    """Collect ingredient rows posted by the recipe form.

    The form posts triples ``nameIngredient_X`` / ``valueIngredient_X``
    / ``unitsIngredient_X``; returns ``{name: (amount, unit)}``.
    """
    prefix = "nameIngredient"
    ingredients = {}
    for field in request.POST:
        if not field.startswith(prefix):
            continue
        # Row suffix after "nameIngredient_".
        row_id = field[len(prefix) + 1:]
        ingredients[request.POST[field]] = (
            request.POST["valueIngredient_" + row_id],
            request.POST["unitsIngredient_" + row_id],
        )
    return ingredients
def tag_collect(request):
    """Collect active tag filters from the query string.

    Returns ``(tags, condition)``: *tags* is the list of active tag
    labels, *condition* is a Q object OR-ing a ``tags__contains``
    lookup per label, or None when no tag is selected.
    """
    tags = []
    for label, _ in TAG_CHOICES:
        if request.GET.get(label, ""):
            tags.append(label)
    if tags:
        or_condition = Q()
        for i in tags:
            or_condition.add(Q(tags__contains=i), Q.OR)
        return tags, or_condition
    else:
        return tags, None
|
{"/recipes/views.py": ["/api/models.py", "/foodgram/settings.py", "/recipes/auxiliary.py", "/recipes/models.py"], "/api/views.py": ["/recipes/models.py", "/api/models.py"], "/recipes/templatetags/recipes_tag_filter.py": ["/api/models.py", "/recipes/models.py"], "/api/models.py": ["/recipes/models.py"], "/recipes/auxiliary.py": ["/recipes/models.py"], "/recipes/admin.py": ["/recipes/models.py"]}
|
29,746
|
fattybobcat/foodgram-project
|
refs/heads/master
|
/api/urls.py
|
from django.urls import path
from . import views
# AJAX endpoints used by the front-end scripts. The bare path handles
# POST (add) and the "<int:id>" variant handles DELETE for each resource.
urlpatterns = [
    path("ingredients",
         views.ingredient_hints,
         name="ingredient_hints"),
    path("favorites",
         views.FavoriteApi.as_view(),
         name="favorites"),
    path("favorites/<int:id>",
         views.FavoriteApi.as_view(),
         name="favorite_delete"),
    path("subscriptions",
         views.SubscriptionApi.as_view(),
         name="subscriptions"),
    path("subscriptions/<int:id>",
         views.SubscriptionApi.as_view(),
         name="subscriptions_delete"),
    path("purchases",
         views.WishlistApi.as_view(),
         name="purchases"),
    path("purchases/<int:id>",
         views.WishlistApi.as_view(),
         name="purchases_delete"),
]
|
{"/recipes/views.py": ["/api/models.py", "/foodgram/settings.py", "/recipes/auxiliary.py", "/recipes/models.py"], "/api/views.py": ["/recipes/models.py", "/api/models.py"], "/recipes/templatetags/recipes_tag_filter.py": ["/api/models.py", "/recipes/models.py"], "/api/models.py": ["/recipes/models.py"], "/recipes/auxiliary.py": ["/recipes/models.py"], "/recipes/admin.py": ["/recipes/models.py"]}
|
29,747
|
fattybobcat/foodgram-project
|
refs/heads/master
|
/recipes/urls.py
|
from django.urls import path
from . import views
# Site-facing routes: recipe CRUD, subscriptions, favorites, the
# shopping list (with a downloadable export), info pages and profiles.
urlpatterns = [
    path("", views.index, name="index"),
    path("recipes/<int:recipe_id>",
         views.recipe_single,
         name="recipe_single"),
    path("recipes/new/",
         views.new_recipe,
         name="recipe_new"),
    path("recipes/edit/<int:recipe_id>/",
         views.EditRecipe.as_view(),
         name="recipe_edit"),
    path("recipes/edit/<int:recipe_id>/delete/",
         views.recipe_delete,
         name="recipe_delete"),
    path("follow/",
         views.follow_index,
         name="follow_index"),
    path("favorite/",
         views.favorite,
         name="favorite_recipes"),
    path("shopping_list/",
         views.shopping_list,
         name="shopping_list"),
    path("shopping_list/download",
         views.download_wishlist,
         name="download_wishlist"),
    path("about/", views.about, name="about"),
    path("tech/", views.tech, name="tech"),
    path("user/<username>/", views.profile, name="profile"),
]
|
{"/recipes/views.py": ["/api/models.py", "/foodgram/settings.py", "/recipes/auxiliary.py", "/recipes/models.py"], "/api/views.py": ["/recipes/models.py", "/api/models.py"], "/recipes/templatetags/recipes_tag_filter.py": ["/api/models.py", "/recipes/models.py"], "/api/models.py": ["/recipes/models.py"], "/recipes/auxiliary.py": ["/recipes/models.py"], "/recipes/admin.py": ["/recipes/models.py"]}
|
29,748
|
fattybobcat/foodgram-project
|
refs/heads/master
|
/users/urls.py
|
from django.urls import include, path
from . import views
urlpatterns = [
    # Registration page; login/logout/password management come from the
    # built-in django.contrib.auth URL conf included below.
    path('reg/', views.SignUp.as_view(), name='reg'),
    path("", include("django.contrib.auth.urls")),
]
|
{"/recipes/views.py": ["/api/models.py", "/foodgram/settings.py", "/recipes/auxiliary.py", "/recipes/models.py"], "/api/views.py": ["/recipes/models.py", "/api/models.py"], "/recipes/templatetags/recipes_tag_filter.py": ["/api/models.py", "/recipes/models.py"], "/api/models.py": ["/recipes/models.py"], "/recipes/auxiliary.py": ["/recipes/models.py"], "/recipes/admin.py": ["/recipes/models.py"]}
|
29,749
|
fattybobcat/foodgram-project
|
refs/heads/master
|
/foodgram/settings.py
|
import os
from dotenv import load_dotenv
load_dotenv()
# NOTE(review): dotenv_path is computed but never passed to load_dotenv().
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): secret key committed to the repository — move it to an
# environment variable and rotate the key.
SECRET_KEY = '1v%g#)q&7ta9sxe9l5)z603@5@%ho8jdxzj930zm2eq8mympwz'
# NOTE(review): DEBUG=True with ALLOWED_HOSTS=['*'] is unsafe for
# production deployments.
DEBUG = True
ALLOWED_HOSTS = ['*']
SITE_ID = 1
INSTALLED_APPS = [
    'recipes',
    'users',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.flatpages',
    'api',
    'django_filters',
    'multiselectfield',
    'sorl.thumbnail',
    'django_admin_multiple_choice_list_filter',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'foodgram.urls'
TEMPLATES_DIR = os.path.join(BASE_DIR, "templates")
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATES_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Project processor: injects the shopping-list counter.
                'recipes.context_processors.shop_list_size',
            ],
        },
    },
]
WSGI_APPLICATION = 'foodgram.wsgi.application'
# Database settings are read from the environment (.env / container).
DATABASES = {
    'default': {
        'ENGINE': os.environ.get('DB_ENGINE',
                                 'django.db.backends.postgresql'),
        'NAME': os.environ.get('DB_NAME'),
        'USER': os.environ.get('POSTGRES_USER'),
        'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
        'HOST': os.environ.get('DB_HOST'),
        'PORT': os.environ.get('DB_PORT'),
    }
}
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.'
                'UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.'
                'MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.'
                'CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.'
                'NumericPasswordValidator',
    },
]
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# In development static files are served from ./static; in production
# collectstatic gathers them into STATIC_ROOT instead.
if DEBUG:
    STATICFILES_DIRS = [
        os.path.join(BASE_DIR, 'static')
    ]
else:
    STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
# Outgoing mail via Gmail SMTP; credentials come from the environment.
# EMAIL_FILE_PATH = os.path.join(BASE_DIR, "sent_emails")
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = os.environ.get('MAIL_SENDER')
EMAIL_HOST_PASSWORD = os.environ.get('PASSWORD_MAIL_SENDER')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# EMAIL_PORT = 465
# EMAIL_USE_SSL = True
SERVER_EMAIL = EMAIL_HOST_USER
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
# EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
# EMAIL_FILE_PATH = os.path.join(BASE_DIR, "sent_emails")
# Recipes shown per page — presumably the pagination size; confirm usage.
COUNT_RECIPE = 6
|
{"/recipes/views.py": ["/api/models.py", "/foodgram/settings.py", "/recipes/auxiliary.py", "/recipes/models.py"], "/api/views.py": ["/recipes/models.py", "/api/models.py"], "/recipes/templatetags/recipes_tag_filter.py": ["/api/models.py", "/recipes/models.py"], "/api/models.py": ["/recipes/models.py"], "/recipes/auxiliary.py": ["/recipes/models.py"], "/recipes/admin.py": ["/recipes/models.py"]}
|
29,750
|
fattybobcat/foodgram-project
|
refs/heads/master
|
/recipes/context_processors.py
|
def shop_list_size(request):
    """Template context processor: size of the user's shopping list.

    Anonymous visitors always get 0.
    """
    user = request.user
    if not user.is_authenticated:
        return {"shop_list_size": 0}
    return {"shop_list_size": user.wishlist_subscriber.all().count()}
|
{"/recipes/views.py": ["/api/models.py", "/foodgram/settings.py", "/recipes/auxiliary.py", "/recipes/models.py"], "/api/views.py": ["/recipes/models.py", "/api/models.py"], "/recipes/templatetags/recipes_tag_filter.py": ["/api/models.py", "/recipes/models.py"], "/api/models.py": ["/recipes/models.py"], "/recipes/auxiliary.py": ["/recipes/models.py"], "/recipes/admin.py": ["/recipes/models.py"]}
|
29,751
|
fattybobcat/foodgram-project
|
refs/heads/master
|
/recipes/admin.py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.db.models import Q
from django_admin_multiple_choice_list_filter.list_filters import \
MultipleChoiceListFilter
from .models import Ingredient, IngredientAmount, Recipe, User
TAG_CHOICES = [
("breakfast", "Завтрак"),
("lunch", "Обед"),
("dinner", "Ужин"),
]
class IngredientAmountInline(admin.TabularInline):
model = IngredientAmount
min_num = 1
def tag_filt(request):
"""Собирает теги для фильтрации рецептов на странице"""
tags = request
if tags:
or_condition = Q()
for i in tags:
or_condition.add(Q(tags__contains=i), Q.OR)
return tags, or_condition
else:
return tags, None
class TagsListFilter(MultipleChoiceListFilter):
title = 'tags'
parameter_name = 'tags__contains'
def lookups(self, request, model_admin):
return TAG_CHOICES
def queryset(self, request, queryset):
if request.GET.get(self.parameter_name):
a = request.GET[self.parameter_name].split(",")
tags, tags_filter = tag_filt(a)
if tags_filter:
queryset = queryset.filter(tags_filter).distinct()
return queryset
class UserAdmin(BaseUserAdmin):
list_filter = ('first_name', 'email')
class RecipeAdmin(admin.ModelAdmin):
list_display = ("pk", "title", "time", "description",
"pub_date", "author", "count_favorite", "tags")
search_fields = ("title", "tags", )
list_filter = ("pub_date", "author", TagsListFilter)
empty_value_display = "-пусто-"
inlines = [
IngredientAmountInline,
]
autocomplete_fields = ("ingredients",)
def count_favorite(self, obj):
return obj.favorite_recipe.count()
count_favorite.short_description = "в избранном кол."
class IngredientAdmin(admin.ModelAdmin):
list_display = ("pk", "title", "dimension")
search_fields = ("title",)
list_filter = ("title",)
empty_value_display = "-пусто-"
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Recipe, RecipeAdmin)
admin.site.register(Ingredient, IngredientAdmin)
|
{"/recipes/views.py": ["/api/models.py", "/foodgram/settings.py", "/recipes/auxiliary.py", "/recipes/models.py"], "/api/views.py": ["/recipes/models.py", "/api/models.py"], "/recipes/templatetags/recipes_tag_filter.py": ["/api/models.py", "/recipes/models.py"], "/api/models.py": ["/recipes/models.py"], "/recipes/auxiliary.py": ["/recipes/models.py"], "/recipes/admin.py": ["/recipes/models.py"]}
|
29,755
|
AlokD123/DisasterHack
|
refs/heads/master
|
/tweet.py
|
#!usr/bin/env python3.7
import tweepy
import time
from sensor_test import getInput
from tweepy.auth import OAuthHandler
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables / a secrets store and rotate these keys.
API_KEY='KZ6deQGfupfWNG1Ab8NcNBz9V'
API_SECRET='1gH4yPTIS5RqzQ1Jx9KIcXQ5lupSXNZyTrpDHVyV2nrStiSYz6'
ACCESS_TOKEN='973718052561879041-zDzyVVhgUoGk6kSx67G1okYT1aFGzzW'
ACCESS_TOKEN_SECRET='2L7MGvPx5ztz4AFcY80pj8MD8btCs8u6qogxnbo1LFyBC'
# Authenticate once, not on every polling iteration.
auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
# Poll the sensor every 5 s and tweet an alert above 33 °C.
while 1:
    x = getInput()
    # Bug fix: getInput() returns [temperature, humidity, latlng];
    # index 1 is the humidity reading — temperature is index 0.
    temp = float(x[0])
    location = "55 St George St, Toronto= St.George"
    if temp > 33.0:
        api.update_status(" High Temperature Alert!! Temperature: "+str(round(temp,1))+" degrees Celsius \nLocation: 55 St George St, Toronto\n" + "#torontopolice "+"#firestation "+"#breakingnews "+"#nourishnew ")
        print("Sent!")
    time.sleep(5)
|
{"/tweet.py": ["/sensor_test.py"], "/take_pic.py": ["/sensor_test.py"]}
|
29,756
|
AlokD123/DisasterHack
|
refs/heads/master
|
/solace_publish.py
|
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
import time
import json
# initialize device
# Connection parms for Solace Event Broker
# NOTE(review): broker credentials are hard-coded — move them to
# environment variables / a secrets store.
solace_url = "mr2aqty0xnecd5.messaging.solace.cloud"
#solace_url = "mqtt.eclipse.org"
solace_port = 21062
solace_user = "solace-cloud-client"
solace_passwd = "80rkel9bt7789ja91pgls7snl"
solace_clientid = "vats_id"
solace_topic_temp = "devices/temperature/events"
solace_topic_humidity = "devices/humidity/events"
payload = "Hello from Raspberry Pi"
# MQTT Client Connectivity to Solace Event Broker
client = mqtt.Client(solace_clientid)
client.username_pw_set(username=solace_user,password=solace_passwd)
print ("Connecting to solace {}:{} as {}". format(solace_url, solace_port, solace_user))
client.connect(solace_url, port=solace_port)
client.loop_start()
# Publish Sensor streams to Solace Event Broker
while True:
    temp,humidity,latitude,longitude = 1,2,3,4 #Get data here
    #print("Temp: %d C" % result.temperature +' '+"Humid: %d %%" % result.humidity)
    # Read Temp and humidity sensotr outputs
    temp_payload = temp
    hum_payload = humidity
    #print("Streaming sensor events to Solace")
    # Construct JSON sensor output string
    temp_payload = {"timestamp": int(time.time()), "device": "Temperature", "Temperature": temp_payload}
    temp_payload = json.dumps(temp_payload,indent=4)
    print (temp_payload)
    hum_payload = {"timestamp": int(time.time()), "device": "Humidity", "Humidity": hum_payload}
    hum_payload = json.dumps(hum_payload, indent=4)
    print (hum_payload)
    # Publish Json event to Solace Event Broker (QoS 1 = at-least-once)
    client.publish(solace_topic_temp, temp_payload, qos=1)
    client.publish(solace_topic_humidity,hum_payload, qos=1)
    time.sleep(1)
# NOTE(review): the loop above never exits, so these teardown calls are
# unreachable; consider a KeyboardInterrupt handler.
client.loop_stop()
client.disconnect()
|
{"/tweet.py": ["/sensor_test.py"], "/take_pic.py": ["/sensor_test.py"]}
|
29,757
|
AlokD123/DisasterHack
|
refs/heads/master
|
/take_pic.py
|
import paho.mqtt.client as mqtt
import time
import json
from sensor_test import getInput
from picamera import PiCamera
import array
import base64
def on_publish(mosq, user_data, mid):
    # Publish acknowledgements are intentionally ignored.
    pass
# NOTE(review): broker credentials are hard-coded — move them to
# environment variables / a secrets store.
solace_url = "mr2aqty0xnecd5.messaging.solace.cloud"
solace_port = 21062
solace_user = "solace-cloud-client"
solace_passwd = "80rkel9bt7789ja91pgls7snl"
solace_clientid = "vats_id1"
solace_topic_temp = "devices/camera/events"
client = mqtt.Client(solace_clientid)
client.username_pw_set(solace_user, solace_passwd)
# NOTE(review): this assigns a private attribute; the public API is
# ``client.on_publish = on_publish`` — verify against the paho version.
client._on_publish = on_publish
camera = PiCamera()
client.connect(solace_url, solace_port)
while 1:
    ret = getInput()
    # getInput() returns [temp_str, humidity_str, [lat, lng]].
    temp, humidity, lat, longitude = ret[0], ret[1], ret[2][0], ret[2][1]
    print(temp, humidity, lat, longitude)
    if float(temp) > 0:
        print("capture")
        camera.start_preview()
        time.sleep(1)
        camera.capture('LastCapture.jpg')
        camera.stop_preview()
        # Read the captured image back as a list of byte values so it
        # can be embedded in a JSON payload (file closed via ``with``,
        # fixing the leak in the original code).
        with open('LastCapture.jpg', 'rb') as f:
            content = f.read()
        byte_arr = list(content)
        print(type(byte_arr))
        camera_payload = {"timestamp":int(time.time()), "feature":"camera","Pic":byte_arr} #1
        gps = [{"lat":lat,"lng":longitude}]
        gps_payload = json.dumps(gps)
        # Bug fix: ``temp`` is a *string* from getInput(); comparing it
        # with an int raised TypeError on Python 3 — convert first.
        if float(temp) > 20:
            client.publish('devices/1/gps/events',gps_payload) #Publish site only if e.g. temperature threshold
        temperature_payload = {"timestamp":int(time.time()),"temperature":temp}
        temperature_payload = json.dumps(temperature_payload,indent=4)
        client.publish('devices/1/temperature/events',temperature_payload)
        humidity_payload = {"timestamp":int(time.time()), "feature":humidity}
        humidity_payload = json.dumps(humidity_payload,indent=4)
        client.publish('devices/1/humidity/events',humidity_payload)
client.loop_forever()
|
{"/tweet.py": ["/sensor_test.py"], "/take_pic.py": ["/sensor_test.py"]}
|
29,758
|
AlokD123/DisasterHack
|
refs/heads/master
|
/sensor_test.py
|
'''
sensor_test.py - This is basic sensor_test example.
Created by Yasin Kaya (selengalp), August 28, 2018.
'''
from cellulariot import cellulariot
import time
import geocoder
def getInput():
    """Read temperature, humidity and an approximate device location.

    Returns ``[temp_str, humidity_str, [lat, lng]]`` — the sensor
    readings are stringified; location is an IP-based geocoder lookup
    (approximate, not GPS).
    """
    node = cellulariot.CellularIoTApp()
    node.setupGPIO()
    # Power-cycle the board before reading.
    node.disable()
    time.sleep(1)
    node.enable()
    g = geocoder.ip('me')
    # Pulse the relay and user LED — presumably a visual liveness
    # indicator; TODO confirm this is required before reading sensors.
    node.turnOnRelay()
    time.sleep(2)
    node.turnOffRelay()
    time.sleep(0.5)
    node.turnOnUserLED()
    time.sleep(2)
    node.turnOffUserLED()
    return [str(node.readTemp()),str(node.readHum()),g.latlng]
|
{"/tweet.py": ["/sensor_test.py"], "/take_pic.py": ["/sensor_test.py"]}
|
29,762
|
Ti-Bi/algorithm_py
|
refs/heads/master
|
/test/algorithm/ValueEvaluatorTest.py
|
__author__ = 'Anatol Bludau'
from unittest import TestCase
from algorithm.ValueEvaluator import ValueEvaluator
class ValueEvaluatorTest(TestCase):
    """
    The set of tests for the Reverse Polish Notation evaluators.
    """
    #-------------------------------------------------------------------------------------------
    ### eval_reverse_polish_notation()
    #-------------------------------------------------------------------------------------------
    def test_eval_reverse_polish_notation_empty_string(self):
        res = ValueEvaluator.eval_reverse_polish_notation("")
        self.assertIsNone(res)
    def test_eval_reverse_polish_notation_string_none(self):
        res = ValueEvaluator.eval_reverse_polish_notation(None)
        self.assertIsNone(res)
    def test_eval_reverse_polish_notation_string_simple_sum(self):
        val = "2 3 +"
        res = ValueEvaluator.eval_reverse_polish_notation(val)
        self.assertEqual(5, res)
    def test_eval_reverse_polish_notation_string_simple_sub(self):
        val = "2 3 -"
        res = ValueEvaluator.eval_reverse_polish_notation(val)
        self.assertEqual(-1, res)
    def test_eval_reverse_polish_notation_string_simple_multiply(self):
        val = "2 3 *"
        res = ValueEvaluator.eval_reverse_polish_notation(val)
        self.assertEqual(6, res)
    # Bug fix: this method was also named ..._simple_sum, silently
    # shadowing the addition test above so it never ran.
    def test_eval_reverse_polish_notation_string_simple_division(self):
        val = "2 3 /"
        res = ValueEvaluator.eval_reverse_polish_notation(val)
        self.assertEqual(float(2) / 3, res)
    def test_eval_reverse_polish_notation_list_empty_list(self):
        res = ValueEvaluator.eval_reverse_polish_notation_list([])
        self.assertIsNone(res)
    def test_eval_reverse_polish_notation_list_none(self):
        res = ValueEvaluator.eval_reverse_polish_notation_list(None)
        self.assertIsNone(res)
    def test_eval_reverse_polish_notation_list_simple_case_plus(self):
        val = [1, 2, "+"]
        res = ValueEvaluator.eval_reverse_polish_notation_list(val)
        self.assertEqual(3, res)
    def test_eval_reverse_polish_notation_list_simple_case_sub(self):
        val = [1, 2, "-"]
        res = ValueEvaluator.eval_reverse_polish_notation_list(val)
        self.assertEqual(-1, res)
    def test_eval_reverse_polish_notation_list_simple_case_multiplication(self):
        val = [1, 2, "*"]
        res = ValueEvaluator.eval_reverse_polish_notation_list(val)
        self.assertEqual(2, res)
    def test_eval_reverse_polish_notation_list_simple_case_division(self):
        val = [1, 2, "/"]
        res = ValueEvaluator.eval_reverse_polish_notation_list(val)
        self.assertAlmostEqual(0.5, res)
    def test_eval_reverse_polish_notation_list_complex_formula(self):
        val = [1, 2, "+", 4, "*", 6, "/", 4, '-']
        res = ValueEvaluator.eval_reverse_polish_notation_list(val)
        self.assertAlmostEqual(-2, res)
|
{"/src/algorithm/__init__.py": ["/src/algorithm/Shuffle.py", "/src/algorithm/Sorting.py", "/src/algorithm/ValueEvaluator.py", "/src/algorithm/StringOperations.py"]}
|
29,763
|
Ti-Bi/algorithm_py
|
refs/heads/master
|
/src/algorithm/Shuffle.py
|
import random
__author__ = 'Anatol Bludau'
class Shuffle(object):
    """List-shuffling helpers."""

    @classmethod
    def linear_shuffle(cls, lst):
        """Shuffle *lst* in place (Fisher-Yates) and return it.

        Bug fix: the swap partner must be drawn from ``range(i + 1)``
        (inclusive of ``i``) for an unbiased shuffle — with
        ``randrange(i)`` element ``i`` could never stay in place,
        biasing the resulting permutation.
        """
        for i in range(1, len(lst)):
            index_for_switch = random.randrange(i + 1)
            lst[i], lst[index_for_switch] = lst[index_for_switch], lst[i]
        return lst
|
{"/src/algorithm/__init__.py": ["/src/algorithm/Shuffle.py", "/src/algorithm/Sorting.py", "/src/algorithm/ValueEvaluator.py", "/src/algorithm/StringOperations.py"]}
|
29,764
|
Ti-Bi/algorithm_py
|
refs/heads/master
|
/src/algorithm/ValueEvaluator.py
|
__author__ = 'Anatol Bludau'
class ValueEvaluator(object):
    """Evaluator for arithmetic expressions in Reverse Polish Notation."""

    # Operator -> implementation; '/' always yields a float (true division).
    __rpn_operators_impl = {
        '+': (lambda a, b: a + b),
        '-': (lambda a, b: a - b),
        '*': (lambda a, b: a * b),
        '/': (lambda a, b: float(a) / b)
    }
    __rpn_operators = ''.join(__rpn_operators_impl.keys())

    @classmethod
    def eval_reverse_polish_notation(cls, str_input):
        """
        Evaluate the Reverse Polish Notation expression in *str_input*.
        :param str_input: input string with reverse polish notation.
        All values and operators should be separated by spaces.
        :return: evaluated value or None if the input is empty
        """
        if not str_input:
            return None
        return cls.eval_reverse_polish_notation_list(str_input.split())

    @classmethod
    def eval_reverse_polish_notation_list(cls, list_input):
        """
        Same as eval_reverse_polish_notation(), but takes a token list.
        Operand tokens may be numbers or numeric strings.
        :param list_input: input list with reverse polish notation.
        :return: evaluated value or None if the list is empty
        """
        if not list_input:
            return None
        stack = []
        for token in list_input:
            if type(token) is str and token in cls.__rpn_operators_impl:
                right = stack.pop()
                left = stack.pop()
                stack.append(cls.__rpn_operators_impl[token](left, right))
            else:
                # Bug fix: the old code forced operands through int() on
                # pop, truncating float intermediates (e.g. "1 2 / 3 *"
                # evaluated to 0 instead of 1.5). Convert string tokens
                # to numbers once, on push, and leave results untouched.
                if isinstance(token, str):
                    token = float(token) if '.' in token else int(token)
                stack.append(token)
        return stack.pop()
|
{"/src/algorithm/__init__.py": ["/src/algorithm/Shuffle.py", "/src/algorithm/Sorting.py", "/src/algorithm/ValueEvaluator.py", "/src/algorithm/StringOperations.py"]}
|
29,765
|
Ti-Bi/algorithm_py
|
refs/heads/master
|
/test/utils/__init__.py
|
__author__ = 'anatolbludau'
|
{"/src/algorithm/__init__.py": ["/src/algorithm/Shuffle.py", "/src/algorithm/Sorting.py", "/src/algorithm/ValueEvaluator.py", "/src/algorithm/StringOperations.py"]}
|
29,766
|
Ti-Bi/algorithm_py
|
refs/heads/master
|
/test/algorithm/StringOperationsTest.py
|
__author__ = 'Anatol Bludau'
from unittest import TestCase
from algorithm.StringOperations import StringOperations
class StringOperationsTest(TestCase):
    """
    -------------------------------------------------------------------------------------------
    ### is_palindromic_string()
    -------------------------------------------------------------------------------------------
    """
    def test_is_palindromic_string_simple_case(self):
        res = StringOperations.is_palindromic_string("abba")
        self.assertTrue(res)
    def test_is_palindromic_string_fails(self):
        res = StringOperations.is_palindromic_string("fsfsdfsggd")
        self.assertFalse(res)
    def test_is_palindromic_string_empty(self):
        res = StringOperations.is_palindromic_string("")
        self.assertFalse(res)
    def test_is_palindromic_string_none(self):
        res = StringOperations.is_palindromic_string(None)
        self.assertFalse(res)
    def test_is_palindromic_string_one_symbol(self):
        res = StringOperations.is_palindromic_string("f")
        self.assertTrue(res)
    """
    -------------------------------------------------------------------------------------------
    ### find_the_largest_palindromic_substring_brut_force()
    -------------------------------------------------------------------------------------------
    """
    def test_find_the_largest_palindromic_substring_brut_force_simple_case(self):
        res = StringOperations.find_the_largest_palindromic_substring_brut_force("testtset")
        self.assertEqual("testtset", res)
    def test_find_the_largest_palindromic_substring_brut_force_empty_string(self):
        res = StringOperations.find_the_largest_palindromic_substring_brut_force("")
        self.assertIsNone(res)
    def test_find_the_largest_palindromic_substring_brut_force_none_string(self):
        res = StringOperations.find_the_largest_palindromic_substring_brut_force(None)
        self.assertIsNone(res)
    def test_find_the_largest_palindromic_substring_brut_force_complicated_case(self):
        res = StringOperations.find_the_largest_palindromic_substring_brut_force("sd;glolsd sk alskdghh ;asdkgstesttsetfjalsdhgakjsgupagdfap9weog")
        self.assertEqual("testtset", res)
    def test_find_the_largest_palindromic_substring_brut_force_string_without_palindromic_substring(self):
        res = StringOperations.find_the_largest_palindromic_substring_brut_force("abcdefghijklmnopqrstuvwxyz")
        self.assertIsNone(res)
    def test_find_the_largest_palindromic_substring_brut_force_one_letter(self):
        res = StringOperations.find_the_largest_palindromic_substring_brut_force("f")
        self.assertEqual("f", res)
    """
    -------------------------------------------------------------------------------------------
    ### find_the_largest_palindromic_substring_matrix()
    -------------------------------------------------------------------------------------------
    """
    # NOTE(review): the string literal below is disabled test code. If it
    # is ever re-enabled, note that it contains TWO methods named
    # test_find_the_largest_palindromic_substring_matrix_one_letter — the
    # second would silently shadow the first; rename one of them.
    """
    def test_find_the_largest_palindromic_substring_matrix_simple_case(self):
        res = StringOperations.find_the_largest_palindromic_substring_matrix("testtset")
        self.assertEqual("testtset", res)
    def test_find_the_largest_palindromic_substring_matrix_empty_string(self):
        res = StringOperations.find_the_largest_palindromic_substring_matrix("")
        self.assertIsNone(res)
    def test_find_the_largest_palindromic_substring_matrix_none_string(self):
        res = StringOperations.find_the_largest_palindromic_substring_matrix(None)
        self.assertIsNone(res)
    def test_find_the_largest_palindromic_substring_matrix_complicated_case(self):
        res = StringOperations.find_the_largest_palindromic_substring_matrix("sd;glolsd sk alskdghh ;asdkgstesttsetfjalsdhgakjsgupagdfap9weog")
        self.assertEqual("testtset", res)
    def test_find_the_largest_palindromic_substring_matrix_string_without_palindromic_substring(self):
        res = StringOperations.find_the_largest_palindromic_substring_matrix("abcdefghijklmnopqrstuvwxyz")
        self.assertIsNone(res)
    def test_find_the_largest_palindromic_substring_matrix_one_letter(self):
        res = StringOperations.find_the_largest_palindromic_substring_matrix("f")
        self.assertEqual("f", res)
    def test_find_the_largest_palindromic_substring_matrix_one_letter(self):
        res = StringOperations.find_the_largest_palindromic_substring_matrix("hhyasag")
        self.assertEqual("asa", res)
    """
|
{"/src/algorithm/__init__.py": ["/src/algorithm/Shuffle.py", "/src/algorithm/Sorting.py", "/src/algorithm/ValueEvaluator.py", "/src/algorithm/StringOperations.py"]}
|
29,767
|
Ti-Bi/algorithm_py
|
refs/heads/master
|
/src/algorithm/Sorting.py
|
import random
__author__ = 'Anatol Bludau'
class Sorting(object):
    """A collection of classic comparison sorts (ascending order).

    ``selection_sort``, ``insertion_sort`` and ``quick_sort`` sort the
    input list in place and return that same list object;
    ``merge_sort`` returns a new list and leaves the input untouched.
    """

    @classmethod
    def selection_sort(cls, lst):
        """
        The simple implementation of selection sort.
        :rtype : sorted input list
        :param lst: list for sorting
        """
        for i in range(len(lst)):
            # find the smallest element in the unsorted suffix lst[i:]
            min_index = i
            min_element = lst[i]
            for j in range(i + 1, len(lst)):
                if lst[j] < min_element:
                    min_element = lst[j]
                    min_index = j
            # exchange the elements
            lst[i], lst[min_index] = lst[min_index], lst[i]
        return lst

    @classmethod
    def insertion_sort(cls, lst):
        """
        The simple implementation of insertion sort.
        :rtype : sorted input list
        :param lst: list for sorting
        """
        for i in range(1, len(lst)):
            # sink lst[i] leftwards into the sorted prefix lst[:i]
            j = i
            while j > 0 and lst[j - 1] > lst[j]:
                lst[j - 1], lst[j] = lst[j], lst[j - 1]
                j -= 1
        return lst

    @classmethod
    def merge_sort(cls, lst):
        """
        The simple implementation of merge sort.
        :rtype : sorted input list
        :param lst: list for sorting
        """
        if len(lst) <= 1:
            return lst
        else:
            middle = len(lst) // 2
            # sort both halves recursively, then merge them
            return cls.__merge(cls.merge_sort(lst[:middle]), cls.merge_sort(lst[middle:]))

    @classmethod
    def quick_sort(cls, lst):
        """
        The simple implementation of quick sort
        :rtype : sorted list (same object with lst)
        :param lst: list for sorting
        """
        # for reduce influence from input
        random.shuffle(lst)
        cls.__quick_sort(lst, 0, len(lst) - 1)
        # bug fix: the method documented returning the sorted list but
        # previously returned None
        return lst

    @classmethod
    def partition(cls, lst, start_index, end_index):
        """
        Separating list on two parts. First element is used as a base element.
        At the result list all of elements witch is less than base place to the left of it
        and witch is greater to the right.
        :rtype : index of the base element in the result or start_index, if there only one element
        :param lst: the list for the partition
        :param start_index: the index from partition will be started (index of the base element)
        :param end_index: the index to partition will be ended
        """
        if end_index <= start_index:
            return start_index
        else:
            base = lst[start_index]
            i = start_index + 1
            j = end_index
            while True:
                # advance i past elements smaller than the base
                while lst[i] < base:
                    i += 1
                    if i >= end_index:
                        break
                # retreat j past elements larger than the base
                while lst[j] > base:
                    j -= 1
                    if j <= start_index:
                        break
                if i >= j:
                    break
                lst[i], lst[j] = lst[j], lst[i]
            # place the base element at its final position j
            lst[start_index], lst[j] = lst[j], lst[start_index]
            return j

    @classmethod
    def __merge(cls, left, right):
        """
        The simple merging of two lists for merge sort.
        :rtype : merged list
        :param left: left part
        :param right: right part
        """
        result = []
        i = j = 0
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                result.append(left[i])
                i += 1
            else:
                result.append(right[j])
                j += 1
        # append the leftover tail of whichever side remains
        result += left[i:]
        result += right[j:]
        return result

    @classmethod
    def __quick_sort(cls, lst, start_index, end_index):
        """
        The recursive implementation of quick sort.
        :rtype : sorted list (same object with lst)
        :param lst: list for sorting
        :param start_index: start index of sub list for sort
        :param end_index: end index of sub list for sort
        """
        if end_index <= start_index:
            return lst
        else:
            middle_index = cls.partition(lst, start_index, end_index)
            cls.__quick_sort(lst, start_index, middle_index - 1)
            cls.__quick_sort(lst, middle_index + 1, end_index)
            return lst
|
{"/src/algorithm/__init__.py": ["/src/algorithm/Shuffle.py", "/src/algorithm/Sorting.py", "/src/algorithm/ValueEvaluator.py", "/src/algorithm/StringOperations.py"]}
|
29,768
|
Ti-Bi/algorithm_py
|
refs/heads/master
|
/test/algorithm/SortingTest.py
|
import random
from unittest import TestCase
from algorithm import Sorting
__author__ = 'Anatol Bludau'
class TestSorting(TestCase):
    """Unit tests for the Sorting class."""

    def setUp(self):
        """Build sorted reference lists plus shuffled copies to sort."""
        self.sorted_seq = list(range(100))
        # bug fix: the reference list must itself be sorted -- appending
        # range(35, 75) after range(100) produced an unsorted "expected"
        self.sorted_seq_with_doubles = sorted(list(range(100)) + list(range(35, 75)))
        self.empty_list = []
        # creation of shuffled lists
        self.seq = list(self.sorted_seq)
        self.seq_with_doubles = list(self.sorted_seq_with_doubles)
        random.shuffle(self.seq)
        random.shuffle(self.seq_with_doubles)

    def test_selection_sort(self):
        Sorting.selection_sort(self.seq)
        self.assertListEqual(self.sorted_seq, self.seq)

    def test_selection_sort_with_empty_list(self):
        Sorting.selection_sort(self.empty_list)
        self.assertListEqual([], self.empty_list)

    def test_insertion_sort(self):
        Sorting.insertion_sort(self.seq)
        self.assertListEqual(self.sorted_seq, self.seq)

    def test_insertion_sort_with_empty_list(self):
        # bug fix: this test previously exercised selection_sort
        Sorting.insertion_sort(self.empty_list)
        self.assertListEqual([], self.empty_list)

    def test_merge_sort(self):
        sorted_list = Sorting.merge_sort(self.seq)
        self.assertListEqual(self.sorted_seq, sorted_list)

    def test_merge_sort_with_doubles(self):
        sorted_list = Sorting.merge_sort(self.seq_with_doubles)
        self.assertListEqual(self.sorted_seq_with_doubles, sorted_list)

    # bug fix: this method previously duplicated the name
    # test_merge_sort_with_doubles, silently shadowing the test above
    def test_merge_sort_with_empty_list(self):
        sorted_list = Sorting.merge_sort([])
        self.assertListEqual([], sorted_list)

    def test_partition(self):
        # 55 smaller and 44 larger elements around the base value 55,
        # so the base must land at index 55
        r_seq = list(range(55)) + list(range(56, 100))
        random.shuffle(r_seq)
        r_seq = [55] + r_seq
        # bug fix: assertEquals is a deprecated alias removed in Python 3.12
        self.assertEqual(55, Sorting.partition(r_seq, 0, len(r_seq)-1))

    def test_partition_with_one_element(self):
        lst_for_partition = [3]
        Sorting.partition(lst_for_partition, 0, 0)
        self.assertListEqual([3], lst_for_partition)

    def test_partition_consistence(self):
        # partition must only permute, never lose or duplicate, elements
        Sorting.partition(self.seq, 0, len(self.seq)-1)
        self.assertListEqual(self.sorted_seq, sorted(self.seq))

    def test_quick_sort(self):
        Sorting.quick_sort(self.seq)
        self.assertListEqual(self.sorted_seq, self.seq)
|
{"/src/algorithm/__init__.py": ["/src/algorithm/Shuffle.py", "/src/algorithm/Sorting.py", "/src/algorithm/ValueEvaluator.py", "/src/algorithm/StringOperations.py"]}
|
29,769
|
Ti-Bi/algorithm_py
|
refs/heads/master
|
/src/algorithm/__init__.py
|
__author__ = 'Anatol Bludau'
from .Shuffle import Shuffle
from .Sorting import Sorting
from .ValueEvaluator import ValueEvaluator
from .StringOperations import StringOperations
|
{"/src/algorithm/__init__.py": ["/src/algorithm/Shuffle.py", "/src/algorithm/Sorting.py", "/src/algorithm/ValueEvaluator.py", "/src/algorithm/StringOperations.py"]}
|
29,770
|
Ti-Bi/algorithm_py
|
refs/heads/master
|
/src/utils/matrix/MatrixBypass.py
|
__author__ = 'anatolbludau'
class MatrixBypass(object):
    """Traversal helpers for two-dimensional (list-of-lists) matrices."""

    # Default callbacks: each element is printed followed by a tab, and a
    # bare newline ends every row.
    __simple_print = lambda x: print(str(x), end="\t")
    __print_new_line = lambda x: print()

    @classmethod
    def simple(cls, matrix, on_row=__print_new_line, on_element=__simple_print):
        """Walk ``matrix`` row by row, calling ``on_element`` for every cell
        and ``on_row`` once after each finished row.  ``None`` and empty
        matrices are silently ignored.
        """
        if not matrix:
            return
        for row in matrix:
            for cell in row:
                on_element(cell)
            on_row(row)
|
{"/src/algorithm/__init__.py": ["/src/algorithm/Shuffle.py", "/src/algorithm/Sorting.py", "/src/algorithm/ValueEvaluator.py", "/src/algorithm/StringOperations.py"]}
|
29,771
|
Ti-Bi/algorithm_py
|
refs/heads/master
|
/test/utils/matrix/MatrixBypassTest.py
|
__author__ = 'anatolbludau'
from unittest import TestCase
from utils.matrix.MatrixBypass import MatrixBypass
class MatrixBypassTest(TestCase):
    """Smoke tests for MatrixBypass.simple.

    No explicit assertions: these tests only verify that traversal runs
    without raising for typical and degenerate inputs (output goes to
    stdout via the default callbacks).
    """

    def test_simple(self):
        # 3x3 matrix exercises the normal nested-loop path
        matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        MatrixBypass.simple(matrix)

    def test_simple_none(self):
        # None is rejected by the early-return guard
        MatrixBypass.simple(None)

    def test_simple_empty(self):
        # an empty matrix is also ignored by the guard
        MatrixBypass.simple([])

    def test_sample_empty_row(self):
        # NOTE(review): "sample" is presumably a typo for "simple"
        # a single empty row triggers no element callbacks
        MatrixBypass.simple([[]])
|
{"/src/algorithm/__init__.py": ["/src/algorithm/Shuffle.py", "/src/algorithm/Sorting.py", "/src/algorithm/ValueEvaluator.py", "/src/algorithm/StringOperations.py"]}
|
29,772
|
Ti-Bi/algorithm_py
|
refs/heads/master
|
/src/algorithm/StringOperations.py
|
__author__ = 'Anatol Bludau'
class StringOperations(object):
    """
    The class includes algorithms, related with string processing.
    """

    @classmethod
    def find_the_largest_palindromic_substring_brut_force(cls, string):
        """
        Finds the largest palindromic substring by testing every substring
        of length >= 2 (O(n^3) brute force).

        :param string: string for test
        :return: the largest palindromic substring, the string itself for
                 one-character input, or None if no multi-character
                 palindromic substring exists (or the input is empty/None)
        """
        if not string:
            return None
        string_len = len(string)
        if string_len == 1:
            return string
        longest_substring_length = 0
        longest_substring = None
        for i in range(string_len):
            for j in range(i + 1, string_len):
                # NOTE: j - i is one less than the real substring length,
                # but the offset is consistent for every candidate, so the
                # comparison still selects the longest one
                current_substring_len = j - i
                current_substring = string[i:j+1]
                if cls.is_palindromic_string(current_substring) and current_substring_len > longest_substring_length:
                    longest_substring = current_substring
                    longest_substring_length = current_substring_len
        return longest_substring

    @classmethod
    def find_the_largest_palindromic_substring_matrix(cls, string):
        """
        Finds the largest palindromic substring with dynamic programming
        over a lower-triangular table (O(n^2) time and space).

        matrix[j][i] is truthy iff string[i:j+1] is palindromic (i <= j).

        :param string: string for test
        :return: the largest palindromic substring, the string itself for
                 one-character input, or None if no multi-character
                 palindromic substring exists (or the input is empty/None)
        """
        if not string:
            return None
        string_len = len(string)
        if string_len == 1:
            return string
        # create the initial matrix (lower triangle only)
        matrix = [[0 for i in range(j+1)] for j in range(string_len)]
        best_start = None
        best_length = 0
        # single characters are trivially palindromic; like the brute-force
        # method, they never win for multi-character input
        for i in range(string_len):
            matrix[i][i] = 1
        # length-2 palindromes: equal adjacent characters
        for i in range(string_len - 1):
            if string[i] == string[i+1]:
                matrix[i+1][i] = 1
                if best_length < 2:
                    best_start, best_length = i, 2
        # longer substrings: palindromic iff outer characters match and the
        # inner substring is already known to be palindromic
        for length in range(3, string_len + 1):
            for i in range(string_len - length + 1):
                j = i + length - 1
                if string[i] == string[j] and matrix[j-1][i+1]:
                    matrix[j][i] = 1
                    if length > best_length:
                        best_start, best_length = i, length
        # bug fix: the method previously filled (an incorrect) table and
        # then fell off the end, always returning None
        if best_start is None:
            return None
        return string[best_start:best_start + best_length]

    @classmethod
    def is_palindromic_string(cls, string):
        """
        Checks if the input string is palindromic.

        :param string: input string
        :return: True if the input string not empty and palindromic. False in another cases.
        """
        if not string:
            return False
        for i in range(len(string)):
            # bug fix: compare characters by value (!=), not identity
            # (is not) -- identity only worked by CPython interning accident
            if string[i] != string[-i-1]:
                return False
        return True
|
{"/src/algorithm/__init__.py": ["/src/algorithm/Shuffle.py", "/src/algorithm/Sorting.py", "/src/algorithm/ValueEvaluator.py", "/src/algorithm/StringOperations.py"]}
|
29,773
|
Ti-Bi/algorithm_py
|
refs/heads/master
|
/test/algorithm/ShuffleTest.py
|
from unittest import TestCase
from algorithm import Shuffle
__author__ = 'Anatol Bludau'
class TestShuffle(TestCase):
    """Unit tests for the Shuffle class."""

    def setUp(self):
        self.seq = list(range(10))

    def test_linear_shuffle(self):
        """Shuffling must only permute: sorting the shuffled list must
        restore the original ordered contents."""
        Shuffle.linear_shuffle(self.seq)
        self.seq.sort()
        self.assertEqual(self.seq, list(range(10)))

    def test_linear_shuffle_for_empty_param(self):
        """An empty list is left unchanged."""
        lst = []
        Shuffle.linear_shuffle(lst)
        # bug fix: assertEquals is a deprecated alias removed in Python 3.12
        self.assertEqual(lst, [])
|
{"/src/algorithm/__init__.py": ["/src/algorithm/Shuffle.py", "/src/algorithm/Sorting.py", "/src/algorithm/ValueEvaluator.py", "/src/algorithm/StringOperations.py"]}
|
29,780
|
sudokid-software/bad_todo_django_app_assignment
|
refs/heads/master
|
/todo_django_app/api/views.py
|
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import Todo
from .serializers import TodoSerializer
class TodoView(APIView):
    """
    Bulk TODO endpoint.

    1. Create one or more TODOs.
    2. Delete one or more TODOs.
    3. Update one or more TODOs.
    4. List all TODOs.
        a. Able to filter TODOs by state and/or due-date.
    """
    serializer_class = TodoSerializer

    @staticmethod
    def get_queryset(request):
        """Return Todos filtered by the optional ``state`` and/or
        ``due-date`` query parameters (all Todos when neither is given)."""
        state = request.query_params.get('state', None)
        due_date = request.query_params.get('due-date', None)
        filters = {}
        if state:
            filters['state'] = state
        if due_date:
            filters['due_date'] = due_date
        if filters:
            return Todo.objects.filter(**filters)
        return Todo.objects.all()

    def get(self, request):
        """List (optionally filtered) TODOs."""
        todos = self.get_queryset(request)
        todos_serialized = self.serializer_class(todos, many=True)
        return Response(todos_serialized.data, 200)

    @staticmethod
    def post(request):
        """Create TODOs from a JSON list of objects.

        Bug fix: every payload item is validated *before* any is saved,
        so one invalid item no longer leaves earlier items half-created.
        Returns the serialized created records, or the first item's
        validation errors with status 400.
        """
        data = request.data
        if not isinstance(data, list):
            return Response({'error': 'Invalid request'}, status=400)
        serializers = []
        for todo in data:
            new_todo = TodoSerializer(data=todo)
            if not new_todo.is_valid():
                return Response(new_todo.errors, status=400)
            serializers.append(new_todo)
        todos_created = []
        for new_todo in serializers:
            new_todo.save()
            todos_created.append(new_todo.data)
        return Response(todos_created, status=200)

    @staticmethod
    def delete(request):
        """Delete TODOs given a JSON list of primary keys; returns the
        serialized records that were removed, or 404 when none matched."""
        todo_list = request.data
        todos = Todo.objects.filter(pk__in=todo_list)
        if len(todos) == 0:
            return Response({'error': 'Not found', 'todos': todo_list}, status=404)
        # serialize before deleting so the response can echo the records
        todos_serialized = TodoSerializer(todos, many=True).data
        # single bulk queryset delete instead of a side-effecting
        # list comprehension with one query per record
        todos.delete()
        return Response(todos_serialized, status=200)

    @staticmethod
    def patch(request):
        """Partially update TODOs; each payload item must carry its ``id``.

        Items without an id and unknown ids are silently skipped; only
        successfully validated updates appear in the response.
        """
        todo_list = request.data
        if not isinstance(todo_list, list):
            return Response({'error': 'Invalid request'}, status=400)
        todos_updated = []
        for todo in todo_list:
            pk = todo.get('id', False)
            if not pk:
                continue
            # keep the try block minimal: only the lookup can raise here
            try:
                todo_record = Todo.objects.get(pk=pk)
            except Todo.DoesNotExist:
                continue
            todo_update = TodoSerializer(todo_record, data=todo, partial=True)
            if todo_update.is_valid():
                todo_update.save()
                todos_updated.append(todo_update.data)
        return Response(todos_updated, status=200)
class TodoViewSingle(APIView):
    """Read-only endpoint for a single Todo addressed by primary key."""

    @staticmethod
    def get(request, pk=None):
        """Return the serialized Todo for ``pk``, or a 404 error payload."""
        try:
            record = Todo.objects.get(pk=pk)
        except Todo.DoesNotExist:
            return Response({'error': 'Not found'}, status=404)
        return Response(TodoSerializer(record, many=False).data, status=200)
|
{"/todo_django_app/api/views.py": ["/todo_django_app/api/models.py"]}
|
29,781
|
sudokid-software/bad_todo_django_app_assignment
|
refs/heads/master
|
/todo_django_app/api/models.py
|
from django.db import models
class Todo(models.Model):
    """A single TODO item with a workflow state and a due date."""

    # allowed workflow states (stored value, human-readable label)
    STATE_CHOICE = (
        ('todo', 'todo'),
        ('in-progress', 'in-progress'),
        ('done', 'done'),
    )
    # timestamps are maintained automatically by Django on insert/update
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    state = models.CharField(choices=STATE_CHOICE, max_length=254)
    due_date = models.DateField()
    description = models.TextField()

    def __str__(self):
        # short preview of the description for admin/listing displays
        return self.description[0:25]
|
{"/todo_django_app/api/views.py": ["/todo_django_app/api/models.py"]}
|
29,782
|
sudokid-software/bad_todo_django_app_assignment
|
refs/heads/master
|
/todo_django_app/todo_django_app/urls.py
|
from django.contrib import admin
from django.urls import path
# from django.urls import path, include
# from rest_framework.routers import DefaultRouter
from api.views import TodoView, TodoViewSingle
# router = DefaultRouter()
# router.register('todo', TodoViewSet, base_name='todo')
# URL table: bulk TODO operations at /api/todo/, single-record reads at
# /api/todo/<pk>/, plus the Django admin site.
urlpatterns = [
    path('api/todo/', TodoView.as_view()),
    path('api/todo/<int:pk>/', TodoViewSingle.as_view()),
    path('admin/', admin.site.urls),
]
|
{"/todo_django_app/api/views.py": ["/todo_django_app/api/models.py"]}
|
29,785
|
crogan/PHSX815_Week2
|
refs/heads/master
|
/python/MySort.py
|
#! /usr/bin/env python
import sys
import numpy as np
# import our Random class from python/Random.py file
sys.path.append(".")
from python.Random import Random
#################
# MySort class
#################
# class to sort lists of objects in different ways
class MySort:
    """A crappy sorting class: several ascending sorts over Python lists."""

    def __init__(self, seed = 5555):
        """Seed the internal Random generator (used only for pivot picks)."""
        self.m_random = Random(seed)

    def BubbleSort(self, array):
        """Sort ``array`` in place with bubble sort and return it.

        Terminates early as soon as one full pass performs no swap.
        """
        length = len(array)
        for done in range(length):
            swapped = False
            # each pass bubbles the largest remaining item to the end, so
            # the scanned prefix shrinks by one per iteration
            for k in range(length - done - 1):
                if array[k] > array[k + 1]:
                    array[k], array[k + 1] = array[k + 1], array[k]
                    swapped = True
            if not swapped:
                # nothing moved on this pass: the list is already sorted
                break
        return array

    def InsertionSort(self, array):
        """Sort ``array`` in place with insertion sort and return it."""
        for pos in range(1, len(array)):
            current = array[pos]
            scan = pos - 1
            # shift larger elements of the sorted prefix one slot to the
            # right until the insertion point for ``current`` appears
            while scan >= 0 and array[scan] > current:
                array[scan + 1] = array[scan]
                scan -= 1
            array[scan + 1] = current
        return array

    def QuickSort(self, array):
        """Return a new sorted list built by recursive quicksort.

        The pivot is picked with the instance's Random generator.
        """
        if len(array) < 2:
            # zero or one element: already sorted
            return array
        pivot = array[int(self.m_random.rand() * len(array))]
        below, equal, above = [], [], []
        for value in array:
            # three-way partition around the pivot value
            if value < pivot:
                below.append(value)
            elif value > pivot:
                above.append(value)
            else:
                equal.append(value)
        return self.QuickSort(below) + equal + self.QuickSort(above)

    def DefaultSort(self, array):
        """Sort ``array`` in place with the built-in list.sort and return it."""
        array.sort()
        return array
|
{"/python/MySort.py": ["/python/Random.py"], "/python/CoinToss.py": ["/python/Random.py"], "/python/CookieAnalysis.py": ["/python/MySort.py"], "/python/CoinAnalysis.py": ["/python/Random.py"]}
|
29,786
|
crogan/PHSX815_Week2
|
refs/heads/master
|
/python/CoinToss.py
|
#! /usr/bin/env python
# imports of external packages to use in our code
import sys
import numpy as np
# import our Random class from python/Random.py file
sys.path.append(".")
from python.Random import Random
# main function for our coin toss Python code
if __name__ == "__main__":

    # if the user includes the flag -h or --help print the options
    if '-h' in sys.argv or '--help' in sys.argv:
        print("Usage: %s [-seed number]" % sys.argv[0])
        print()  # bug fix: bare `print` is a no-op expression in Python 3
        sys.exit(1)

    # default seed
    seed = 5555

    # default single coin-toss probability for "1"
    prob = 0.5

    # default number of coin tosses (per experiment)
    Ntoss = 1

    # default number of experiments
    Nexp = 1

    # output file defaults
    doOutputFile = False

    # read the user-provided seed from the command line (if there)
    if '-seed' in sys.argv:
        p = sys.argv.index('-seed')
        # bug fix: convert to int -- the seed was previously kept as a
        # string and only worked because np.uint64 happens to parse strings
        seed = int(sys.argv[p+1])
    if '-prob' in sys.argv:
        p = sys.argv.index('-prob')
        ptemp = float(sys.argv[p+1])
        if ptemp >= 0 and ptemp <= 1:
            prob = ptemp
    if '-Ntoss' in sys.argv:
        p = sys.argv.index('-Ntoss')
        Nt = int(sys.argv[p+1])
        if Nt > 0:
            Ntoss = Nt
    if '-Nexp' in sys.argv:
        p = sys.argv.index('-Nexp')
        Ne = int(sys.argv[p+1])
        if Ne > 0:
            Nexp = Ne
    if '-output' in sys.argv:
        p = sys.argv.index('-output')
        OutputFileName = sys.argv[p+1]
        doOutputFile = True

    # class instance of our Random class using seed
    random = Random(seed)

    # one experiment per line, each toss written as "0 " or "1 "
    if doOutputFile:
        # context manager guarantees the file is closed even on error
        with open(OutputFileName, 'w') as outfile:
            for e in range(0, Nexp):
                for t in range(0, Ntoss):
                    outfile.write(str(random.Bernoulli(prob)) + " ")
                outfile.write(" \n")
    else:
        for e in range(0, Nexp):
            for t in range(0, Ntoss):
                print(random.Bernoulli(prob), end=' ')
            print(" ")
|
{"/python/MySort.py": ["/python/Random.py"], "/python/CoinToss.py": ["/python/Random.py"], "/python/CookieAnalysis.py": ["/python/MySort.py"], "/python/CoinAnalysis.py": ["/python/Random.py"]}
|
29,787
|
crogan/PHSX815_Week2
|
refs/heads/master
|
/python/CookieAnalysis.py
|
#! /usr/bin/env python
# imports of external packages to use in our code
import sys
import math
import numpy as np
import matplotlib.pyplot as plt
# import our Random class from python/Random.py file
sys.path.append(".")
from python.MySort import MySort
# main function for our CookieAnalysis Python code
if __name__ == "__main__":

    # treat the last non-flag argument as the input file name
    haveInput = False

    for i in range(1, len(sys.argv)):
        if sys.argv[i] == '-h' or sys.argv[i] == '--help':
            continue

        InputFile = sys.argv[i]
        haveInput = True

    if '-h' in sys.argv or '--help' in sys.argv or not haveInput:
        print("Usage: %s [options] [input file]" % sys.argv[0])
        print(" options:")
        print(" --help(-h) print options")
        print()  # bug fix: bare `print` is a no-op expression in Python 3
        sys.exit(1)

    Nmeas = 1
    times = []       # every individual measurement time
    times_avg = []   # per-experiment (per-line) average time
    need_rate = True

    with open(InputFile) as ifile:
        for line in ifile:
            # the first line of the file carries the rate parameter
            if need_rate:
                need_rate = False
                rate = float(line)
                continue

            lineVals = line.split()
            Nmeas = len(lineVals)
            t_avg = 0
            for v in lineVals:
                t_avg += float(v)
                times.append(float(v))
            t_avg /= Nmeas
            times_avg.append(t_avg)

    Sorter = MySort()
    times = Sorter.DefaultSort(times)
    times_avg = Sorter.DefaultSort(times_avg)

    # try some other methods! see how long they take
    # times_avg = Sorter.BubbleSort(times_avg)
    # times_avg = Sorter.InsertionSort(times_avg)
    # times_avg = Sorter.QuickSort(times_avg)

    # ADD YOUR CODE TO PLOT times AND times_avg HERE
|
{"/python/MySort.py": ["/python/Random.py"], "/python/CoinToss.py": ["/python/Random.py"], "/python/CookieAnalysis.py": ["/python/MySort.py"], "/python/CoinAnalysis.py": ["/python/Random.py"]}
|
29,788
|
crogan/PHSX815_Week2
|
refs/heads/master
|
/python/CoinAnalysis.py
|
#! /usr/bin/env python
# imports of external packages to use in our code
import sys
import math
import numpy as np
import matplotlib.pyplot as plt
# import our Random class from python/Random.py file
sys.path.append(".")
from python.Random import Random
# main function for our coin toss Python code
if __name__ == "__main__":

    # if the user includes the flag -h or --help print the options
    if '-h' in sys.argv or '--help' in sys.argv:
        print("Usage: %s [-seed number]" % sys.argv[0])
        print()  # bug fix: bare `print` is a no-op expression in Python 3
        sys.exit(1)

    # default single coin-toss probability for hypothesis 0
    p0 = 0.5

    # default single coin-toss probability for hypothesis 1
    p1 = 0.9

    haveH0 = False
    haveH1 = False

    # parse command-line options
    if '-prob0' in sys.argv:
        p = sys.argv.index('-prob0')
        ptemp = float(sys.argv[p+1])
        if ptemp >= 0 and ptemp <= 1:
            p0 = ptemp
    if '-prob1' in sys.argv:
        p = sys.argv.index('-prob1')
        ptemp = float(sys.argv[p+1])
        if ptemp >= 0 and ptemp <= 1:
            p1 = ptemp
    if '-input0' in sys.argv:
        p = sys.argv.index('-input0')
        InputFile0 = sys.argv[p+1]
        haveH0 = True
    if '-input1' in sys.argv:
        p = sys.argv.index('-input1')
        InputFile1 = sys.argv[p+1]
        haveH1 = True

    # H0 data is mandatory: print the full usage text when it is missing
    if '-h' in sys.argv or '--help' in sys.argv or not haveH0:
        print("Usage: %s [options]" % sys.argv[0])
        print(" options:")
        print(" --help(-h) print options")
        print(" -input0 [filename] name of file for H0 data")
        print(" -input1 [filename] name of file for H1 data")
        print(" -prob0 [number] probability of 1 for single toss for H0")
        print(" -prob1 [number] probability of 1 for single toss for H1")
        print()  # bug fix: bare `print` is a no-op expression in Python 3
        sys.exit(1)

    Ntoss = 1

    # per-experiment success counts and log-likelihood ratios, per hypothesis
    Npass0 = []
    LogLikeRatio0 = []
    Npass1 = []
    LogLikeRatio1 = []

    # running extrema over both samples (currently unused by the plots below)
    Npass_min = 1e8
    Npass_max = -1e8
    LLR_min = 1e8
    LLR_max = -1e8

    # each input line is one experiment: a sequence of 0/1 toss outcomes
    with open(InputFile0) as ifile:
        for line in ifile:
            lineVals = line.split()
            Ntoss = len(lineVals)
            Npass = 0
            LLR = 0
            for v in lineVals:
                Npass += float(v)
                # adding LLR for this toss
                if float(v) >= 1:
                    LLR += math.log(p1/p0)
                else:
                    LLR += math.log((1.-p1)/(1.-p0))
            if Npass < Npass_min:
                Npass_min = Npass
            if Npass > Npass_max:
                Npass_max = Npass
            if LLR < LLR_min:
                LLR_min = LLR
            if LLR > LLR_max:
                LLR_max = LLR
            Npass0.append(Npass)
            LogLikeRatio0.append(LLR)

    if haveH1:
        with open(InputFile1) as ifile:
            for line in ifile:
                lineVals = line.split()
                Ntoss = len(lineVals)
                Npass = 0
                LLR = 0
                for v in lineVals:
                    # cleanup: dropped a stray trailing semicolon here
                    Npass += float(v)
                    # adding LLR for this toss
                    if float(v) >= 1:
                        LLR += math.log(p1/p0)
                    else:
                        LLR += math.log((1.-p1)/(1.-p0))
                if Npass < Npass_min:
                    Npass_min = Npass
                if Npass > Npass_max:
                    Npass_max = Npass
                if LLR < LLR_min:
                    LLR_min = LLR
                if LLR > LLR_max:
                    LLR_max = LLR
                Npass1.append(Npass)
                LogLikeRatio1.append(LLR)

    title = str(Ntoss) + " tosses / experiment"

    # make Npass figure
    plt.figure()
    plt.hist(Npass0, Ntoss+1, density=True, facecolor='b', alpha=0.5, label="assuming $\\mathbb{H}_0$")
    if haveH1:
        plt.hist(Npass1, Ntoss+1, density=True, facecolor='g', alpha=0.7, label="assuming $\\mathbb{H}_1$")
    plt.legend()
    plt.xlabel('$\\lambda = N_{pass}$')
    plt.ylabel('Probability')
    plt.title(title)
    plt.grid(True)
    plt.show()

    # make LLR figure
    plt.figure()
    plt.hist(LogLikeRatio0, Ntoss+1, density=True, facecolor='b', alpha=0.5, label="assuming $\\mathbb{H}_0$")
    if haveH1:
        plt.hist(LogLikeRatio1, Ntoss+1, density=True, facecolor='g', alpha=0.7, label="assuming $\\mathbb{H}_1$")
    plt.legend()
    plt.xlabel('$\\lambda = \\log({\\cal L}_{\\mathbb{H}_{1}}/{\\cal L}_{\\mathbb{H}_{0}})$')
    plt.ylabel('Probability')
    plt.title(title)
    plt.grid(True)
    plt.show()
|
{"/python/MySort.py": ["/python/Random.py"], "/python/CoinToss.py": ["/python/Random.py"], "/python/CookieAnalysis.py": ["/python/MySort.py"], "/python/CoinAnalysis.py": ["/python/Random.py"]}
|
29,789
|
crogan/PHSX815_Week2
|
refs/heads/master
|
/python/Random.py
|
#! /usr/bin/env python
import math
import numpy as np
#################
# Random class
#################
# class that can generate random numbers
class Random:
    """A random number generator class.

    The three-word 64-bit state and the update in int64() appear to follow
    the combined generator ("Ran"/"Ranq" family) from Numerical Recipes,
    3rd ed. -- NOTE(review): confirm the constants before relying on its
    statistical properties.
    """

    # initialization method for Random class
    def __init__(self, seed = 5555):
        # three 64-bit state words, mixed from the seed by warm-up calls
        self.seed = seed
        self.m_v = np.uint64(4101842887655102017)
        self.m_w = np.uint64(1)
        self.m_u = np.uint64(1)

        self.m_u = np.uint64(self.seed) ^ self.m_v
        self.int64()
        self.m_v = self.m_u
        self.int64()
        self.m_w = self.m_v
        self.int64()

    # function returns a random 64 bit integer
    def int64(self):
        """Advance the generator state and return a pseudo-random np.uint64.

        Overflow warnings are suppressed on the multiply/add steps because
        modular wrap-around is the intended arithmetic here.
        """
        with np.errstate(over='ignore'):
            self.m_u = np.uint64(self.m_u * np.uint64(2862933555777941757) + np.uint64(7046029254386353087))
        # xorshift updates of the second state word
        self.m_v ^= self.m_v >> np.uint64(17)
        self.m_v ^= self.m_v << np.uint64(31)
        self.m_v ^= self.m_v >> np.uint64(8)
        # multiply-with-carry update of the third state word
        self.m_w = np.uint64(np.uint64(4294957665)*(self.m_w & np.uint64(0xffffffff))) + np.uint64((self.m_w >> np.uint64(32)))
        x = np.uint64(self.m_u ^ (self.m_u << np.uint64(21)))
        x ^= x >> np.uint64(35)
        x ^= x << np.uint64(4)
        with np.errstate(over='ignore'):
            return (x + self.m_v)^self.m_w

    # function returns a random floating point number between (0, 1) (uniform)
    def rand(self):
        """Uniform float in (0, 1): int64() scaled by 2**-64."""
        return 5.42101086242752217E-20 * self.int64()

    # function returns a random integer (0 or 1) according to a Bernoulli distr.
    def Bernoulli(self, p=0.5):
        """Return 1 with probability ``p`` and 0 otherwise.

        NOTE(review): an out-of-range ``p`` silently returns 1; raising
        ValueError would be safer, but callers may rely on this behaviour.
        """
        if p < 0. or p > 1.:
            return 1

        R = self.rand()

        if R < p:
            return 1
        else:
            return 0

    # function returns a random double (0 to infty) according to an exponential distribution
    def Exponential(self, beta=1.):
        """Sample Exp(beta) by inversion: X = -log(U)/beta with U ~ (0, 1)."""
        # make sure beta is consistent with an exponential
        if beta <= 0.:
            beta = 1.

        # reject U == 0 so the logarithm is finite
        R = self.rand();
        while R <= 0.:
            R = self.rand()

        X = -math.log(R)/beta

        return X
|
{"/python/MySort.py": ["/python/Random.py"], "/python/CoinToss.py": ["/python/Random.py"], "/python/CookieAnalysis.py": ["/python/MySort.py"], "/python/CoinAnalysis.py": ["/python/Random.py"]}
|
29,791
|
froggleston/pyani
|
refs/heads/master
|
/pyani/pyani_config.py
|
# Copyright 2013-2015, The James Hutton Institute
# Author: Leighton Pritchard
#
# This code is part of the pyani package, and is governed by its licence.
# Please see the LICENSE file that should have been included as part of
# this package.
"""Configuration settings for the pyani package.
"""
from matplotlib.colors import LinearSegmentedColormap
# Defaults assume that common binaries are on the $PATH
NUCMER_DEFAULT = "nucmer"
FILTER_DEFAULT = "delta-filter"
BLASTN_DEFAULT = "blastn"
MAKEBLASTDB_DEFAULT = "makeblastdb"
BLASTALL_DEFAULT = "blastall"
FORMATDB_DEFAULT = "formatdb"
QSUB_DEFAULT = "qsub"
# Stems for output files (one entry per matrix each method writes out)
ANIM_FILESTEMS = ("ANIm_alignment_lengths", "ANIm_percentage_identity",
                  "ANIm_alignment_coverage", "ANIm_similarity_errors",
                  "ANIm_hadamard")
ANIB_FILESTEMS = ("ANIb_alignment_lengths", "ANIb_percentage_identity",
                  "ANIb_alignment_coverage", "ANIb_similarity_errors",
                  "ANIb_hadamard")
TETRA_FILESTEMS = ("TETRA_correlations",)
ANIBLASTALL_FILESTEMS = ("ANIblastall_alignment_lengths",
                         "ANIblastall_percentage_identity",
                         "ANIblastall_alignment_coverage",
                         "ANIblastall_similarity_errors",
                         "ANIblastall_hadamard")
# Output subdirectory names for each method
ALIGNDIR = {'ANIm': 'nucmer_output',
            'ANIb': 'blastn_output',
            'ANIblastall': 'blastall_output'}
# Any valid matplotlib colour map can be used here
# See, e.g. http://matplotlib.org/xkcd/examples/color/colormaps_reference.html
MPL_CBAR = 'Spectral'
# Parameters for analyses
FRAGSIZE = 1020  # Default ANIb fragment size
# SGE/OGE scheduler parameters
SGE_WAIT = 0.01  # Base unit of time (s) to wait between polling SGE
# Custom Matplotlib colourmaps
# 1a) Map for species boundaries (95%: 0.95), blue for values at
# 0.9 or below, red for values at 1.0; white at 0.95.
# Also, anything below 0.7 is 70% grey
# NOTE(review): the 'blue' channel lacks the 0.9 breakpoint present in
# 'red' and 'green' -- presumably intentional (blue stays saturated from
# 0.7 to 0.95), but worth confirming against the rendered map.
cdict_spbnd_BuRd = {'red': ((0.0, 0.0, 0.7),
                            (0.7, 0.7, 0.0),
                            (0.9, 0.0, 0.0),
                            (0.95, 1.0, 1.0),
                            (1.0, 1.0, 1.0)),
                    'green': ((0.0, 0.0, 0.7),
                              (0.7, 0.7, 0.0),
                              (0.9, 0.0, 0.0),
                              (0.95, 1.0, 1.0),
                              (1.0, 0.0, 0.0)),
                    'blue': ((0.0, 0.0, 0.7),
                             (0.7, 0.7, 1.0),
                             (0.95, 1.0, 1.0),
                             (1.0, 0.0, 0.0))}
CMAP_SPBND_BURD = LinearSegmentedColormap("spbnd_BuRd",
                                          cdict_spbnd_BuRd)
# 1b) Map for species boundaries (95%: 0.95), blue for values at
# 0.9 or below, red for values at 1.0; white at 0.9.
# Also, anything below 0.8 is 70% grey
# NOTE: the duplicated x=0.9 anchors create a deliberate step
# discontinuity (blue below 0.9, white at 0.9) in red/green.
cdict_hadamard_BuRd = {'red': ((0.0, 0.0, 0.7),
                               (0.8, 0.7, 0.0),
                               (0.9, 0.0, 0.0),
                               (0.9, 1.0, 1.0),
                               (1.0, 1.0, 1.0)),
                       'green': ((0.0, 0.0, 0.7),
                                 (0.8, 0.7, 0.0),
                                 (0.9, 0.0, 0.0),
                                 (0.9, 1.0, 1.0),
                                 (1.0, 0.0, 0.0)),
                       'blue': ((0.0, 0.0, 0.7),
                                (0.8, 0.7, 1.0),
                                (0.9, 1.0, 1.0),
                                (1.0, 0.0, 0.0))}
CMAP_HADAMARD_BURD = LinearSegmentedColormap("hadamard_BuRd",
                                             cdict_hadamard_BuRd)
# 2) Blue for values at 0.0, red for values at 1.0; white at 0.5
cdict_BuRd = {'red': ((0.0, 0.0, 0.0),
                      (0.5, 1.0, 1.0),
                      (1.0, 1.0, 1.0)),
              'green': ((0.0, 0.0, 0.0),
                        (0.5, 1.0, 1.0),
                        (1.0, 0.0, 0.0)),
              'blue': ((0.0, 1.0, 1.0),
                       (0.5, 1.0, 1.0),
                       (1.0, 0.0, 0.0))}
CMAP_BURD = LinearSegmentedColormap("BuRd", cdict_BuRd)
# Graphics parameters for each output file. Note that this should be
# in sync with the output file stems above
def params_mpl(df):
    """Returns dict of matplotlib parameters, dependent on dataframe.

    Maps each output-file stem to a (colormap name, vmin, vmax) triple.
    Identity/coverage/hadamard/correlation matrices use fixed [0, 1]
    scales; length and error matrices scale to the dataframe's extrema.
    """
    vmin = df.values.min()
    vmax = df.values.max()
    data_scaled = ('afmhot', vmin, vmax)
    params = {}
    for method in ('ANIb', 'ANIm', 'ANIblastall'):
        params[method + '_alignment_lengths'] = data_scaled
        params[method + '_percentage_identity'] = ('spbnd_BuRd', 0, 1)
        params[method + '_alignment_coverage'] = ('BuRd', 0, 1)
        params[method + '_hadamard'] = ('hadamard_BuRd', 0, 1)
        params[method + '_similarity_errors'] = data_scaled
    params['TETRA_correlations'] = ('spbnd_BuRd', 0, 1)
    return params
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,792
|
froggleston/pyani
|
refs/heads/master
|
/pyani/tetra.py
|
# Copyright 2013-2015, The James Hutton Institute
# Author: Leighton Pritchard
#
# This code is part of the pyani package, and is governed by its licence.
# Please see the LICENSE file that should have been included as part of
# this package.
"""Code to implement the TETRA average nucleotide identity method.
Provides functions for calculation of TETRA as described in:
Richter M, Rossello-Mora R (2009) Shifting the genomic gold standard for the
prokaryotic species definition. Proc Natl Acad Sci USA 106: 19126-19131.
doi:10.1073/pnas.0906412106.
and
Teeling et al. (2004) Application of tetranucleotide frequencies for the
assignment of genomic fragments. Env. Microbiol. 6(9): 938-947.
doi:10.1111/j.1462-2920.2004.00624.x
"""
import collections
import os
import math
from itertools import product
import pandas as pd
from Bio import SeqIO
# Calculate tetranucleotide Z-score for a set of input sequences
def calculate_tetra_zscores(infilenames):
    """Returns dictionary of TETRA Z-scores for each input file.

    - infilenames - collection of paths to sequence files

    Keys are the input filenames with directory and extension stripped;
    values are the per-file Z-score dictionaries.
    """
    return {
        os.path.splitext(os.path.split(path)[-1])[0]: calculate_tetra_zscore(path)
        for path in infilenames
    }
# Calculate tetranucleotide Z-score for a single sequence file
def calculate_tetra_zscore(filename):
    """Returns TETRA Z-score for the sequence in the passed file.

    - filename - path to a FASTA sequence file

    Calculates mono-, di-, tri- and tetranucleotide frequencies
    for each sequence, on each strand, and follows Teeling et al. (2004)
    in calculating a corresponding Z-score for each observed
    tetranucleotide frequency, dependent on the mono-, di- and tri-
    nucleotide frequencies for that input sequence.

    Returns a dict keyed by tetranucleotide string; all 256 unambiguous
    tetranucleotides are present (unobserved ones keep the initial 0).
    """
    # For the Teeling et al. method, the Z-scores require us to count
    # mono, di, tri and tetranucleotide sequences - these are stored
    # (in order) in the counts tuple
    counts = (
        collections.defaultdict(int),
        collections.defaultdict(int),
        collections.defaultdict(int),
        collections.defaultdict(int),
    )
    for rec in SeqIO.parse(filename, "fasta"):
        for seq in [str(rec.seq).upper(), str(rec.seq.reverse_complement()).upper()]:
            # The Teeling et al. algorithm requires us to consider
            # both strand orientations, so monocounts are easy
            for base in ("G", "C", "T", "A"):
                counts[0][base] += seq.count(base)
            # For di, tri and tetranucleotide counts, loop over the
            # sequence and its reverse complement, until near the end:
            for i in range(len(seq[:-4])):
                din, tri, tetra = seq[i : i + 2], seq[i : i + 3], seq[i : i + 4]
                counts[1][str(din)] += 1
                counts[2][str(tri)] += 1
                counts[3][str(tetra)] += 1
            # Then clean up the straggling bit at the end:
            # NOTE(review): this clean-up only tops up the di- and
            # tri-nucleotide counts; the final tetranucleotide seq[-4:]
            # is never counted. Confirm this is intentional — the
            # published target files were generated with this behaviour.
            counts[2][str(seq[-4:-1])] += 1
            counts[2][str(seq[-3:])] += 1
            counts[1][str(seq[-4:-2])] += 1
            counts[1][str(seq[-3:-1])] += 1
            counts[1][str(seq[-2:])] += 1
    # Following Teeling (2004), calculate expected frequencies for each
    # tetranucleotide; we ignore ambiguity symbols
    tetra_exp = {}
    for tet in [tetn for tetn in counts[3] if tetra_clean(tetn)]:
        # Expected count from the flanking tri- and di-nucleotide counts
        tetra_exp[tet] = (
            1.0 * counts[2][tet[:3]] * counts[2][tet[1:]] / counts[1][tet[1:3]]
        )
    # Following Teeling (2004) we approximate the std dev and Z-score for each
    # tetranucleotide
    tetra_sd = {}
    bases = ["A", "C", "G", "T"]
    # Pre-populate all 256 tetranucleotides so the output always has a
    # complete, consistent key set across input files
    tetra_z = {"".join(_): 0 for _ in product(bases, bases, bases, bases)}
    for tet, exp in list(tetra_exp.items()):
        den = counts[1][tet[1:3]]
        tetra_sd[tet] = math.sqrt(
            exp * (den - counts[2][tet[:3]]) * (den - counts[2][tet[1:]]) / (den * den)
        )
        try:
            tetra_z[tet] = (counts[3][tet] - exp) / tetra_sd[tet]
        except ZeroDivisionError:
            # To record if we hit a zero in the estimation of variance
            # zeroes = [k for k, v in list(tetra_sd.items()) if v == 0]
            # Fallback pseudo-score when the approximated std dev is zero
            tetra_z[tet] = 1 / (counts[1][tet[1:3]] * counts[1][tet[1:3]])
    return tetra_z
# Returns true if the passed string contains only A, C, G or T
def tetra_clean(string):
    """Return True if the passed string contains only A, C, G or T.

    - string - nucleotide sequence to check

    We are assuming that a low frequency of IUPAC ambiguity symbols
    doesn't affect our calculation, so sequences containing any symbol
    other than the four unambiguous bases are simply excluded.
    """
    # The set difference is empty (falsy) iff every character is an
    # unambiguous base; return the boolean directly rather than
    # branching on len() and returning literal True/False.
    return not set(string) - set("ACGT")
# Calculate Pearson's correlation coefficient from the Z-scores for each
# tetranucleotide.
def calculate_correlations(tetra_z):
    """Returns dataframe of Pearson correlation coefficients.

    - tetra_z - dictionary of Z-scores, keyed by sequence ID

    Calculates Pearson correlation coefficient from Z scores for each
    tetranucleotide. This is done longhand here, which is fast enough,
    but for robustness we might want to do something else... (TODO).

    Note that we report a correlation by this method, rather than a
    percentage identity.
    """
    orgs = sorted(tetra_z.keys())
    # Diagonal (self-correlation) is 1.0 by definition; fill everything
    # with 1.0 and overwrite the off-diagonal cells below.
    correlations = pd.DataFrame(index=orgs, columns=orgs, dtype=float).fillna(1.0)
    for idx, org1 in enumerate(orgs[:-1]):
        for org2 in orgs[idx + 1 :]:
            tets = sorted(tetra_z[org1].keys())
            zscores = [
                [tetra_z[org1][t] for t in tets],
                [tetra_z[org2][t] for t in tets],
            ]
            zmeans = [sum(zscore) / len(zscore) for zscore in zscores]
            zdiffs = [
                [z - zmeans[0] for z in zscores[0]],
                [z - zmeans[1] for z in zscores[1]],
            ]
            diffprods = sum(
                [zdiffs[0][i] * zdiffs[1][i] for i in range(len(zdiffs[0]))]
            )
            zdiffs2 = [sum([z * z for z in zdiffs[0]]), sum([z * z for z in zdiffs[1]])]
            # Use .loc for scalar assignment: chained indexing
            # (correlations[org1][org2] = ...) is deprecated in pandas
            # and does not write back under copy-on-write semantics.
            corr = diffprods / math.sqrt(zdiffs2[0] * zdiffs2[1])
            correlations.loc[org2, org1] = corr
            correlations.loc[org1, org2] = corr
    return correlations
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,793
|
froggleston/pyani
|
refs/heads/master
|
/tests/test_tetra.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""test_tetra.py
Test tetra.py module.
These tests are intended to be run from the repository root using:
nosetests -v
print() statements will be caught by nosetests unless there is an
error. They can also be recovered with the -s option.
(c) The James Hutton Institute 2017
Author: Leighton Pritchard
Contact:
leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD6 9LH,
Scotland,
UK
The MIT License
Copyright (c) 2017 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
import os
import unittest
import pandas as pd
from nose.tools import (assert_equal, assert_false, assert_true)
from pandas.util.testing import (assert_frame_equal,)
from pyani import (tetra, )
def ordered(obj):
    """Return a recursively sorted copy of obj, for order-insensitive compares.

    Dicts become sorted lists of (key, value) tuples; lists are sorted
    after their elements are themselves ordered; scalars pass through.
    """
    if isinstance(obj, dict):
        pairs = [(key, ordered(value)) for key, value in obj.items()]
        return sorted(pairs)
    if isinstance(obj, list):
        return sorted(ordered(element) for element in obj)
    return obj
class TestTETRA(unittest.TestCase):
    """Class defining tests of TETRA algorithm.

    Compares pyani.tetra outputs against precomputed target files under
    tests/test_targets/tetra.
    """
    def setUp(self):
        """Define parameters and values for tests."""
        # Directories holding test inputs and precomputed target outputs
        self.indir = os.path.join('tests', 'test_input', 'tetra')
        self.tgtdir = os.path.join('tests', 'test_targets', 'tetra')
        self.seqdir = os.path.join('tests', 'test_input', 'sequences')
        # A single input sequence, and the full set of input sequences
        self.infile = os.path.join(self.seqdir, 'NC_002696.fna')
        self.infiles = [os.path.join(self.seqdir, fname) for fname in
                        os.listdir(self.seqdir)]
    def test_tetraclean(self):
        """detects unambiguous IUPAC symbols correctly."""
        # Sequences containing ambiguity symbols (Y, N, W) must be rejected
        assert_false(tetra.tetra_clean('ACGTYACGTACNGTACGWTACGT'))
        assert_true(tetra.tetra_clean('ACGTACGTACGTACGTACGTAC'))
    def test_zscore(self):
        """TETRA Z-score calculated correctly."""
        tetra_z = tetra.calculate_tetra_zscore(self.infile)
        with open(os.path.join(self.tgtdir, 'zscore.json'), 'r') as ifh:
            target = json.load(ifh)
        # Compare order-insensitively via the module-level ordered() helper
        assert_equal(ordered(tetra_z), ordered(target))
    def test_correlations(self):
        """TETRA correlation calculated correctly."""
        infiles = ordered(self.infiles)[:2] # only test a single correlation
        corr = tetra.calculate_correlations(tetra.calculate_tetra_zscores(infiles))
        target = pd.read_csv(os.path.join(self.tgtdir, 'correlation.tab'), sep='\t',
                             index_col=0)
        assert_frame_equal(corr, target)
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,794
|
froggleston/pyani
|
refs/heads/master
|
/pyani/pyani_graphics.py
|
# Copyright 2013-2019, The James Hutton Insitute
# Author: Leighton Pritchard
#
# This code is part of the pyani package, and is governed by its licence.
# Please see the LICENSE file that should have been included as part of
# this package.
"""Code to implement graphics output for ANI analyses."""
# Force matplotlib NOT to use an Xwindows backend on *nix, so that
# _tkinter.TclError is avoided when there is no $DISPLAY env: this can occur
# when running the package/script via ssh
# See http://stackoverflow.com/questions/2801882/\
# generating-a-png-with-matplotlib-when-display-is-undefined
# This needs to be done before importing pyplot
from math import floor, log10
import warnings
import matplotlib
# Specify matplotlib backend
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as distance
import seaborn as sns
import pandas as pd
from . import pyani_config
# Register Matplotlib colourmaps so the custom pyani maps can be looked
# up by name via plt.get_cmap() elsewhere in this module.
# NOTE(review): plt.register_cmap is deprecated in newer Matplotlib
# releases (replaced by matplotlib.colormaps.register) — confirm the
# pinned Matplotlib version before upgrading.
plt.register_cmap(cmap=pyani_config.CMAP_SPBND_BURD)
plt.register_cmap(cmap=pyani_config.CMAP_HADAMARD_BURD)
plt.register_cmap(cmap=pyani_config.CMAP_BURD)
# Convenience class to hold heatmap graphics parameters
class Params(object):  # pylint: disable=too-few-public-methods
    """Convenience class to hold heatmap rendering parameters."""

    def __init__(self, params, labels=None, classes=None):
        """Initialise rendering parameters.

        - params - sequence of (colormap name, vmin, vmax)
        - labels - optional dict of alternative sequence labels
        - classes - optional dict of sequence classes
        """
        cmap_name, value_min, value_max = params[0], params[1], params[2]
        self.cmap = plt.get_cmap(cmap_name)
        self.vmin = value_min
        self.vmax = value_max
        self.labels = labels
        self.classes = classes

    @property
    def vdiff(self):
        """Difference between max and min values, floored at 0.01 for display."""
        return max(0.01, self.vmax - self.vmin)
# helper for cleaning up matplotlib axes by removing ticks etc.
def clean_axis(axis):
    """Remove ticks, tick labels, and frame from the passed axis, in place."""
    # Clear tick marks/labels on both axes
    for ax_obj in (axis.get_xaxis(), axis.get_yaxis()):
        ax_obj.set_ticks([])
    # Hide the surrounding frame
    for spine in list(axis.spines.values()):
        spine.set_visible(False)
# Add classes colorbar to Seaborn plot
def get_seaborn_colorbar(dfr, classes):
    """Return a colorbar representing classes, for a Seaborn plot.

    The aim is to get a pd.Series for the passed dataframe columns,
    in the form:
    0    colour for class in col 0
    1    colour for class in col 1
    ...  colour for class in col ...
    n    colour for class in col n
    """
    # One palette colour per distinct class level, in sorted level order
    levels = sorted(set(classes.values()))
    palette = sns.cubehelix_palette(
        len(levels), light=0.9, dark=0.1, reverse=True, start=1, rot=-2
    )
    paldict = dict(zip(levels, palette))
    # Map each sequence ID to the colour of its class
    lvl_pal = {cls: paldict[lvl] for (cls, lvl) in classes.items()}
    col_cb = pd.Series(dfr.index).map(lvl_pal)
    # The col_cb Series index now has to match the dfr.index, but
    # we don't create the Series with this (and if we try, it
    # fails) - so change it with this line
    col_cb.index = dfr.index
    return col_cb
# Get safe Seaborn labels
def get_safe_seaborn_labels(dfr, labels):
    """Returns labels guaranteed to correspond to the dataframe index.

    - dfr - dataframe whose index supplies the default labels
    - labels - dict mapping index entries to display labels, or None

    Index entries without a mapping fall back to the index entry itself.
    """
    if labels is not None:
        return [labels.get(i, i) for i in dfr.index]
    # No mapping supplied: a plain copy of the index entries
    # (list() instead of an identity comprehension)
    return list(dfr.index)
# Return a clustermap
def get_seaborn_clustermap(dfr, params, title=None, annot=True):
    """Returns a Seaborn clustermap for the passed dataframe.

    - dfr - pandas DataFrame of values to plot
    - params - Params object; besides cmap/vmin/vmax/labels, the caller
      (heatmap_seaborn) is expected to have set the colorbar, figsize
      and linewidths attributes used below
    - title - optional label for the colour-scale axis
    - annot - if True, write the data value into each heatmap cell
    """
    fig = sns.clustermap(
        dfr,
        cmap=params.cmap,
        vmin=params.vmin,
        vmax=params.vmax,
        col_colors=params.colorbar,
        row_colors=params.colorbar,
        figsize=(params.figsize, params.figsize),
        linewidths=params.linewidths,
        xticklabels=params.labels,
        yticklabels=params.labels,
        annot=annot,
    )
    # Place the colour-scale label on the left of the scale axis
    fig.cax.yaxis.set_label_position("left")
    if title:
        fig.cax.set_ylabel(title)
    # Rotate ticklabels: columns vertical, rows horizontal
    fig.ax_heatmap.set_xticklabels(fig.ax_heatmap.get_xticklabels(), rotation=90)
    fig.ax_heatmap.set_yticklabels(fig.ax_heatmap.get_yticklabels(), rotation=0)
    # Return clustermap
    return fig
# Generate Seaborn heatmap output
def heatmap_seaborn(dfr, outfilename=None, title=None, params=None):
    """Returns seaborn heatmap with cluster dendrograms.

    - dfr - pandas DataFrame with relevant data
    - outfilename - path to output file (indicates output format)
    - title - optional label for the colour scale
    - params - Params object with cmap/vmin/vmax/labels/classes

    NOTE: params is mutated in place — labels are resolved against the
    dataframe index, and colorbar/figsize/linewidths attributes are
    attached before rendering.
    """
    # Decide on figure layout size: a minimum size is required for
    # aesthetics, and a maximum to avoid core dumps on rendering.
    # If we hit the maximum size, we should modify font size.
    maxfigsize = 120
    calcfigsize = dfr.shape[0] * 1.1
    figsize = min(max(8, calcfigsize), maxfigsize)
    if figsize == maxfigsize:
        # Shrink fonts proportionally so labels still fit at the cap
        scale = maxfigsize / calcfigsize
        sns.set_context("notebook", font_scale=scale)
    # Add a colorbar?
    if params.classes is None:
        col_cb = None
    else:
        col_cb = get_seaborn_colorbar(dfr, params.classes)
    # Labels are defined before we build the clustering
    # If a label mapping is missing, use the key text as fall back
    params.labels = get_safe_seaborn_labels(dfr, params.labels)
    # Add attributes to parameter object, and draw heatmap
    params.colorbar = col_cb
    params.figsize = figsize
    params.linewidths = 0.25
    fig = get_seaborn_clustermap(dfr, params, title=title)
    # Save to file
    if outfilename:
        fig.savefig(outfilename)
    # Return clustermap
    return fig
# Add dendrogram and axes to passed figure
def add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col"):
    """Return a dendrogram and corresponding gridspec, attached to the fig

    Modifies the fig in-place. Orientation is either 'row' or 'col' and
    determines location and orientation of the rendered dendrogram.

    Clustering is complete-linkage over pairwise distances (scipy pdist
    on the dataframe rows, or its transpose's rows for a column
    dendrogram). Returns {'dendrogram': ..., 'gridspec': ...} so callers
    can reuse the leaf ordering and attach colourbars in the spare cell.
    """
    # Row or column axes?
    if orientation == "row":
        dists = distance.squareform(distance.pdist(dfr))
        spec = heatmap_gs[1, 0]
        orient = "left"
        nrows, ncols = 1, 2
        height_ratios = [1]
    else:  # Column dendrogram
        dists = distance.squareform(distance.pdist(dfr.T))
        spec = heatmap_gs[0, 1]
        orient = "top"
        nrows, ncols = 2, 1
        height_ratios = [1, 0.15]
    # Create row dendrogram axis
    gspec = gridspec.GridSpecFromSubplotSpec(
        nrows,
        ncols,
        subplot_spec=spec,
        wspace=0.0,
        hspace=0.1,
        height_ratios=height_ratios,
    )
    dend_axes = fig.add_subplot(gspec[0, 0])
    # color_threshold=np.inf draws the whole tree in a single colour
    dend = sch.dendrogram(
        sch.linkage(distance.squareform(dists), method="complete"),
        color_threshold=np.inf,
        orientation=orient,
    )
    clean_axis(dend_axes)
    return {"dendrogram": dend, "gridspec": gspec}
# Create heatmap axes for Matplotlib output
def get_mpl_heatmap_axes(dfr, fig, heatmap_gs):
    """Return axis for Matplotlib heatmap.

    Places the axis in the lower-right gridspec cell, with one tick per
    dataframe row on each axis, x-ticks below and y-ticks to the right.
    """
    n_items = dfr.shape[0]
    tick_positions = np.linspace(0, n_items - 1, n_items)
    heatmap_axes = fig.add_subplot(heatmap_gs[1, 1])
    heatmap_axes.set_xticks(tick_positions)
    heatmap_axes.set_yticks(tick_positions)
    heatmap_axes.grid(False)
    heatmap_axes.xaxis.tick_bottom()
    heatmap_axes.yaxis.tick_right()
    return heatmap_axes
def add_mpl_colorbar(dfr, fig, dend, params, orientation="row"):
    """Add class colorbars to Matplotlib heatmap.

    - dfr - dataframe whose index names the sequences
    - fig - figure to draw into
    - dend - dict from add_mpl_dendrogram; its leaf order and gridspec
      position the colourbar next to the dendrogram
    - params - Params object; NOTE: params.classes is mutated in place,
      gaining a self-mapping for any sequence name not already present
    - orientation - 'row' or 'col', selecting the spare gridspec cell

    Returns the pd.Series of numeric class codes used for the bar.
    """
    # Ensure every leaf name has a class entry (fall back to the name itself)
    for name in dfr.index[dend["dendrogram"]["leaves"]]:
        if name not in params.classes:
            params.classes[name] = name
    # Assign a numerical value to each class, for mpl
    classdict = {cls: idx for (idx, cls) in enumerate(params.classes.values())}
    # colourbar
    cblist = []
    for name in dfr.index[dend["dendrogram"]["leaves"]]:
        try:
            cblist.append(classdict[params.classes[name]])
        except KeyError:
            # Class value itself absent from classdict: key on the name
            cblist.append(classdict[name])
    colbar = pd.Series(cblist)
    # Create colourbar axis - could capture if needed
    if orientation == "row":
        # Vertical bar beside the row dendrogram: one colour cell per row
        cbaxes = fig.add_subplot(dend["gridspec"][0, 1])
        cbaxes.imshow(
            [[cbar] for cbar in colbar.values],
            cmap=plt.get_cmap(pyani_config.MPL_CBAR),
            interpolation="nearest",
            aspect="auto",
            origin="lower",
        )
    else:
        # Horizontal bar beneath the column dendrogram
        cbaxes = fig.add_subplot(dend["gridspec"][1, 0])
        cbaxes.imshow(
            [colbar],
            cmap=plt.get_cmap(pyani_config.MPL_CBAR),
            interpolation="nearest",
            aspect="auto",
            origin="lower",
        )
    clean_axis(cbaxes)
    return colbar
# Add labels to the heatmap axes
def add_mpl_labels(heatmap_axes, rowlabels, collabels, params):
    """Add labels to Matplotlib heatmap axes, in-place.

    Row/column labels are passed through the optional params.labels
    mapping (falling back to the original key text), then drawn with
    columns rotated to vertical and all labels at a small font size.
    """
    if params.labels:
        # If a label mapping is missing, use the key text as fall back
        rowlabels = [params.labels.get(lab, lab) for lab in rowlabels]
        collabels = [params.labels.get(lab, lab) for lab in collabels]
    xlabs = heatmap_axes.set_xticklabels(collabels)
    ylabs = heatmap_axes.set_yticklabels(rowlabels)
    # Rotate column labels to vertical
    for label in xlabs:
        label.set_rotation(90)
    # Shrink the font on every label
    for label in list(xlabs) + list(ylabs):
        label.set_fontsize(8)
# Add colour scale to heatmap
def add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title=None):
    """Add colour scale to heatmap.

    - fig - figure to draw into
    - heatmap_gs - outer gridspec; the scale goes in the top-left cell
    - ax_map - mappable (the heatmap image) the scale describes
    - params - Params object providing vmin/vmax/vdiff for tick placement
    - title - optional label for the scale

    Returns the created colorbar.
    """
    # Set tick intervals: quartiles of the displayed value range
    cbticks = [params.vmin + e * params.vdiff for e in (0, 0.25, 0.5, 0.75, 1)]
    if params.vmax > 10:
        # Large ranges: round ticks to two significant figures
        exponent = int(floor(log10(params.vmax))) - 1
        cbticks = [int(round(e, -exponent)) for e in cbticks]
    # Narrow inner gridspec so the scale doesn't fill the whole cell
    scale_subplot = gridspec.GridSpecFromSubplotSpec(
        1, 3, subplot_spec=heatmap_gs[0, 0], wspace=0.0, hspace=0.0
    )
    scale_ax = fig.add_subplot(scale_subplot[0, 1])
    cbar = fig.colorbar(ax_map, scale_ax, ticks=cbticks)
    if title:
        cbar.set_label(title, fontsize=6)
    # Ticks and label on the left; small fonts; no outline
    cbar.ax.yaxis.set_ticks_position("left")
    cbar.ax.yaxis.set_label_position("left")
    cbar.ax.tick_params(labelsize=6)
    cbar.outline.set_linewidth(0)
    return cbar
# Generate Matplotlib heatmap output
def heatmap_mpl(dfr, outfilename=None, title=None, params=None):
    """Returns matplotlib heatmap with cluster dendrograms.

    - dfr - pandas DataFrame with relevant data
    - outfilename - path to output file (indicates output format)
    - title - optional label for the colour scale
    - params - Params object carrying:
      - cmap/vmin/vmax - colormap and value range
      - labels - dictionary of alternative labels, keyed by default
        sequence labels
      - classes - dictionary of sequence classes, keyed by default
        sequence labels
    """
    # Layout figure grid and add title
    # Set figure size by the number of rows in the dataframe
    figsize = max(8, dfr.shape[0] * 0.175)
    fig = plt.figure(figsize=(figsize, figsize))
    # if title:
    #     fig.suptitle(title)
    # 2x2 grid: dendrograms in the top/left strips, heatmap lower-right,
    # colour scale in the small top-left cell
    heatmap_gs = gridspec.GridSpec(
        2, 2, wspace=0.0, hspace=0.0, width_ratios=[0.3, 1], height_ratios=[0.3, 1]
    )
    # Add column and row dendrograms/axes to figure
    coldend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col")
    rowdend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="row")
    # Add heatmap axes to figure, with rows/columns as in the dendrograms
    heatmap_axes = get_mpl_heatmap_axes(dfr, fig, heatmap_gs)
    ax_map = heatmap_axes.imshow(
        # Reorder the data to match the dendrogram leaf ordering
        dfr.iloc[rowdend["dendrogram"]["leaves"], coldend["dendrogram"]["leaves"]],
        interpolation="nearest",
        cmap=params.cmap,
        origin="lower",
        vmin=params.vmin,
        vmax=params.vmax,
        aspect="auto",
    )
    # Are there class colourbars to add?
    if params.classes is not None:
        add_mpl_colorbar(dfr, fig, coldend, params, orientation="col")
        add_mpl_colorbar(dfr, fig, rowdend, params, orientation="row")
    # Add heatmap labels
    add_mpl_labels(
        heatmap_axes,
        dfr.index[rowdend["dendrogram"]["leaves"]],
        dfr.index[coldend["dendrogram"]["leaves"]],
        params,
    )
    # Add colour scale
    add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title)
    # Return figure output, and write, if required
    plt.subplots_adjust(top=0.85)  # Leave room for title
    # fig.set_tight_layout(True)
    # We know that there is a UserWarning here about tight_layout and
    # using the Agg renderer on OSX, so catch and ignore it, for cleanliness.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        heatmap_gs.tight_layout(fig, h_pad=0.1, w_pad=0.5)
    if outfilename:
        fig.savefig(outfilename)
    return fig
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,795
|
froggleston/pyani
|
refs/heads/master
|
/tests/test_multiprocessing.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""test_multiprocessing.py
Test run_multiprocessing.py module.
These tests are intended to be run from the repository root using:
nosetests -v
print() statements will be caught by nosetests unless there is an
error. They can also be recovered with the -s option.
(c) The James Hutton Institute 2017
Author: Leighton Pritchard
Contact:
leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD6 9LH,
Scotland,
UK
The MIT License
Copyright (c) 2017 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import unittest
from nose.tools import assert_equal, nottest
from pyani import run_multiprocessing, pyani_jobs, anib
class TestMultiprocessing(unittest.TestCase):
    """Class defining tests of pyani's multiprocessing module.

    Runs harmless shell commands through the multiprocessing scheduler
    and exercises the job dependency graph built by pyani.anib.
    """
    def setUp(self):
        """Define parameters and arguments for tests."""
        # Shell loops that echo thread/value pairs; cheap, side-effect-free
        self.cmdlist = [
            'for i in %s; do echo "Thread %d: value ${i}"; done'
            % (" ".join([str(e) for e in range(v)]), v)
            for v in range(5)
        ]
        self.cmds = ["ls -ltrh", "echo ${PWD}"]
        self.seqdir = os.path.join("tests", "test_input", "sequences")
        self.outdir = os.path.join("tests", "test_output", "multiprocessing")
        # Two input sequences are enough to exercise the dependency graph
        self.infiles = [
            os.path.join(self.seqdir, fname) for fname in os.listdir(self.seqdir)
        ][:2]
        self.fraglen = 1000
        os.makedirs(self.outdir, exist_ok=True)
    def test_multiprocessing_run(self):
        """multiprocessing() runs basic jobs."""
        # A zero return value means every command exited cleanly
        result = run_multiprocessing.multiprocessing_run(self.cmdlist)
        assert_equal(0, result)
    def test_cmdsets(self):
        """module builds command sets."""
        job1 = pyani_jobs.Job("dummy_with_dependency", self.cmds[0])
        job2 = pyani_jobs.Job("dummy_dependency", self.cmds[1])
        job1.add_dependency(job2)
        cmdsets = run_multiprocessing.populate_cmdsets(job1, list(), depth=1)
        # Each dependency level should become its own set of commands
        target = [{cmd} for cmd in self.cmds]
        assert_equal(cmdsets, target)
    def test_dependency_graph_run(self):
        """module runs dependency graph."""
        fragresult = anib.fragment_fasta_files(self.infiles, self.outdir, self.fraglen)
        blastcmds = anib.make_blastcmd_builder("ANIb", self.outdir)
        jobgraph = anib.make_job_graph(self.infiles, fragresult[0], blastcmds)
        result = run_multiprocessing.run_dependency_graph(jobgraph)
        assert_equal(0, result)
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,796
|
froggleston/pyani
|
refs/heads/master
|
/tests/test_anib.py
|
#!/usr/bin/env python
"""test_anib.py
Test anib.py module.
These tests are intended to be run from the repository root using:
nosetests -v
print() statements will be caught by nosetests unless there is an
error. They can also be recovered with the -s option.
(c) The James Hutton Institute 2017
Author: Leighton Pritchard
Contact:
leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD6 9LH,
Scotland,
UK
The MIT License
Copyright (c) 2017 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import unittest
import pandas as pd
from nose.tools import assert_equal, nottest
from pandas.util.testing import assert_frame_equal
from pyani import anib, pyani_files
class TestBLASTCmdline(unittest.TestCase):
"""Class defining tests of BLAST command-line generation."""
def setUp(self):
"""Set parameters for tests."""
self.indir = os.path.join("tests", "test_input", "anib")
self.outdir = os.path.join("tests", "test_output", "anib")
self.seqdir = os.path.join("tests", "test_input", "sequences")
self.infiles = [
os.path.join(self.seqdir, fname) for fname in os.listdir(self.seqdir)
]
self.fraglen = 1000
self.fmtdboutdir = os.path.join(self.outdir, "formatdb")
self.fmtdbcmd = " ".join(
[
"formatdb -p F -i",
"tests/test_output/anib/formatdb/NC_002696.fna",
"-t NC_002696",
]
)
self.makeblastdbdir = os.path.join(self.outdir, "makeblastdb")
self.makeblastdbcmd = " ".join(
[
"makeblastdb -dbtype nucl -in",
"tests/test_input/sequences/NC_002696.fna",
"-title NC_002696 -out",
os.path.join(
"tests", "test_output", "anib", "makeblastdb", "NC_002696.fna"
),
]
)
self.blastdbfnames = [
os.path.join(self.seqdir, fname)
for fname in ("NC_002696.fna", "NC_010338.fna")
]
self.blastdbtgt = [
(
" ".join(
[
"makeblastdb -dbtype nucl -in",
"tests/test_input/sequences/NC_002696.fna",
"-title NC_002696 -out",
"tests/test_output/anib/NC_002696.fna",
]
),
"tests/test_output/anib/NC_002696.fna",
),
(
" ".join(
[
"makeblastdb -dbtype nucl -in",
"tests/test_input/sequences/NC_010338.fna",
"-title NC_010338 -out",
"tests/test_output/anib/NC_010338.fna",
]
),
"tests/test_output/anib/NC_010338.fna",
),
]
self.blastdbtgtlegacy = [
(
" ".join(
[
"formatdb -p F -i",
"tests/test_output/anib/NC_002696.fna",
"-t NC_002696",
]
),
"tests/test_output/anib/NC_002696.fna",
),
(
" ".join(
[
"formatdb -p F -i",
"tests/test_output/anib/NC_010338.fna",
"-t NC_010338",
]
),
"tests/test_output/anib/NC_010338.fna",
),
]
self.blastncmd = " ".join(
[
"blastn -out",
os.path.join(
"tests", "test_output", "anib", "NC_002696_vs_NC_010338.blast_tab"
),
"-query tests/test_input/sequences/NC_002696.fna",
"-db tests/test_input/sequences/NC_010338.fna",
"-xdrop_gap_final 150 -dust no -evalue 1e-15",
"-max_target_seqs 1 -outfmt '6 qseqid sseqid",
"length mismatch pident nident qlen slen qstart",
"qend sstart send positive ppos gaps' -task blastn",
]
)
self.blastallcmd = " ".join(
[
"blastall -p blastn -o",
os.path.join(
"tests", "test_output", "anib", "NC_002696_vs_NC_010338.blast_tab"
),
"-i tests/test_input/sequences/NC_002696.fna",
"-d tests/test_input/sequences/NC_010338.fna",
"-X 150 -q -1 -F F -e 1e-15 -b 1 -v 1 -m 8",
]
)
self.blastntgt = [
" ".join(
[
"blastn -out",
os.path.join(
"tests",
"test_output",
"anib",
"NC_002696_vs_NC_010338.blast_tab",
),
"-query tests/test_input/sequences/NC_002696.fna",
"-db tests/test_input/sequences/NC_010338.fna",
"-xdrop_gap_final 150 -dust no -evalue 1e-15",
"-max_target_seqs 1 -outfmt '6 qseqid sseqid",
"length mismatch pident nident qlen slen qstart",
"qend sstart send positive ppos gaps' -task blastn",
]
),
" ".join(
[
"blastn -out",
os.path.join(
"tests",
"test_output",
"anib",
"NC_010338_vs_NC_002696.blast_tab",
),
"-query tests/test_input/sequences/NC_010338.fna",
"-db tests/test_input/sequences/NC_002696.fna",
"-xdrop_gap_final 150 -dust no -evalue 1e-15",
"-max_target_seqs 1 -outfmt '6 qseqid sseqid length",
"mismatch pident nident qlen slen qstart qend",
"sstart send positive ppos gaps' -task blastn",
]
),
]
self.blastalltgt = [
" ".join(
[
"blastall -p blastn -o",
os.path.join(
"tests",
"test_output",
"anib",
"NC_002696_vs_NC_010338.blast_tab",
),
"-i tests/test_input/sequences/NC_002696.fna",
"-d tests/test_input/sequences/NC_010338.fna",
"-X 150 -q -1 -F F -e 1e-15 -b 1 -v 1 -m 8",
]
),
" ".join(
[
"blastall -p blastn -o",
os.path.join(
"tests",
"test_output",
"anib",
"NC_010338_vs_NC_002696.blast_tab",
),
"-i tests/test_input/sequences/NC_010338.fna",
"-d tests/test_input/sequences/NC_002696.fna",
"-X 150 -q -1 -F F -e 1e-15 -b 1 -v 1 -m 8",
]
),
]
self.blastnjobdict = sorted(
[
(
"tests/test_output/anib/NC_002696.fna",
"makeblastdb -dbtype nucl "
+ "-in tests/test_input/sequences/NC_002696.fna "
+ "-title NC_002696 -out tests/test_output/anib/NC_002696.fna",
),
(
"tests/test_output/anib/NC_010338.fna",
"makeblastdb -dbtype nucl "
+ "-in tests/test_input/sequences/NC_010338.fna "
+ "-title NC_010338 -out tests/test_output/anib/NC_010338.fna",
),
(
"tests/test_output/anib/NC_011916.fna",
"makeblastdb -dbtype nucl "
+ "-in tests/test_input/sequences/NC_011916.fna "
+ "-title NC_011916 -out tests/test_output/anib/NC_011916.fna",
),
(
"tests/test_output/anib/NC_014100.fna",
"makeblastdb -dbtype nucl "
+ "-in tests/test_input/sequences/NC_014100.fna "
+ "-title NC_014100 -out tests/test_output/anib/NC_014100.fna",
),
]
)
self.blastalljobdict = sorted(
[
(
"tests/test_output/anib/NC_002696.fna",
"formatdb -p F -i tests/test_output/anib/NC_002696.fna "
+ "-t NC_002696",
),
(
"tests/test_output/anib/NC_010338.fna",
"formatdb -p F -i tests/test_output/anib/NC_010338.fna "
+ "-t NC_010338",
),
(
"tests/test_output/anib/NC_011916.fna",
"formatdb -p F -i tests/test_output/anib/NC_011916.fna "
+ "-t NC_011916",
),
(
"tests/test_output/anib/NC_014100.fna",
"formatdb -p F -i tests/test_output/anib/NC_014100.fna "
+ "-t NC_014100",
),
]
)
os.makedirs(self.outdir, exist_ok=True)
os.makedirs(self.fmtdboutdir, exist_ok=True)
os.makedirs(self.makeblastdbdir, exist_ok=True)
@nottest # legacy BLAST deprecated
def test_formatdb_generation(self):
"""generate formatdb command-line."""
cmd = anib.construct_formatdb_cmd(
os.path.join(self.seqdir, "NC_002696.fna"), self.fmtdboutdir
)
assert_equal(cmd[0], self.fmtdbcmd) # correct command
assert os.path.isfile(cmd[1]) # creates new file
def test_makeblastdb_generation(self):
"""generate makeblastdb command-line."""
cmd = anib.construct_makeblastdb_cmd(
os.path.join(self.seqdir, "NC_002696.fna"), self.makeblastdbdir
)
assert_equal(cmd[0], self.makeblastdbcmd) # correct command
def test_blastdb_commands(self):
"""generate BLAST+ db commands."""
# BLAST+
cmds = anib.generate_blastdb_commands(
self.blastdbfnames, self.outdir, mode="ANIb"
)
assert_equal(cmds, self.blastdbtgt)
@nottest # legacy BLAST deprecated
def test_legacy_blastdb_commands(self):
"""generate legacy BLAST db commands."""
# legacy
cmds = anib.generate_blastdb_commands(
self.blastdbfnames, self.outdir, mode="ANIblastall"
)
assert_equal(cmds, self.blastdbtgtlegacy)
def test_blastn_generation(self):
"""generate BLASTN+ command-line."""
cmd = anib.construct_blastn_cmdline(
self.blastdbfnames[0], self.blastdbfnames[1], self.outdir
)
assert_equal(cmd, self.blastncmd)
@nottest # legacy BLAST deprecated
def test_blastall_generation(self):
"""generate legacy BLASTN command-line."""
cmd = anib.construct_blastall_cmdline(
self.blastdbfnames[0], self.blastdbfnames[1], self.outdir
)
assert_equal(cmd, self.blastallcmd)
def test_blastn_commands(self):
"""generate BLASTN+ commands."""
# BLAST+
cmds = anib.generate_blastn_commands(
self.blastdbfnames, self.outdir, mode="ANIb"
)
assert_equal(cmds, self.blastntgt)
@nottest # legacy BLAST deprecated
def test_legacy_blastn_commands(self):
"""generate legacy BLASTN commands."""
cmds = anib.generate_blastn_commands(
self.blastdbfnames, self.outdir, mode="ANIblastall"
)
assert_equal(cmds, self.blastalltgt)
@nottest # legacy BLAST deprecated
def test_blastall_dbjobdict(self):
"""generate dictionary of legacy BLASTN database jobs."""
blastcmds = anib.make_blastcmd_builder("ANIblastall", self.outdir)
jobdict = anib.build_db_jobs(self.infiles, blastcmds)
assert_equal(
sorted([(k, v.script) for (k, v) in jobdict.items()]), self.blastalljobdict
)
def test_blastn_dbjobdict(self):
"""generate dictionary of BLASTN+ database jobs."""
blastcmds = anib.make_blastcmd_builder("ANIb", self.outdir)
jobdict = anib.build_db_jobs(self.infiles, blastcmds)
assert_equal(
sorted([(k, v.script) for (k, v) in jobdict.items()]), self.blastnjobdict
)
def test_blastn_graph(self):
    """Check the structure of the BLASTN+ job dependency graph."""
    fragresult = anib.fragment_fasta_files(self.infiles, self.outdir, self.fraglen)
    blastcmds = anib.make_blastcmd_builder("ANIb", self.outdir)
    jobgraph = anib.make_job_graph(self.infiles, fragresult[0], blastcmds)
    # Every top-level job must be a blastn job whose single
    # dependency is the corresponding makeblastdb job
    for job in jobgraph:
        assert job.script.startswith("blastn")
        assert_equal(1, len(job.dependencies))
        assert job.dependencies[0].script.startswith("makeblastdb")
@nottest  # legacy BLAST deprecated
def test_blastall_graph(self):
    """Check the structure of the legacy BLASTN job dependency graph."""
    fragresult = anib.fragment_fasta_files(self.infiles, self.outdir, self.fraglen)
    blastcmds = anib.make_blastcmd_builder("ANIblastall", self.outdir)
    jobgraph = anib.make_job_graph(self.infiles, fragresult[0], blastcmds)
    # Every top-level job must be a blastall job whose single
    # dependency is the corresponding formatdb job
    for job in jobgraph:
        assert job.script.startswith("blastall -p blastn")
        assert_equal(1, len(job.dependencies))
        assert job.dependencies[0].script.startswith("formatdb")
class TestFragments(unittest.TestCase):
    """Tests of FASTA fragmentation for ANIb/ANIblastall."""

    def setUp(self):
        """Initialise parameters for tests."""
        self.outdir = os.path.join("tests", "test_output", "anib")
        self.seqdir = os.path.join("tests", "test_input", "sequences")
        # Input genomes and the fragment files they should produce
        accessions = ("NC_002696", "NC_010338", "NC_011916", "NC_014100")
        self.infnames = [
            os.path.join(self.seqdir, "%s.fna" % acc) for acc in accessions
        ]
        self.outfnames = [
            os.path.join(self.outdir, "%s-fragments.fna" % acc) for acc in accessions
        ]
        self.fraglen = 1000
        os.makedirs(self.outdir, exist_ok=True)

    def test_fragment_files(self):
        """Fragment FASTA files and check output files and fragment sizes."""
        result = anib.fragment_fasta_files(self.infnames, self.outdir, self.fraglen)
        # Each expected fragment file must have been written
        for outfname in self.outfnames:
            assert os.path.isfile(outfname)
        # No fragment may exceed the requested fragment length
        for fragdict in result[-1].values():
            for fraglen in fragdict.values():
                assert fraglen <= self.fraglen
class TestParsing(unittest.TestCase):
    """Class defining tests of BLAST output parsing."""

    def setUp(self):
        """Set parameters for tests."""
        self.indir = os.path.join("tests", "test_input", "anib")
        self.seqdir = os.path.join("tests", "test_input", "sequences")
        self.fragdir = os.path.join("tests", "test_input", "anib", "fragfiles")
        self.anibdir = os.path.join("tests", "test_input", "anib", "blastn")
        self.aniblastalldir = os.path.join("tests", "test_input", "anib", "blastall")
        # Pre-computed BLAST tabular outputs used by the parsing tests
        self.fname_legacy = os.path.join(self.indir, "NC_002696_vs_NC_010338.blast_tab")
        self.fname = os.path.join(self.indir, "NC_002696_vs_NC_011916.blast_tab")
        self.fragfname = os.path.join(self.indir, "NC_002696-fragments.fna")
        self.fraglens = 1000
        self.infnames = [
            os.path.join(self.seqdir, fname)
            for fname in (
                "NC_002696.fna",
                "NC_010338.fna",
                "NC_011916.fna",
                "NC_014100.fna",
            )
        ]
        self.fragfiles = [
            os.path.join(self.fragdir, fname)
            for fname in (
                "NC_002696-fragments.fna",
                "NC_010338-fragments.fna",
                "NC_011916-fragments.fna",
                "NC_014100-fragments.fna",
            )
        ]
        # Expected ANIb percentage-identity matrix for the four genomes
        self.anibtgt = pd.DataFrame(
            [
                [1.000000, 0.796974, 0.999977, 0.837285],
                [0.795958, 1.000000, 0.795917, 0.798250],
                [0.999922, 0.795392, 1.000000, 0.837633],
                [0.836780, 0.798704, 0.836823, 1.000000],
            ],
            columns=["NC_002696", "NC_010338", "NC_011916", "NC_014100"],
            index=["NC_002696", "NC_010338", "NC_011916", "NC_014100"],
        )
        # Expected ANIblastall percentage-identity matrix
        self.aniblastalltgt = pd.DataFrame(
            [
                [1.000000, 0.785790, 0.999977, 0.830641],
                [0.781319, 1.000000, 0.781281, 0.782723],
                [0.999937, 0.782968, 1.000000, 0.830431],
                [0.828919, 0.784533, 0.828853, 1.000000],
            ],
            columns=["NC_002696", "NC_010338", "NC_011916", "NC_014100"],
            index=["NC_002696", "NC_010338", "NC_011916", "NC_014100"],
        )

    @nottest  # legacy BLASTN deprecated
    def test_parse_blasttab(self):
        """parses ANIblastall .blast_tab output."""
        fragdata = anib.get_fraglength_dict([self.fragfname])
        # ANIb output: expected (aligned bases, sim errors, %identity)
        result = anib.parse_blast_tab(self.fname, fragdata, 0.3, 0.7, mode="ANIb")
        assert_equal(result, (4016551, 93, 99.997693577050029))
        # ANIblastall output
        result = anib.parse_blast_tab(
            self.fname_legacy, fragdata, 0.3, 0.7, mode="ANIblastall"
        )
        assert_equal(result, (1966922, 406104, 78.578978313253018))

    def test_blastdir_processing(self):
        """parses directory of .blast_tab output."""
        orglengths = pyani_files.get_sequence_lengths(self.infnames)
        fraglengths = anib.get_fraglength_dict(self.fragfiles)
        # ANIb
        result = anib.process_blast(self.anibdir, orglengths, fraglengths, mode="ANIb")
        # Sort both matrices so that row/column order does not matter
        assert_frame_equal(
            result.percentage_identity.sort_index(1).sort_index(),
            self.anibtgt.sort_index(1).sort_index(),
        )

    @nottest  # legacy BLAST deprecated
    def test_legacy_blastdir_processing(self):
        """parse directory of legacy .blast_tab output"""
        orglengths = pyani_files.get_sequence_lengths(self.infnames)
        fraglengths = anib.get_fraglength_dict(self.fragfiles)
        # ANIblastall
        result = anib.process_blast(
            self.aniblastalldir, orglengths, fraglengths, mode="ANIblastall"
        )
        assert_frame_equal(
            result.percentage_identity.sort_index(1).sort_index(),
            self.aniblastalltgt.sort_index(1).sort_index(),
        )
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,797
|
froggleston/pyani
|
refs/heads/master
|
/pyani/anim.py
|
# Copyright 2013-2017, The James Hutton Institute
# Author: Leighton Pritchard
#
# This code is part of the pyani package, and is governed by its licence.
# Please see the LICENSE file that should have been included as part of
# this package.
"""Code to implement the ANIm average nucleotide identity method.
Calculates ANI by the ANIm method, as described in Richter et al (2009)
Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106.
All input FASTA format files are compared against each other, pairwise,
using NUCmer (binary location must be provided). NUCmer output will be stored
in a specified output directory.
The NUCmer .delta file output is parsed to obtain an alignment length
and similarity error count for every unique region alignment. These are
processed to give matrices of aligned sequence lengths, similarity error
counts, average nucleotide identity (ANI) percentages, and minimum aligned
percentage (of whole genome) for each pairwise comparison.
"""
import os
from . import pyani_config
from . import pyani_files
from . import pyani_jobs
from .pyani_tools import ANIResults
# Generate list of Job objects, one per NUCmer run
def generate_nucmer_jobs(
    filenames,
    outdir=".",
    nucmer_exe=pyani_config.NUCMER_DEFAULT,
    filter_exe=pyani_config.FILTER_DEFAULT,
    maxmatch=False,
    jobprefix="ANINUCmer",
):
    """Return a list of Jobs describing NUCmer command-lines for ANIm.

    - filenames - a list of paths to input FASTA files
    - outdir - path to output directory
    - nucmer_exe - location of the nucmer binary
    - filter_exe - location of the delta-filter binary
    - maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option
    - jobprefix - prefix used when naming the generated jobs

    Each pairwise comparison is represented by a delta-filter Job that
    depends on the corresponding NUCmer Job.
    """
    ncmds, fcmds = generate_nucmer_commands(
        filenames, outdir, nucmer_exe, filter_exe, maxmatch
    )
    joblist = []
    for idx, (ncmd, fcmd) in enumerate(zip(ncmds, fcmds)):
        nucmer_job = pyani_jobs.Job("%s_%06d-n" % (jobprefix, idx), ncmd)
        filter_job = pyani_jobs.Job("%s_%06d-f" % (jobprefix, idx), fcmd)
        filter_job.add_dependency(nucmer_job)
        # The NUCmer job is reachable as a dependency of the filter job,
        # so only the filter job needs to be collected.
        joblist.append(filter_job)
    return joblist
# Generate list of NUCmer pairwise comparison command lines from
# passed sequence filenames
def generate_nucmer_commands(
    filenames,
    outdir=".",
    nucmer_exe=pyani_config.NUCMER_DEFAULT,
    filter_exe=pyani_config.FILTER_DEFAULT,
    maxmatch=False,
):
    """Return a tuple of lists of NUCmer command-lines for ANIm.

    The first element is a list of NUCmer commands, the second a list
    of delta_filter_wrapper.py commands. These are ordered such that
    commands are paired; the NUCmer commands should be run before the
    delta-filter commands.

    - filenames - a list of paths to input FASTA files
    - outdir - path to output directory
    - nucmer_exe - location of the nucmer binary
    - filter_exe - location of the delta-filter binary
    - maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option

    All unordered pairs of input files are compared exactly once.
    """
    nucmer_cmds, filter_cmds = [], []
    # Upper-triangle iteration: each pair (query, subject) appears once
    for idx, query in enumerate(filenames[:-1]):
        for subject in filenames[idx + 1 :]:
            ncmd, dcmd = construct_nucmer_cmdline(
                query, subject, outdir, nucmer_exe, filter_exe, maxmatch
            )
            nucmer_cmds.append(ncmd)
            filter_cmds.append(dcmd)
    return (nucmer_cmds, filter_cmds)
# Generate single NUCmer pairwise comparison command line from pair of
# input filenames
def construct_nucmer_cmdline(
    fname1,
    fname2,
    outdir=".",
    nucmer_exe=pyani_config.NUCMER_DEFAULT,
    filter_exe=pyani_config.FILTER_DEFAULT,
    maxmatch=False,
):
    """Return a tuple of NUCmer and delta-filter commands.

    The split into a tuple was made necessary by changes to SGE/OGE. The
    delta-filter command must now be run as a dependency of the NUCmer
    command, and be wrapped in a Python script to capture STDOUT.

    NOTE: This command-line writes output data to a subdirectory of the
    passed outdir, called "nucmer_output".

    - fname1 - query FASTA filepath
    - fname2 - subject FASTA filepath
    - outdir - path to output directory
    - nucmer_exe - location of the nucmer binary
    - filter_exe - location of the delta-filter binary
    - maxmatch - Boolean flag indicating whether to use NUCmer's -maxmatch
      option. If not, the -mum option is used instead
    """
    # Output prefix combines the basenames (minus extensions) of both inputs
    stem1 = os.path.splitext(os.path.split(fname1)[-1])[0]
    stem2 = os.path.splitext(os.path.split(fname2)[-1])[0]
    outsubdir = os.path.join(outdir, pyani_config.ALIGNDIR["ANIm"])
    outprefix = os.path.join(outsubdir, "%s_vs_%s" % (stem1, stem2))
    mode = "--maxmatch" if maxmatch else "--mum"
    nucmercmd = "{0} {1} -p {2} {3} {4}".format(
        nucmer_exe, mode, outprefix, fname1, fname2
    )
    filtercmd = "delta_filter_wrapper.py " + "{0} -1 {1} {2}".format(
        filter_exe, outprefix + ".delta", outprefix + ".filter"
    )
    return (nucmercmd, filtercmd)
# Parse NUCmer delta file to get total alignment length and total sim_errors
def parse_delta(filename):
    """Return (alignment length, similarity errors) tuple from passed .delta.

    - filename - path to the input .delta file

    Extracts the aligned length and number of similarity errors for each
    aligned uniquely-matched region, and returns the cumulative total for
    each as a tuple.
    """
    aln_length, sim_errors = 0, 0
    # Use a context manager so the file handle is always closed (the
    # previous implementation leaked it), and stream line-by-line rather
    # than reading the whole file into memory with readlines().
    with open(filename, "r") as dfh:
        for line in dfh:
            fields = line.strip().split()
            if not fields:  # guard: blank lines previously raised IndexError
                continue
            if fields[0] == "NUCMER" or fields[0].startswith(">"):  # Skip headers
                continue
            # We only process lines with seven columns (alignment headers):
            # start1 end1 start2 end2 errors sim_errors stop_codons
            if len(fields) == 7:
                aln_length += abs(int(fields[1]) - int(fields[0]))
                sim_errors += int(fields[4])
    return aln_length, sim_errors
# Parse all the .delta files in the passed directory
def process_deltadir(delta_dir, org_lengths, logger=None):
    """Returns a tuple of ANIm results for .deltas in passed directory.

    - delta_dir - path to the directory containing .delta files
    - org_lengths - dictionary of total sequence lengths, keyed by sequence
    - logger - optional logger; used only for warnings about skipped or
      zero-length alignments

    Returns the following pandas dataframes in an ANIResults object;
    query sequences are rows, subject sequences are columns:

    - alignment_lengths - symmetrical: total length of alignment
    - percentage_identity - symmetrical: percentage identity of alignment
    - alignment_coverage - non-symmetrical: coverage of query and subject
    - similarity_errors - symmetrical: count of similarity errors

    May throw a ZeroDivisionError if one or more NUCmer runs failed, or a
    very distant sequence was included in the analysis.
    """
    # Process directory to identify input files - as of v0.2.4 we use the
    # .filter files that result from delta-filter (1:1 alignments)
    deltafiles = pyani_files.get_input_files(delta_dir, ".filter")
    # Hold data in ANIResults object
    results = ANIResults(list(org_lengths.keys()), "ANIm")
    # Fill diagonal NA values for alignment_length with org_lengths
    for org, length in list(org_lengths.items()):
        results.alignment_lengths[org][org] = length
    # Process .delta files assuming that the filename format holds:
    # org1_vs_org2.delta
    for deltafile in deltafiles:
        qname, sname = os.path.splitext(os.path.split(deltafile)[-1])[0].split("_vs_")
        # We may have .delta files from other analyses in the same directory
        # If this occurs, we raise a warning, and skip the .delta file
        if qname not in list(org_lengths.keys()):
            if logger:
                logger.warning(
                    "Query name %s not in input " % qname
                    + "sequence list, skipping %s" % deltafile
                )
            continue
        if sname not in list(org_lengths.keys()):
            if logger:
                logger.warning(
                    "Subject name %s not in input " % sname
                    + "sequence list, skipping %s" % deltafile
                )
            continue
        tot_length, tot_sim_error = parse_delta(deltafile)
        # NOTE(review): the "and logger is not None" condition is redundant
        # with the inner "if logger:" check -- candidate for simplification
        if tot_length == 0 and logger is not None:
            if logger:
                logger.warning(
                    "Total alignment length reported in " + "%s is zero!" % deltafile
                )
        # Fraction of each genome covered by the aligned region
        query_cover = float(tot_length) / org_lengths[qname]
        sbjct_cover = float(tot_length) / org_lengths[sname]
        # Calculate percentage ID of aligned length. This may fail if
        # total length is zero.
        # The ZeroDivisionError that would arise should be handled
        # Common causes are that a NUCmer run failed, or that a very
        # distant sequence was included in the analysis.
        try:
            perc_id = 1 - float(tot_sim_error) / tot_length
        except ZeroDivisionError:
            perc_id = 0  # set arbitrary value of zero identity
            results.zero_error = True
        # Populate dataframes: when assigning data from symmetrical MUMmer
        # output, both upper and lower triangles will be populated
        results.add_tot_length(qname, sname, tot_length)
        results.add_sim_errors(qname, sname, tot_sim_error)
        results.add_pid(qname, sname, perc_id)
        results.add_coverage(qname, sname, query_cover, sbjct_cover)
    return results
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,798
|
froggleston/pyani
|
refs/heads/master
|
/tests/test_anim.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""test_anim.py
Test anim.py module.
These tests are intended to be run from the repository root using:
nosetests -v
print() statements will be caught by nosetests unless there is an
error. They can also be recovered with the -s option.
(c) The James Hutton Institute 2017
Author: Leighton Pritchard
Contact:
leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD6 9LH,
Scotland,
UK
The MIT License
Copyright (c) 2017 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import unittest
import pandas as pd
from nose.tools import (assert_equal,)
from pandas.util.testing import (assert_frame_equal,)
from pyani import (anim, pyani_files)
class TestNUCmerCmdline(unittest.TestCase):
    """Class defining tests of NUCmer command-line generation."""

    def setUp(self):
        """Set parameters for tests."""
        # Basic NUCmer and delta-filter command-line targets
        self.ntgt = ' '.join(["nucmer --mum -p",
                              "tests/test_output/anim/nucmer_output/file1_vs_file2",
                              "file1.fna file2.fna"])
        self.ntgtmax = ' '.join(["nucmer --maxmatch -p",
                                 "tests/test_output/anim/nucmer_output/file1_vs_file2",
                                 "file1.fna file2.fna"])
        self.ftgt = ' '.join(["delta_filter_wrapper.py delta-filter -1",
                              "tests/test_output/anim/nucmer_output/file1_vs_file2.delta",
                              "tests/test_output/anim/nucmer_output/file1_vs_file2.filter"])
        self.files = ["file1", "file2", "file3", "file4"]
        # Expected NUCmer commands for all six pairwise comparisons
        self.ncmdlist = ['nucmer --mum -p ./nucmer_output/file1_vs_file2 file1 file2',
                         'nucmer --mum -p ./nucmer_output/file1_vs_file3 file1 file3',
                         'nucmer --mum -p ./nucmer_output/file1_vs_file4 file1 file4',
                         'nucmer --mum -p ./nucmer_output/file2_vs_file3 file2 file3',
                         'nucmer --mum -p ./nucmer_output/file2_vs_file4 file2 file4',
                         'nucmer --mum -p ./nucmer_output/file3_vs_file4 file3 file4']
        # Expected paired delta-filter wrapper commands
        self.fcmdlist = [' '.join(['delta_filter_wrapper.py delta-filter -1',
                                   './nucmer_output/file1_vs_file2.delta',
                                   './nucmer_output/file1_vs_file2.filter']),
                         ' '.join(['delta_filter_wrapper.py delta-filter -1',
                                   './nucmer_output/file1_vs_file3.delta',
                                   './nucmer_output/file1_vs_file3.filter']),
                         ' '.join(['delta_filter_wrapper.py delta-filter -1',
                                   './nucmer_output/file1_vs_file4.delta',
                                   './nucmer_output/file1_vs_file4.filter']),
                         ' '.join(['delta_filter_wrapper.py delta-filter -1',
                                   './nucmer_output/file2_vs_file3.delta',
                                   './nucmer_output/file2_vs_file3.filter']),
                         ' '.join(['delta_filter_wrapper.py delta-filter -1',
                                   './nucmer_output/file2_vs_file4.delta',
                                   './nucmer_output/file2_vs_file4.filter']),
                         ' '.join(['delta_filter_wrapper.py delta-filter -1',
                                   './nucmer_output/file3_vs_file4.delta',
                                   './nucmer_output/file3_vs_file4.filter'])]
        self.outdir = os.path.join('tests', 'test_output', 'anim')
        self.indir = os.path.join('tests', 'test_input', 'anim')

    def test_single_cmd_generation(self):
        """generate single abstract NUCmer/delta-filter command-line.

        Tests that a single NUCmer/delta-filter command-line pair is
        produced correctly
        """
        cmds = anim.construct_nucmer_cmdline("file1.fna", "file2.fna",
                                             outdir=self.outdir)
        assert_equal(cmds, (self.ntgt, self.ftgt))

    def test_maxmatch_cmd_generation(self):
        """generate NUCmer command line with maxmatch."""
        ncmd, fcmd = anim.construct_nucmer_cmdline("file1.fna", "file2.fna",
                                                   outdir=self.outdir,
                                                   maxmatch=True)
        assert_equal(ncmd, self.ntgtmax)

    def test_multi_cmd_generation(self):
        """generate multiple abstract NUCmer/delta-filter command-lines.

        Tests that all the input files are correctly-paired
        """
        cmds = anim.generate_nucmer_commands(self.files)
        assert_equal(cmds, (self.ncmdlist, self.fcmdlist))

    def test_nucmer_job_generation(self):
        """generate dependency tree of NUCmer/delta-filter jobs.

        Tests that the correct dependency graph and naming scheme is produced.
        """
        joblist = anim.generate_nucmer_jobs(self.files,
                                            jobprefix="test")
        assert_equal(len(joblist), 6)
        # Each returned job is a filter job depending on one NUCmer job
        for idx, job in enumerate(joblist):
            assert_equal(job.name, "test_%06d-f" % idx)  # filter job name
            assert_equal(len(job.dependencies), 1)  # has NUCmer job
            assert_equal(job.dependencies[0].name,
                         "test_%06d-n" % idx)  # NUCmer job name
class TestDeltafileProcessing(unittest.TestCase):
    """Class defining tests for .delta/.filter file parsing"""

    def setUp(self):
        """Set parameters for tests."""
        self.indir = os.path.join('tests', 'test_input', 'anim')
        self.seqdir = os.path.join('tests', 'test_input', 'sequences')
        self.deltafile = os.path.join(self.indir, 'test.delta')
        self.deltadir = os.path.join(self.indir, 'deltadir')
        # Expected percentage-identity matrix for the four test genomes
        self.df_pid = pd.DataFrame([[1.000000, 0.850994, 0.999974, 0.867940],
                                    [0.850994, 1.000000, 0.851074, 0.852842],
                                    [0.999974, 0.851074, 1.000000, 0.867991],
                                    [0.867940, 0.852842, 0.867991, 1.000000]],
                                   columns=['NC_002696', 'NC_010338',
                                            'NC_011916', 'NC_014100'],
                                   index=['NC_002696', 'NC_010338',
                                          'NC_011916', 'NC_014100'])

    def test_deltafile_import(self):
        """parses NUCmer .delta/.filter file."""
        result = anim.parse_delta(self.deltafile)
        # Expected (alignment length, similarity errors) for test.delta
        assert_equal(result, (4073917, 2191))

    def test_process_deltadir(self):
        """processes directory of .delta files into ANIResults."""
        seqfiles = pyani_files.get_fasta_files(self.seqdir)
        orglengths = pyani_files.get_sequence_lengths(seqfiles)
        result = anim.process_deltadir(self.deltadir, orglengths)
        # Sort both matrices so that row/column order does not matter
        assert_frame_equal(result.percentage_identity.sort_index(1).sort_index(),
                           self.df_pid.sort_index(1).sort_index())
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,799
|
froggleston/pyani
|
refs/heads/master
|
/tests/test_parsing.py
|
#!/usr/bin/env python
"""Tests for pyani package intermediate file parsing
These tests are intended to be run using the nose package
(see https://nose.readthedocs.org/en/latest/).
"""
import os
from nose.tools import assert_equal
from pyani import anim
# Work out where we are. We need to do this to find related data files
# for testing, relative to this test module rather than the CWD
curdir = os.path.dirname(os.path.abspath(__file__))

# Path to a pre-computed NUCmer .delta file used as test input
DELTAFILE = os.path.join(curdir, 'test_ani_data',
                         'NC_002696_vs_NC_011916.delta')
# Test ANIm command-lines
# One pairwise comparison
def test_anim_delta():
    """Check parsing of a known NUCmer .delta file."""
    aln, sim = anim.parse_delta(DELTAFILE)
    # Expected totals for NC_002696 vs NC_011916
    assert_equal(aln, 4073917)
    assert_equal(sim, 2191)
    print("Alignment length: {0}\nSimilarity Errors: {1}".format(aln, sim))
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,800
|
froggleston/pyani
|
refs/heads/master
|
/tests/test_concordance.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""test_concordance.py
Test for concordance of pyani package output with JSpecies
These tests are intended to be run from the repository root using:
nosetests -v
print() statements will be caught by nosetests unless there is an
error. They can also be recovered with the -s option.
(c) The James Hutton Institute 2017-2019
Author: Leighton Pritchard
Contact:
leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD6 9LH,
Scotland,
UK
The MIT License
Copyright (c) 2017-2019 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import shutil
import subprocess
import sys
import unittest
import pandas as pd
from nose.tools import assert_equal, assert_less, nottest
from pyani import run_multiprocessing as run_mp
from pyani import anib, anim, tetra, pyani_files, pyani_config
def parse_jspecies(infile):
    """Parse JSpecies output into Pandas dataframes.

    The function expects a single file containing (legacy) ANIb,
    ANIm, and TETRA output.

    - infile      path to JSpecies output file

    Returns a dict of dataframes, keyed by method name ("ANIm", "ANIb",
    "Tetra"), with genomes as both rows and columns.

    This is an ugly function!
    """
    dfs = dict()
    methods = ("ANIm", "ANIb", "Tetra")
    with open(infile, "r") as ifh:
        # Line-driven state machine: `header` means the next line holds the
        # column labels; `in_table` means we are consuming data rows.
        header, in_table = False, False
        # The appended "\n" guarantees the final table is flushed into dfs
        for line in [l.strip() for l in ifh.readlines() + ["\n"]]:
            if line in methods and not in_table:
                # A method name introduces a new table
                method, header = line, True
            elif header:
                # This line holds the tab-separated column labels
                columns = line.split("\t")
                data = pd.DataFrame(index=columns, columns=columns)
                in_table, header = True, False
            elif in_table:
                if not len(line):
                    # A blank line terminates the current table
                    dfs[method] = data.sort_index(axis=0).sort_index(axis=1)
                    in_table = False
                else:
                    ldata = line.split("\t")
                    row = ldata[0]
                    for idx, val in enumerate(ldata[1:]):
                        if val != "---":
                            data[columns[idx]][row] = float(val)
                        elif method.startswith("ANI"):
                            # "---" marks self-comparison: 100% for ANI methods
                            data[columns[idx]][row] = 100.0
                        else:
                            # ...and perfect correlation (1.0) for TETRA
                            data[columns[idx]][row] = 1.0
            else:
                pass
    return dfs
class TestConcordance(unittest.TestCase):
    """Class defining tests of pyani concordance with JSpecies."""

    def setUp(self):
        """Set values and parameters for tests."""
        self.indir = os.path.join("tests", "test_input", "concordance")
        self.outdir = os.path.join("tests", "test_output", "concordance")
        self.tgtdir = os.path.join("tests", "test_targets", "concordance")
        self.deltadir = os.path.join(self.outdir, "nucmer_output")
        self.infiles = pyani_files.get_fasta_files(self.indir)
        self.orglengths = pyani_files.get_sequence_lengths(self.infiles)
        # JSpecies reference results to compare pyani output against
        self.target = parse_jspecies(os.path.join(self.tgtdir, "jspecies_output.tab"))
        # Maximum tolerated absolute difference from JSpecies, per method
        self.tolerance = {
            "ANIm": 0.1,
            "ANIb_lo": 5,
            "ANIb_hi": 0.1,
            "ANIblastall": 0.1,
            "TETRA": 0.1,
        }
        self.fragsize = 1020
        os.makedirs(self.outdir, exist_ok=True)
        os.makedirs(self.deltadir, exist_ok=True)

    def test_anim_concordance(self):
        """ANIm results concordant with JSpecies."""
        # Perform ANIm on the input directory contents
        # We have to separate nucmer/delta-filter command generation
        # because Travis-CI doesn't play nicely with changes we made
        # for local SGE/OGE integration.
        # This might be avoidable with a scheduler flag passed to
        # jobgroup generation in the anim.py module. That's a TODO.
        ncmds, fcmds = anim.generate_nucmer_commands(self.infiles, self.outdir)
        run_mp.multiprocessing_run(ncmds)
        # delta-filter commands need to be treated with care for
        # Travis-CI. Our cluster won't take redirection or semicolon
        # separation in individual commands, but the wrapper we wrote
        # for this (delta_filter_wrapper.py) can't be called under
        # Travis-CI. So we must deconstruct the commands below
        dfcmds = [
            " > ".join([" ".join(fcmd.split()[1:-1]), fcmd.split()[-1]])
            for fcmd in fcmds
        ]
        run_mp.multiprocessing_run(dfcmds)
        results = anim.process_deltadir(self.deltadir, self.orglengths)
        result_pid = results.percentage_identity
        result_pid.to_csv(os.path.join(self.outdir, "pyani_anim.tab"), sep="\t")
        # Compare JSpecies output to results
        result_pid = result_pid.sort_index(axis=0).sort_index(axis=1) * 100.0
        diffmat = result_pid.values - self.target["ANIm"].values
        anim_diff = pd.DataFrame(
            diffmat, index=result_pid.index, columns=result_pid.columns
        )
        anim_diff.to_csv(os.path.join(self.outdir, "pyani_anim_diff.tab"), sep="\t")
        assert_less(anim_diff.abs().values.max(), self.tolerance["ANIm"])

    def test_anib_concordance(self):
        """ANIb results concordant with JSpecies.

        We expect ANIb results to be quite different, as the BLASTN
        algorithm changed substantially between BLAST and BLAST+
        """
        # Perform ANIb on the input directory contents
        outdir = os.path.join(self.outdir, "blastn")
        os.makedirs(outdir, exist_ok=True)
        fragfiles, fraglengths = anib.fragment_fasta_files(
            self.infiles, outdir, self.fragsize
        )
        jobgraph = anib.make_job_graph(
            self.infiles, fragfiles, anib.make_blastcmd_builder("ANIb", outdir)
        )
        # run_dependency_graph returns 0 on success
        assert_equal(0, run_mp.run_dependency_graph(jobgraph))
        results = anib.process_blast(outdir, self.orglengths, fraglengths, mode="ANIb")
        result_pid = results.percentage_identity
        result_pid.to_csv(os.path.join(self.outdir, "pyani_anib.tab"), sep="\t")
        # Compare JSpecies output to results. We do this in two blocks,
        # masked according to whether the expected result is greater than
        # 90% identity, or less than that threshold.
        # The complete difference matrix is written to output, though
        result_pid = result_pid.sort_index(axis=0).sort_index(axis=1) * 100.0
        lo_result = result_pid.mask(result_pid >= 90).fillna(0)
        hi_result = result_pid.mask(result_pid < 90).fillna(0)
        lo_target = self.target["ANIb"].mask(self.target["ANIb"] >= 90).fillna(0)
        hi_target = self.target["ANIb"].mask(self.target["ANIb"] < 90).fillna(0)
        lo_diffmat = lo_result.values - lo_target.values
        hi_diffmat = hi_result.values - hi_target.values
        diffmat = result_pid.values - self.target["ANIb"].values
        lo_diff = pd.DataFrame(
            lo_diffmat, index=result_pid.index, columns=result_pid.columns
        )
        hi_diff = pd.DataFrame(
            hi_diffmat, index=result_pid.index, columns=result_pid.columns
        )
        anib_diff = pd.DataFrame(
            diffmat, index=result_pid.index, columns=result_pid.columns
        )
        anib_diff.to_csv(os.path.join(self.outdir, "pyani_anib_diff.tab"), sep="\t")
        # Looser tolerance below the 90% identity threshold (see docstring)
        assert_less(lo_diff.abs().values.max(), self.tolerance["ANIb_lo"])
        assert_less(hi_diff.abs().values.max(), self.tolerance["ANIb_hi"])

    @nottest  # legacy BLAST is deprecated
    def test_aniblastall_concordance(self):
        """ANIblastall results concordant with JSpecies."""
        # Perform ANIblastall on the input directory contents
        outdir = os.path.join(self.outdir, "blastall")
        os.makedirs(outdir, exist_ok=True)
        fragfiles, fraglengths = anib.fragment_fasta_files(
            self.infiles, outdir, self.fragsize
        )
        jobgraph = anib.make_job_graph(
            self.infiles, fragfiles, anib.make_blastcmd_builder("ANIblastall", outdir)
        )
        assert_equal(0, run_mp.run_dependency_graph(jobgraph))
        results = anib.process_blast(
            outdir, self.orglengths, fraglengths, mode="ANIblastall"
        )
        result_pid = results.percentage_identity
        result_pid.to_csv(os.path.join(self.outdir, "pyani_aniblastall.tab"), sep="\t")
        # Compare JSpecies output to results
        result_pid = result_pid.sort_index(axis=0).sort_index(axis=1) * 100.0
        diffmat = result_pid.values - self.target["ANIb"].values
        aniblastall_diff = pd.DataFrame(
            diffmat, index=result_pid.index, columns=result_pid.columns
        )
        aniblastall_diff.to_csv(
            os.path.join(self.outdir, "pyani_aniblastall_diff.tab"), sep="\t"
        )
        assert_less(aniblastall_diff.abs().values.max(), self.tolerance["ANIblastall"])

    def test_tetra_concordance(self):
        """TETRA results concordant with JSpecies."""
        # Perform TETRA analysis
        zscores = dict()
        for filename in self.infiles:
            org = os.path.splitext(os.path.split(filename)[-1])[0]
            zscores[org] = tetra.calculate_tetra_zscore(filename)
        results = tetra.calculate_correlations(zscores)
        results.to_csv(os.path.join(self.outdir, "pyani_tetra.tab"), sep="\t")
        # Compare JSpecies output
        diffmat = results.values - self.target["Tetra"].values
        tetra_diff = pd.DataFrame(diffmat, index=results.index, columns=results.columns)
        tetra_diff.to_csv(os.path.join(self.outdir, "pyani_tetra_diff.tab"), sep="\t")
        assert_less(tetra_diff.abs().values.max(), self.tolerance["TETRA"])
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,801
|
froggleston/pyani
|
refs/heads/master
|
/setup.py
|
# try using distribute or setuptools or distutils.
try:
    import distribute_setup

    distribute_setup.use_setuptools()
except ImportError:
    # distribute_setup is optional; fall back to plain setuptools below
    pass

import setuptools

import os
import sys
import re

# Get long description from README.md
with open("README.md", "r") as dfh:
    long_description = dfh.read()

# parse version from package/module without importing or evaluating the code
with open(os.path.join("pyani", "__init__.py"), "r") as fh:
    for line in fh:
        m = re.search(r'^__version__ = "(?P<version>[^"]+)"$', line)
        if m:
            version = m.group("version")
            break
# NOTE(review): if no __version__ line matches, `version` stays undefined and
# the setup() call below raises NameError -- confirm pyani/__init__.py format

# Refuse to install under Python 2
if sys.version_info <= (3, 0):
    sys.stderr.write("ERROR: pyani requires Python 3 " + "or above...exiting.\n")
    sys.exit(1)

setuptools.setup(
    name="pyani",
    version=version,
    author="Leighton Pritchard",
    author_email="leighton.pritchard@hutton.ac.uk",
    description="pyani provides a package and script for calculation of genome-scale average nucleotide identity.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT",
    keywords="genome bioinformatics sequence",
    platforms="Posix; MacOS X",
    url="http://widdowquinn.github.io/pyani/",
    download_url="https://github.com/widdowquinn/pyani/releases",
    scripts=[
        os.path.join("bin", "average_nucleotide_identity.py"),
        os.path.join("bin", "genbank_get_genomes_by_taxon.py"),
        os.path.join("bin", "delta_filter_wrapper.py"),
    ],
    packages=["pyani"],
    package_data={"pyani": ["tests/test_JSpecies/*.tab"]},
    include_package_data=True,
    install_requires=["biopython", "matplotlib", "pandas", "scipy", "seaborn"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Topic :: Scientific/Engineering :: Bio-Informatics",
    ],
)
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,802
|
froggleston/pyani
|
refs/heads/master
|
/tests/test_dependencies.py
|
#!/usr/bin/env python
"""Tests for availability of pyani dependencies
We only test for dependencies from non-standard libraries.
These tests are intended to be run using the nose package
(see https://nose.readthedocs.org/en/latest/).
"""
import subprocess
import sys
from nose.tools import assert_equal, nottest
def test_import_biopython():
    """Test Biopython import."""
    import Bio  # noqa: F401 -- import success is the whole assertion
def test_import_matplotlib():
    """Test matplotlib import."""
    import matplotlib  # noqa: F401 -- import success is the whole assertion
def test_import_numpy():
    """Test numpy import."""
    import numpy  # noqa: F401 -- import success is the whole assertion
def test_import_pandas():
    """Test pandas import."""
    import pandas  # noqa: F401 -- import success is the whole assertion
def test_import_scipy():
    """Test scipy import."""
    import scipy  # noqa: F401 -- import success is the whole assertion
def test_run_blast():
    """Test that BLAST+ is runnable.

    Runs ``blastn -version`` and checks that stdout starts with the
    program name, confirming the binary is found on PATH.
    """
    cmd = "blastn -version"
    result = subprocess.run(
        cmd,
        shell=sys.platform != "win32",  # POSIX: let the shell parse the string
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=True,  # a non-zero exit raises CalledProcessError
    )
    print(result.stdout)
    # Plain assert replaces nose's assert_equal (nose is unmaintained and
    # does not run on modern Python; the assertion is behaviourally equal).
    assert result.stdout[:6] == b"blastn"
@nottest  # legacy BLAST is optional; keep this disabled by default
def test_run_blastall():
    """Test that legacy BLAST is runnable."""
    cmd = "blastall"
    # Can't use check=True, as blastall without arguments returns 1!
    result = subprocess.run(
        cmd,
        shell=sys.platform != "win32",
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    print(result.stdout)
    # blastall's usage banner begins with the program name at offset 1.
    assert_equal(result.stdout[1:9], b"blastall")
def test_run_nucmer():
    """Test that NUCmer is runnable.

    Runs ``nucmer --version`` and checks the program name in the output.
    """
    cmd = "nucmer --version"
    result = subprocess.run(
        cmd,
        shell=sys.platform != "win32",  # POSIX: let the shell parse the string
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=True,
    )
    print(result.stderr)  # NUCmer puts output to STDERR!
    # Plain assert replaces nose's assert_equal (nose is unmaintained).
    assert result.stderr[:6] == b"nucmer"
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,803
|
froggleston/pyani
|
refs/heads/master
|
/pyani/pyani_tools.py
|
# Copyright 2016-2019, The James Hutton Insitute
# Author: Leighton Pritchard
#
# This code is part of the pyani package, and is governed by its licence.
# Please see the LICENSE file that should have been included as part of
# this package.
"""Code to support pyani."""
import pandas as pd
from . import pyani_config
# Class to hold ANI dataframe results
class ANIResults:
    """Hold pairwise ANI results as labelled square dataframes.

    All four dataframes share the same genome labels for both index and
    columns: alignment lengths (NaN until set), similarity errors
    (default 0), percentage identity (default 1.0) and alignment
    coverage (default 1.0).
    """

    def __init__(self, labels, mode):
        """Initialise with four empty, labelled dataframes.

        - labels: sequence of genome labels (rows and columns)
        - mode: "ANIm", "ANIb" or "ANIblastall"; selects output filestems
        """
        self.alignment_lengths = pd.DataFrame(index=labels, columns=labels, dtype=float)
        self.similarity_errors = pd.DataFrame(
            index=labels, columns=labels, dtype=float
        ).fillna(0)
        self.percentage_identity = pd.DataFrame(
            index=labels, columns=labels, dtype=float
        ).fillna(1.0)
        self.alignment_coverage = pd.DataFrame(
            index=labels, columns=labels, dtype=float
        ).fillna(1.0)
        # Set True by callers when a zero-length alignment is encountered.
        self.zero_error = False
        self.mode = mode

    def add_tot_length(self, qname, sname, value, sym=True):
        """Add a total alignment length value (symmetrically when sym=True)."""
        self.alignment_lengths.loc[qname, sname] = value
        if sym:
            self.alignment_lengths.loc[sname, qname] = value

    def add_sim_errors(self, qname, sname, value, sym=True):
        """Add a similarity error value (symmetrically when sym=True)."""
        self.similarity_errors.loc[qname, sname] = value
        if sym:
            self.similarity_errors.loc[sname, qname] = value

    def add_pid(self, qname, sname, value, sym=True):
        """Add a percentage identity value (symmetrically when sym=True)."""
        self.percentage_identity.loc[qname, sname] = value
        if sym:
            self.percentage_identity.loc[sname, qname] = value

    def add_coverage(self, qname, sname, qcover, scover=None):
        """Add percentage coverage values to self.alignment_coverage.

        BUGFIX: test ``scover is not None`` instead of truthiness so that
        an explicit subject coverage of 0.0 is recorded rather than skipped.
        """
        self.alignment_coverage.loc[qname, sname] = qcover
        if scover is not None:
            self.alignment_coverage.loc[sname, qname] = scover

    @property
    def hadamard(self):
        """Return Hadamard matrix (element-wise identity * coverage)."""
        return self.percentage_identity * self.alignment_coverage

    @property
    def data(self):
        """Return zip of (dataframe, filestem) pairs for writing output."""
        stemdict = {
            "ANIm": pyani_config.ANIM_FILESTEMS,
            "ANIb": pyani_config.ANIB_FILESTEMS,
            "ANIblastall": pyani_config.ANIBLASTALL_FILESTEMS,
        }
        return zip(
            (
                self.alignment_lengths,
                self.percentage_identity,
                self.alignment_coverage,
                self.similarity_errors,
                self.hadamard,
            ),
            stemdict[self.mode],
        )
# Class to hold BLAST functions
class BLASTfunctions:
    """Pair a database-formatting callable with a BLASTN command callable."""

    def __init__(self, db_func, blastn_func):
        """Store the two command-construction callables."""
        self.db_func = db_func
        self.blastn_func = blastn_func
# Class to hold BLAST executables
class BLASTexes:
    """Pair a database-formatting executable path with a BLAST executable path."""

    def __init__(self, format_exe, blast_exe):
        """Store the executable paths."""
        self.format_exe = format_exe
        self.blast_exe = blast_exe
# Class to hold/build BLAST commands
class BLASTcmds:
    """Build database-formatting and BLASTN command lines.

    Delegates construction to the callables held by ``funcs`` using the
    executables named in ``exes``; ``prefix`` and ``outdir`` describe the
    output location.
    """

    def __init__(self, funcs, exes, prefix, outdir):
        """Store the command helpers and output settings."""
        self.funcs = funcs
        self.exes = exes
        self.prefix = prefix
        self.outdir = outdir

    def build_db_cmd(self, fname):
        """Return database format/build command."""
        built = self.funcs.db_func(fname, self.outdir, self.exes.format_exe)
        return built[0]

    def get_db_name(self, fname):
        """Return database filename."""
        built = self.funcs.db_func(fname, self.outdir, self.exes.format_exe)
        return built[1]

    def build_blast_cmd(self, fname, dbname):
        """Return BLASTN command for the query file against a database."""
        return self.funcs.blastn_func(fname, dbname, self.outdir, self.exes.blast_exe)
# Read sequence annotations in from file
def get_labels(filename, logger=None):
    """Return a dictionary of alternative sequence labels.

    - filename - path to file containing a tab-separated table of labels,
      formatted as <key>\\t<label>, one pair per line; may be None
    - logger - optional logger for progress and warning messages

    Always returns a dict (empty when filename is None; the old docstring
    incorrectly said "or None"). Malformed lines are skipped with a warning.
    """
    labeldict = {}
    if filename is not None:
        if logger:
            logger.info("Reading labels from %s", filename)
        with open(filename, "r") as ifh:
            for count, line in enumerate(ifh, 1):
                try:
                    key, label = line.strip().split("\t")
                except ValueError:
                    if logger:
                        logger.warning("Problem with class file: %s", filename)
                        # BUGFIX: pass the lazy %-args separately; the old
                        # call passed one tuple for two placeholders, which
                        # made logging raise a formatting error.
                        logger.warning("%d: %s", count, line.strip())
                        logger.warning("(skipping line)")
                    continue
                labeldict[key] = label
    return labeldict
|
{"/pyani/anim.py": ["/pyani/pyani_tools.py"]}
|
29,807
|
NatanaelGSSilva/backendhome
|
refs/heads/master
|
/config.py
|
class config:
    """Flask configuration object (loaded via ``app.config.from_object``)."""

    # SQLite database file used by the car-resale ("revenda") app.
    SQLALCHEMY_DATABASE_URI = 'sqlite:///database/revenda.db'
    # Disable SQLAlchemy's modification-tracking system (saves memory).
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # NOTE(review): SALT and JWT_SECRET_KEY are secrets committed to source
    # control; they should be moved to environment variables and rotated.
    SALT = "X#3jfk$%kKmGw&*jKLiPW@!jm345"
    JWT_SECRET_KEY = 'hjsdfhj#$@DFhsms@%ldkPç()H#Dnx3@'
    # Enable JWT token revocation via the blacklist loader.
    JWT_BLACKLIST_ENABLED = True
|
{"/app.py": ["/config.py", "/resources/carros.py", "/resources/propostas.py"], "/resources/propostas.py": ["/models/modelProposta.py"]}
|
29,808
|
NatanaelGSSilva/backendhome
|
refs/heads/master
|
/app.py
|
from flask import Flask
from config import config
from banco import db
from resources.carros import carros
from resources.marcas import marcas
from resources.usuarios import usuarios
from resources.propostas import propostas
from flask_jwt_extended import JWTManager
from blacklist import blacklist
import smtplib
from flask_cors import CORS
# Application wiring: config, database, JWT, CORS and route blueprints.
app = Flask(__name__)
app.config.from_object(config)
db.init_app(app)
jwt = JWTManager(app)
# Opens every route to all origins (not the best option, security-wise).
# The better approach is to indicate exactly which routes should be
# released for cross-origin access.
CORS(app)
app.register_blueprint(carros)
app.register_blueprint(marcas)
app.register_blueprint(usuarios)
app.register_blueprint(propostas)
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
    """Return True when the token's unique id (jti) has been revoked."""
    return decrypted_token['jti'] in blacklist
@app.route('/')
def raiz():
    """Root route: ensure tables exist, then show the site banner."""
    # NOTE(review): creating tables on every request to '/' is a side
    # effect better performed once at application startup.
    db.create_all()
    return '<h2>Revenda Herbie</h2>'
@app.route('/envia_email')
def envia():
    """Send a test e-mail through Gmail SMTP and report success.

    SECURITY NOTE(review): the credentials below are hard-coded in source;
    move them to configuration/environment variables and rotate them.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.starttls()
        server.login('conta.teste.laravel@gmail.com', 'conta#teste#laravel')
        server.set_debuglevel(1)
        msg = 'Subject: Teste PI2\nÓla Teste de Envio de e-mail pelo Python\nÉ bom esse Python!!'.encode(
            'utf-8')
        server.sendmail('conta.teste.laravel@gmail.com',
                        'dasilvanatanael700@gmail.com', msg)
    finally:
        # Close the SMTP connection even if sending fails (the original
        # leaked the connection on any exception before quit()).
        server.quit()
    return "OK! E-mail Enviado."
# Run the development server only when executed directly (not on import).
if __name__ == '__main__':
    app.run(debug=True)
|
{"/app.py": ["/config.py", "/resources/carros.py", "/resources/propostas.py"], "/resources/propostas.py": ["/models/modelProposta.py"]}
|
29,809
|
NatanaelGSSilva/backendhome
|
refs/heads/master
|
/models/modelProposta.py
|
from banco import db
from datetime import datetime
# Part five
class Proposta(db.Model):
    """SQLAlchemy model for a purchase proposal (bid) on a car."""

    __tablename__ = 'propostas'
    # Surrogate primary key.
    id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    # Bid amount offered by the prospective buyer.
    lance = db.Column(db.Float, nullable=False)
    # Buyer contact details.
    nomePessoa = db.Column(db.String(100), nullable=False)
    telefone = db.Column(db.String(40), nullable=False)
    email = db.Column(db.String(100), nullable=False)
    # Creation timestamp (UTC via datetime.utcnow).
    data_proposta = db.Column(
        db.DateTime, nullable=False, default=datetime.utcnow)
    # Car this proposal refers to.
    carro_id = db.Column(db.Integer, db.ForeignKey(
        'carros.id'), nullable=False)
    carro = db.relationship('Carro')

    def to_json(self):
        """Serialise the proposal (plus the related car's model) to a dict."""
        json_propostas = {
            'id': self.id,
            'lance': self.lance,
            'nomePessoa': self.nomePessoa,
            'telefone': self.telefone,
            'email': self.email,
            'modelo': self.carro.modelo,
            'carro_id': self.carro_id
        }
        return json_propostas

    @staticmethod
    def from_json(json_propostas):
        """Build a transient Proposta from a JSON dict.

        Missing keys produce None values; the NOT NULL constraints catch
        them at commit time.
        """
        lance = json_propostas.get('lance')
        carro_id = json_propostas.get('carro_id')
        nomePessoa = json_propostas.get('nomePessoa')
        telefone = json_propostas.get('telefone')
        email = json_propostas.get('email')
        return Proposta(lance=lance, carro_id=carro_id, nomePessoa=nomePessoa, telefone=telefone, email=email)
|
{"/app.py": ["/config.py", "/resources/carros.py", "/resources/propostas.py"], "/resources/propostas.py": ["/models/modelProposta.py"]}
|
29,810
|
NatanaelGSSilva/backendhome
|
refs/heads/master
|
/resources/carros.py
|
from flask import Blueprint, jsonify, request
from banco import db
from models.modelCarro import Carro
from flask_jwt_extended import jwt_required
carros = Blueprint('carros', __name__)
@carros.route('/carros')
def listagem():
    """List every car, ordered by model name, as a JSON array."""
    todos = Carro.query.order_by(Carro.modelo).all()
    return jsonify([veiculo.to_json() for veiculo in todos])
@carros.route('/carros', methods=['POST'])
@jwt_required
def inclusao():
    """Create a car from the JSON request body (requires a valid JWT)."""
    carro = Carro.from_json(request.json)
    db.session.add(carro)
    db.session.commit()
    return jsonify(carro.to_json()), 201
# Part 1 of the assignment
@carros.errorhandler(404)
def id_invalido(error):
    """Blueprint-level 404 handler that returns a JSON error body."""
    payload = {'id': 0, 'message': 'not found'}
    return jsonify(payload), 404
@carros.route('/carros/<int:id>', methods=['PUT'])
@jwt_required
def alteracao(id):
    """Replace every field of an existing car from the JSON body."""
    # Fetch the record to be changed (or raise 404 - not found).
    carro = Carro.query.get_or_404(id)
    # Read the data sent in the request.
    # NOTE(review): direct indexing raises KeyError (HTTP 500) when a
    # field is missing — consider request.json.get with validation.
    carro.modelo = request.json['modelo']
    carro.cor = request.json['cor']
    carro.ano = request.json['ano']
    carro.preco = request.json['preco']
    carro.foto = request.json['foto']
    carro.destaque = request.json['destaque']
    carro.marca_id = request.json['marca_id']
    # Update (the id already exists).
    db.session.add(carro)
    db.session.commit()
    # NOTE(review): a 204 response should carry no body; clients will
    # typically discard the JSON returned here.
    return jsonify(carro.to_json()), 204
@carros.route('/carros/<int:id>')
def consulta(id):
    """Return a single car by id (404 when it does not exist)."""
    registro = Carro.query.get_or_404(id)
    return jsonify(registro.to_json()), 200
@carros.route('/carros/<int:id>', methods=['DELETE'])
@jwt_required
def exclui(id):
    """Delete a car by id (requires a valid JWT).

    NOTE(review): no existence check is made; deleting a missing id still
    returns 200 with the success message.
    """
    Carro.query.filter_by(id=id).delete()
    db.session.commit()
    return jsonify({'id': id, 'message': 'Carro excluído com sucesso'}), 200
# Part 2 of the assignment
@carros.route('/carros/destaque')
def destaqueCarro():
    """List only highlighted cars (destaque == 'x'), ordered by model."""
    destacados = (
        Carro.query.order_by(Carro.modelo).filter(Carro.destaque == 'x').all()
    )
    return jsonify([veiculo.to_json() for veiculo in destacados])
# @carros.route('/carros/destacar/<int:id>',methods=['PUT'])
# def destacarCarro(id):
# carro = Carro.query.get_or_404(id)
# carro.destaque = request.json['destaque']
# db.session.add(carro)
# db.session.commit()
# return jsonify(carro.to_json()), 204
@carros.route('/carros/destacar/<int:id>', methods=['PUT'])
# @cross_origin()
def destacaVeiculo(id):
    """Toggle a car's highlight flag and report the new state."""
    carro = Carro.query.get_or_404(id)
    # Flip the flag: 'x' (highlighted) <-> '-' (not highlighted).
    carro.destaque = '-' if carro.destaque == 'x' else 'x'
    db.session.add(carro)
    db.session.commit()
    if carro.destaque == 'x':
        return jsonify({'id': id, 'message': 'Veículo destacado com sucesso'}), 200
    else:
        return jsonify({'id': id, 'message': 'Veículo retirado dos destaques'}), 200
# Part 4 of the assignment
@carros.route('/carros/filtro/<palavra>')
def pesquisa(palavra):
    """List cars whose model contains ``palavra`` (404 when none match)."""
    # Fetch all matching records ordered by model.
    resultados = Carro.query.order_by(Carro.modelo).filter(
        Carro.modelo.like(f'%{palavra}%')).all()
    # BUGFIX: the old code compared a Query object to [] (always False, so
    # the 404 branch was unreachable) and the branch jsonified a *set*
    # literal, which jsonify cannot serialise. Check the materialised list
    # and return a proper JSON object instead.
    if not resultados:
        return jsonify({'message': 'Não foi encontrado veiculos com esse modelo'}), 404
    return jsonify([carro.to_json() for carro in resultados])
|
{"/app.py": ["/config.py", "/resources/carros.py", "/resources/propostas.py"], "/resources/propostas.py": ["/models/modelProposta.py"]}
|
29,811
|
NatanaelGSSilva/backendhome
|
refs/heads/master
|
/resources/propostas.py
|
from flask import Blueprint, jsonify, request
from banco import db
from models.modelProposta import Proposta
from models.modelCarro import Carro
from flask_jwt_extended import jwt_required
from datetime import datetime, timedelta
# from flask_cors import CORS, cross_origin
import smtplib
propostas = Blueprint('propostas', __name__)
@propostas.route('/propostas')
def listagem():
    """List every proposal as a JSON array."""
    todas = Proposta.query.all()
    return jsonify([registro.to_json() for registro in todas])
@propostas.route('/propostas', methods=['POST'])
# @jwt_required
# @cross_origin()
def inclusao():
    """Create a proposal from the JSON request body.

    The commented block below is a disabled e-mail notification draft;
    kept for reference.
    """
    proposta = Proposta.from_json(request.json)
    # server = smtplib.SMTP('smtp.gmail.com', 587)
    # server.starttls()
    # server.login('email', 'senha')
    # server.set_debuglevel(1)
    # nomePessoa = request.json['nomePessoa']
    # email = request.json['email']
    # telefone = request.json['telefone']
    # lance = request.json['lance']
    # modelo = request.json['carro_id']
    # msg = 'Ola senhor(a) ' + nomePessoa + 'o seu lance foi ' + str(lance) + ', tal proposta sera avaliada e retornaremos por email ' + \
    #     email + ' ou telefone ' + telefone + 'sobre o veiculo' + str(modelo)
    # server.sendmail('f{email}', email, msg)
    # server.quit()
    db.session.add(proposta)
    db.session.commit()
    return jsonify(proposta.to_json()), 201
@propostas.route('/propostas/aceitar', methods=['POST'])
# @jwt_required
def aceitar():
    """Accept a proposal and e-mail the buyer about it.

    SECURITY NOTE(review): SMTP credentials below are placeholder strings
    ('email', 'senha') hard-coded in source — move real credentials to
    configuration before enabling this route in production.
    """
    proposta = Proposta.from_json(request.json)
    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.starttls()
        server.login('email', 'senha')
        server.set_debuglevel(1)
        nomePessoa = request.json['nomePessoa']
        email = request.json['email']
        telefone = request.json['telefone']  # read for validation; not used in msg
        lance = request.json['lance']
        modelo = request.json['modelo']
        msg = 'Ola senhor(a) ' + nomePessoa + 'o seu lance foi ' + str(lance) + \
            'foi aceito e esperaramos o senhor em nossa css para comprar sua nave '
        # BUGFIX: the sender was the literal string 'f{email}' (a missing
        # f-prefix on the f-string); use the address variable instead.
        server.sendmail(email, email, msg)
    finally:
        # Always close the SMTP connection, even when sending fails.
        server.quit()
    return jsonify(proposta.to_json()), 201
@propostas.errorhandler(404)
def id_invalido(error):
    """Blueprint-level 404 handler that returns a JSON error body."""
    payload = {'id': 0, 'message': 'not found'}
    return jsonify(payload), 404
@propostas.route('/propostas/<int:id>', methods=['PUT'])
def alteracao(id):
    """Replace every field of an existing proposal from the JSON body."""
    # Fetch the record to be changed (or raise 404 - not found).
    proposta = Proposta.query.get_or_404(id)
    # Read the data sent in the request.
    proposta.lance = request.json['lance']
    proposta.carro_id = request.json['carro_id']
    proposta.nomePessoa = request.json['nomePessoa']
    proposta.telefone = request.json['telefone']
    proposta.email = request.json['email']
    # Update (the id already exists).
    db.session.add(proposta)
    db.session.commit()
    # NOTE(review): a 204 response should carry no body.
    return jsonify(proposta.to_json()), 204
@propostas.route('/propostas/<int:id>')
def consulta(id):
    """Return a single proposal by id (404 when it does not exist)."""
    registro = Proposta.query.get_or_404(id)
    return jsonify(registro.to_json()), 200
@propostas.route('/propostas/<int:id>', methods=['DELETE'])
def exclui(id):
    """Delete a proposal by id.

    NOTE(review): no existence check; a missing id still returns 200.
    """
    Proposta.query.filter_by(id=id).delete()
    db.session.commit()
    return jsonify({'id': id, 'message': 'Proposta excluída com sucesso'}), 200
# Part 7 of the assignment
# sketch: select count(*) as contagem, faixa salarial from usuarios GROUP BY;
@propostas.route('/propostas/estatisticas')
def estatisticas():
    """Return the number of proposals grouped by car id.

    Responds with {"numLance": 0} when there are no proposals, otherwise
    with rows of (carro_id, count).
    """
    if db.session.query(Proposta).count() == 0:
        # No proposals yet: report zero instead of an empty grouping.
        numLance = 0
    else:
        numLance = db.session.query(Proposta.carro_id, db.func.count(
            Proposta.id)).group_by(Proposta.carro_id).all()
    # Removed the unused locals lanceBaixo/lanceAlto (assigned but never
    # read) and the dead commented-out queries from the original.
    return jsonify({'numLance': numLance}), 200
@propostas.route('/propostas/modelos')
def carrosgraf():
    """Return, per car model, a proposal count (chart data)."""
    total = db.session.query(db.func.count(
        Proposta.carro_id)).group_by(Proposta.carro_id).all()
    propostas = db.session.query(Carro.modelo, db.func.count(
        Proposta.carro_id)/2).group_by(Carro.modelo).all()
    lista = []
    # BUGFIX: the original reset the counter with `num = +1` (i.e. num = 1
    # forever), so every model after the first read total[1]. enumerate
    # advances the index correctly. Debug print() calls were removed.
    # NOTE(review): this pairing assumes both group-by queries return rows
    # in the same order — verify against the database backend.
    for num, proposta in enumerate(propostas):
        lista.append({'modelo': proposta[0], 'num': total[num][0]})
    return jsonify(lista), 201
@propostas.route('/cadastros/propostas')
def propostascad():
    """Count proposals per year-month over the last 365 days."""
    # NOTE(review): db.func.year/month and string `+` concatenation are
    # database-specific SQL (MySQL-style); confirm this works against the
    # SQLite URI configured in config.py.
    propostas = db.session.query(db.func.year(Proposta.data_proposta)+'-'+db.func.month(Proposta.data_proposta), db.func.count(Proposta.id)) \
        .group_by(db.func.year(Proposta.data_proposta)+'-'+db.func.month(Proposta.data_proposta)) \
        .filter(Proposta.data_proposta > datetime.today() - timedelta(365))
    print(propostas)
    lista = []
    for proposta in propostas:
        lista.append({'data': proposta[0], 'num': proposta[1]})
    print(lista)
    return jsonify(lista), 201
|
{"/app.py": ["/config.py", "/resources/carros.py", "/resources/propostas.py"], "/resources/propostas.py": ["/models/modelProposta.py"]}
|
29,818
|
astropenguin/morecopy
|
refs/heads/main
|
/morecopy/copy.py
|
__all__ = ["copy"]
# standard library
from copy import copy as stdlib_copy
from copy import _copy_dispatch as stdlib_copiers # type: ignore
from threading import Lock
from typing import TypeVar
# submodules
from .copiers import copiers
# type hints
T = TypeVar("T")
# lock object
lock = Lock()
# copy function
def copy(obj: T) -> T:
    """Copy an object.

    Unlike ``copy.copy``, this function even copies an immutable object
    as a different one if a dedicated copier is defined in the package.
    Otherwise, it is equivalent to ``copy.copy``.

    Args:
        obj: An object to be copied.

    Returns:
        An object copied from the original.
    """
    # The stdlib dispatch table is module-global state, so the swap must
    # be serialised and undone even on error; hence the lock + finally.
    with lock:
        # Snapshot the stdlib table so it can be restored exactly.
        original = stdlib_copiers.copy()

        try:
            # Temporarily install this package's copiers, then delegate.
            stdlib_copiers.update(copiers)
            return stdlib_copy(obj)
        finally:
            # Restore the stdlib table to its pre-call contents.
            stdlib_copiers.clear()
            stdlib_copiers.update(original)
|
{"/morecopy/copy.py": ["/morecopy/copiers.py"], "/morecopy/__init__.py": ["/morecopy/copy.py", "/morecopy/copiers.py"], "/tests/test_copiers.py": ["/morecopy/copiers.py"], "/tests/test_copy.py": ["/morecopy/copy.py"]}
|
29,819
|
astropenguin/morecopy
|
refs/heads/main
|
/morecopy/copiers.py
|
__all__ = ["copier_for"]
# standard library
from copy import copy
from types import FunctionType
from typing import Any, Callable, Dict, Iterable, TypeVar
# type hints
T = TypeVar("T")
FT = TypeVar("FT", bound=FunctionType)
IT = TypeVar("IT", bound=Iterable)
Copier = Callable[[T], T]
# decorator
def copier_for(type_: Any) -> Callable[[Copier[T]], Copier[T]]:
    """Return a decorator registering a copier for ``type_``.

    The decorated function is stored in the module-level ``copiers``
    mapping and returned unchanged, so decorators can be stacked.
    """

    def decorate(func: Copier[T]) -> Copier[T]:
        copiers[type_] = func
        return func

    return decorate
# builtin copiers
copiers: Dict[Any, Copier[Any]] = {}
@copier_for(int)
@copier_for(float)
@copier_for(complex)
@copier_for(str)
@copier_for(bytes)
@copier_for(range)
@copier_for(slice)
def copy_by_repr(obj: T) -> T:
    """Copy an object by evaluating its repr string.

    Valid for these builtin types because their repr is a round-trip
    literal; eval() only ever sees reprs of the registered builtin types.
    NOTE(review): for values CPython caches (small ints, some strings)
    eval may return the identical object — confirm callers accept that.
    """
    return eval(repr(obj))
@copier_for(tuple)
@copier_for(frozenset)
def copy_by_type(obj: IT) -> IT:
    """Copy an object by recreating an object of its type.

    The generator expression is deliberate: calling ``tuple(t)`` or
    ``frozenset(s)`` directly can (in CPython) return the argument itself
    unchanged, while consuming a generator forces a genuinely new object.
    """
    return type(obj)(item for item in obj)  # type: ignore
@copier_for(FunctionType)
def copy_function(obj: FT) -> FT:
    """Copy a function object by recreating it.

    Rebuilds the function from its code object and globals, then copies
    the mutable attributes and reassigns the immutable ones so the clone
    is indistinguishable from the original (but a distinct object).
    """
    copied = type(obj)(
        obj.__code__,
        obj.__globals__,
        obj.__name__,
        obj.__defaults__,
        obj.__closure__,
    )

    # mutable objects are copied (shared dicts would alias state).
    copied.__annotations__ = copy(obj.__annotations__)
    copied.__dict__ = copy(obj.__dict__)
    copied.__kwdefaults__ = copy(obj.__kwdefaults__)

    # immutable objects are just assigned.
    copied.__doc__ = obj.__doc__
    copied.__module__ = obj.__module__
    copied.__name__ = obj.__name__
    copied.__qualname__ = obj.__qualname__

    return copied
|
{"/morecopy/copy.py": ["/morecopy/copiers.py"], "/morecopy/__init__.py": ["/morecopy/copy.py", "/morecopy/copiers.py"], "/tests/test_copiers.py": ["/morecopy/copiers.py"], "/tests/test_copy.py": ["/morecopy/copy.py"]}
|
29,820
|
astropenguin/morecopy
|
refs/heads/main
|
/morecopy/__init__.py
|
# Public API of the package.
__all__ = [
    "copy",
    "copiers",
    "copier_for",
]
__version__ = "0.3.0"


# submodules
# NOTE: the star imports re-export the public API; the name ``copy`` ends
# up bound to the *function* from .copy (its __all__ lists "copy"), which
# shadows the submodule binding made by ``from . import copy`` above.
from . import copy
from . import copiers
from .copy import *
from .copiers import *
|
{"/morecopy/copy.py": ["/morecopy/copiers.py"], "/morecopy/__init__.py": ["/morecopy/copy.py", "/morecopy/copiers.py"], "/tests/test_copiers.py": ["/morecopy/copiers.py"], "/tests/test_copy.py": ["/morecopy/copy.py"]}
|
29,821
|
astropenguin/morecopy
|
refs/heads/main
|
/tests/test_copiers.py
|
# standard library
from types import FunctionType, LambdaType
from typing import Type, TypeVar
# dependencies
from morecopy.copiers import copiers
from pytest import mark
# type hints
T = TypeVar("T")
# test data
def function(a: int, b: int) -> int:
    """Sample named function used as FunctionType test data."""
    return b + a
# Parametrisation header and (type, sample value) pairs shared by all
# tests in this module; the lambda covers LambdaType alongside FunctionType.
test_header = "type_, value"
test_data = [
    (int, 1234567890),
    (float, 1.234567890),
    (complex, 1.2345 + 6.7890j),
    (str, "lorem ipsum"),
    (bytes, b"lorem ipsum"),
    (tuple, (123, 4.56, 7.8e90)),
    (range, range(1234567890)),
    (slice, slice(1234, 5678, 90)),
    (frozenset, frozenset({123, 4.56, 7.8e90})),
    (FunctionType, function),
    (LambdaType, lambda a, b: a + b),
]
# test functions
@mark.parametrize(test_header, test_data)
def test_copier_eq(type_: Type[T], value: T) -> None:
    """Copied value must equal the original.

    Functions compare by identity, so FunctionType is skipped here.
    """
    if type_ is FunctionType:
        return

    assert value == copiers[type_](value)
@mark.parametrize(test_header, test_data)
def test_copier_is(type_: Type[T], value: T) -> None:
    """The copy must be a distinct object, even for immutable types."""
    assert value is not copiers[type_](value)
|
{"/morecopy/copy.py": ["/morecopy/copiers.py"], "/morecopy/__init__.py": ["/morecopy/copy.py", "/morecopy/copiers.py"], "/tests/test_copiers.py": ["/morecopy/copiers.py"], "/tests/test_copy.py": ["/morecopy/copy.py"]}
|
29,822
|
astropenguin/morecopy
|
refs/heads/main
|
/tests/test_copy.py
|
# standard library
from copy import copy as stdlib_copy
from types import FunctionType, LambdaType
from typing import Type, TypeVar
# dependencies
from morecopy.copy import copy
from pytest import mark
# type hints
T = TypeVar("T")
# test data
def function(a: int, b: int) -> int:
    """Sample named function used as FunctionType test data."""
    return b + a
# Parametrisation header and (type, sample value) pairs shared by all
# tests in this module; the lambda covers LambdaType alongside FunctionType.
test_header = "type_, value"
test_data = [
    (int, 1234567890),
    (float, 1.234567890),
    (complex, 1.2345 + 6.7890j),
    (str, "lorem ipsum"),
    (bytes, b"lorem ipsum"),
    (tuple, (123, 4.56, 7.8e90)),
    (range, range(1234567890)),
    (slice, slice(1234, 5678, 90)),
    (frozenset, frozenset({123, 4.56, 7.8e90})),
    (FunctionType, function),
    (LambdaType, lambda a, b: a + b),
]
# test functions
@mark.parametrize(test_header, test_data)
def test_copy_eq(type_: Type[T], value: T) -> None:
    """Both morecopy.copy and stdlib copy must preserve equality.

    Functions compare by identity, so FunctionType is skipped here.
    """
    if type_ is FunctionType:
        return

    assert value == copy(value)
    assert value == stdlib_copy(value)
@mark.parametrize(test_header, test_data)
def test_copy_is(type_: Type[T], value: T) -> None:
    """morecopy.copy returns a new object where stdlib copy returns the
    identical one (for these immutable types)."""
    assert value is not copy(value)
    assert value is stdlib_copy(value)
|
{"/morecopy/copy.py": ["/morecopy/copiers.py"], "/morecopy/__init__.py": ["/morecopy/copy.py", "/morecopy/copiers.py"], "/tests/test_copiers.py": ["/morecopy/copiers.py"], "/tests/test_copy.py": ["/morecopy/copy.py"]}
|
29,964
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/migration/alembic/versions/fac66b439033_add_view_model.py
|
"""Add view model
Revision ID: fac66b439033
Revises: 309cf493a1e2
Create Date: 2023-03-07 12:42:08.667620
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "fac66b439033"
down_revision = "309cf493a1e2"
branch_labels = None
depends_on = None
def upgrade():
    """Create the analysisviews table holding per-datastack view metadata."""
    op.create_table(
        "analysisviews",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("table_name", sa.String(length=100), nullable=False),
        sa.Column("description", sa.Text(), nullable=False),
        sa.Column("datastack_name", sa.String(length=100), nullable=False),
        # Voxel resolution of the view's coordinates, per axis.
        sa.Column("voxel_resolution_x", sa.Float(), nullable=False),
        sa.Column("voxel_resolution_y", sa.Float(), nullable=False),
        sa.Column("voxel_resolution_z", sa.Float(), nullable=False),
        sa.Column("notice_text", sa.Text(), nullable=True),
        sa.Column("live_compatible", sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
def downgrade():
    """Drop the analysisviews table, reversing upgrade()."""
    op.drop_table("analysisviews")
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,965
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/migration/alembic/versions/814d72d74e3b_add_version_error_table.py
|
"""Add version error table
Revision ID: 814d72d74e3b
Revises: 975a79461cab
Create Date: 2022-09-15 12:23:50.769937
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "814d72d74e3b"
down_revision = "975a79461cab"
branch_labels = None
depends_on = None
def upgrade():
    """Create the version_error table recording per-version build errors."""
    op.create_table(
        "version_error",
        sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
        # Arbitrary JSON payload describing the error.
        sa.Column(
            "error",
            postgresql.JSON(astext_type=sa.Text()),
            autoincrement=False,
            nullable=True,
        ),
        sa.Column(
            "analysisversion_id", sa.INTEGER(), autoincrement=False, nullable=True
        ),
        sa.ForeignKeyConstraint(
            ["analysisversion_id"],
            ["analysisversion.id"],
            name="version_error_analysisversion_id_fkey",
        ),
        sa.PrimaryKeyConstraint("id", name="version_error_pkey"),
    )
def downgrade():
    """Drop the version_error table, reversing upgrade().

    BUGFIX: upgrade() creates a table named "version_error"; the old code
    tried to drop "versionerror", which does not exist, so the downgrade
    always failed.
    """
    op.drop_table("version_error")
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,966
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/tests/test_schema.py
|
import marshmallow
from emannotationschemas.errors import UnknownAnnotationTypeException
import pytest
from sqlalchemy.ext.declarative.api import DeclarativeMeta
def test_get_schema(dadb_interface):
    """get_schema returns a marshmallow schema class for a known type and
    raises UnknownAnnotationTypeException for an unknown one."""
    valid_schema = dadb_interface.schema.get_schema("synapse")
    assert isinstance(valid_schema, marshmallow.schema.SchemaMeta)

    # BUGFIX: the old code asserted on a variable assigned *inside* the
    # pytest.raises block; since the call raises before assignment, that
    # assert hit a NameError. The raises context alone is the assertion.
    with pytest.raises(UnknownAnnotationTypeException):
        dadb_interface.schema.get_schema("bad_schema")
def test_get_flattened_schema(dadb_interface):
    """get_flattened_schema returns a marshmallow schema class."""
    flat_schema = dadb_interface.schema.get_flattened_schema("synapse")
    assert isinstance(flat_schema, marshmallow.schema.SchemaMeta)
def test_create_annotation_model(dadb_interface):
    """create_annotation_model yields a SQLAlchemy declarative model."""
    model = dadb_interface.schema.create_annotation_model(
        "test_synapse_2", "synapse"
    )
    assert isinstance(model, DeclarativeMeta)
def test_create_segmentation_model(dadb_interface):
    """create_segmentation_model yields a SQLAlchemy declarative model."""
    seg_model = dadb_interface.schema.create_segmentation_model(
        "test_synapse_2", "synapse", "test_annodb"
    )
    assert isinstance(seg_model, DeclarativeMeta)
def test_create_reference_annotation_model(dadb_interface):
    """create_reference_annotation_model yields a declarative model."""
    ref_model = dadb_interface.schema.create_reference_annotation_model(
        "test_ref_table_2", "presynaptic_bouton_type", "test_synapse_2"
    )
    assert isinstance(ref_model, DeclarativeMeta)
def test_get_split_model(dadb_interface):
    """get_split_models returns annotation and segmentation models."""
    annotation, segmentation = dadb_interface.schema.get_split_models(
        "test_synapse_2", "synapse", "test_annodb"
    )
    assert isinstance(annotation, DeclarativeMeta)
    assert isinstance(segmentation, DeclarativeMeta)
def test_get_split_model_with_no_seg_columns(dadb_interface):
    """A reference schema without segmentation columns yields only the
    annotation model; the segmentation model is None."""
    table_metadata = {"reference_table": "test_synapse_2"}
    anno_model, seg_model = dadb_interface.schema.get_split_models(
        table_name="test_simple_group",
        schema_type="reference_simple_group",
        segmentation_source="test_annodb",
        table_metadata=table_metadata,
    )
    assert isinstance(anno_model, DeclarativeMeta)
    # BUGFIX: compare to None with `is`, not `==` (PEP 8 E711; `==` can be
    # hijacked by SQLAlchemy-style operator overloading on model classes).
    assert seg_model is None
def test_create_flat_model(dadb_interface):
    """create_flat_model yields a SQLAlchemy declarative model."""
    flat_model = dadb_interface.schema.create_flat_model(
        "test_flat_table_1",
        "synapse",
        "test_annodb",
    )
    assert isinstance(flat_model, DeclarativeMeta)
def test_flattened_schema_data(dadb_interface):
    """Nested *_pt position dicts flatten to <name>_position keys."""
    nested = {
        "id": 1,
        "pre_pt": {"position": [222, 123, 1232]},
        "ctr_pt": {"position": [121, 123, 1232]},
        "post_pt": {"position": [555, 555, 5555]},
    }
    expected = {
        "ctr_pt_position": [121, 123, 1232],
        "id": 1,
        "post_pt_position": [555, 555, 5555],
        "pre_pt_position": [222, 123, 1232],
    }
    assert dadb_interface.schema.flattened_schema_data(nested) == expected
def test_split_flattened_schema(dadb_interface):
    """Splitting the flattened schema yields two marshmallow classes."""
    anno_part, seg_part = dadb_interface.schema.split_flattened_schema("synapse")
    assert isinstance(anno_part, marshmallow.schema.SchemaMeta)
    assert isinstance(seg_part, marshmallow.schema.SchemaMeta)
def test_split_flattened_schema_data(dadb_interface):
    """Splitting flattened synapse data yields a non-empty annotation part."""
    test_data = {
        "id": 1,
        "pre_pt": {"position": [222, 123, 1232]},
        "ctr_pt": {"position": [121, 123, 1232]},
        "post_pt": {"position": [555, 555, 5555]},
    }
    flat_anno_data, flat_seg_data = dadb_interface.schema.split_flattened_schema_data(
        "synapse", test_data
    )
    # BUGFIX: the old line `assert flat_anno_data, flat_seg_data is False`
    # parsed as assert-with-message: `flat_seg_data is False` was only the
    # failure message and was never checked.
    assert flat_anno_data
    # TODO(review): add a concrete expectation for flat_seg_data's contents.
    assert flat_seg_data is not None
def test__parse_schema_metadata_params(dadb_interface):
    """Reference-table name and update-tracking flag should be extracted."""
    metadata = {"reference_table": "some_other_table", "track_target_id_updates": True}
    parsed = dadb_interface.schema._parse_schema_metadata_params(
        "presynaptic_bouton_type", "test_table_3", metadata, ["some_other_table"]
    )
    assert parsed == ("some_other_table", True)
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,967
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/migration/alembic/versions/8fdc843fc202_adding_permission_and_last_modified.py
|
"""adding permission and last modified
Revision ID: 8fdc843fc202
Revises: 6e7f580ff680
Create Date: 2022-10-17 14:11:33.017738
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "8fdc843fc202"
down_revision = "6e7f580ff680"
branch_labels = None
depends_on = None
def upgrade():
    """Add read/write permission enum columns and last_modified timestamp."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Create the shared Postgres ENUM type first so the columns added below
    # can reference it.
    permission_enum = postgresql.ENUM(
        "PRIVATE", "GROUP", "PUBLIC", name="readwrite_permission"
    )
    permission_enum.create(op.get_bind())
    # Columns are added as nullable so existing rows do not violate NOT NULL.
    op.add_column(
        "annotation_table_metadata",
        sa.Column(
            "write_permission",
            postgresql.ENUM("PRIVATE", "GROUP", "PUBLIC", name="readwrite_permission"),
            nullable=True,
        ),
    )
    op.add_column(
        "annotation_table_metadata",
        sa.Column(
            "read_permission",
            postgresql.ENUM("PRIVATE", "GROUP", "PUBLIC", name="readwrite_permission"),
            nullable=True,
        ),
    )
    op.add_column(
        "annotation_table_metadata",
        sa.Column("last_modified", sa.DateTime(), nullable=True),
    )
    # ### end Alembic commands ###
    # Backfill existing rows, then tighten all three columns to NOT NULL.
    op.execute("UPDATE annotation_table_metadata SET read_permission = 'PUBLIC'")
    op.execute("UPDATE annotation_table_metadata SET write_permission = 'PRIVATE'")
    op.execute("UPDATE annotation_table_metadata SET last_modified = current_timestamp")
    op.alter_column("annotation_table_metadata", "write_permission", nullable=False)
    op.alter_column("annotation_table_metadata", "read_permission", nullable=False)
    op.alter_column("annotation_table_metadata", "last_modified", nullable=False)
def downgrade():
    """Remove permission and last_modified columns and the enum type."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("annotation_table_metadata", "last_modified")
    op.drop_column("annotation_table_metadata", "read_permission")
    op.drop_column("annotation_table_metadata", "write_permission")
    # ### end Alembic commands ###
    # Also drop the ENUM type created in upgrade(); without this the
    # migration is not fully reversible — re-running upgrade() would fail
    # on CREATE TYPE because "readwrite_permission" still exists.
    permission_enum = postgresql.ENUM(
        "PRIVATE", "GROUP", "PUBLIC", name="readwrite_permission"
    )
    permission_enum.drop(op.get_bind(), checkfirst=True)
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,968
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/database.py
|
import logging
from contextlib import contextmanager
from typing import List
from sqlalchemy import create_engine, func, inspect, or_
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.ext.declarative.api import DeclarativeMeta
from sqlalchemy.orm import Session, scoped_session, sessionmaker
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.schema import MetaData
from sqlalchemy.sql.schema import Table
from .errors import TableAlreadyExists, TableNameNotFound, TableNotInMetadata
from .models import AnnoMetadata, Base, SegmentationMetadata, AnalysisView
from .schema import DynamicSchemaClient
class DynamicAnnotationDB:
    """Database access layer for one aligned-volume annotation database.

    Owns a SQLAlchemy engine and scoped session, ensures the metadata
    bookkeeping tables exist, and caches dynamically generated table
    models for querying.
    """

    def __init__(self, sql_url: str, pool_size=5, max_overflow=5) -> None:
        self._cached_session = None
        self._cached_tables = {}
        self._engine = create_engine(
            sql_url, pool_recycle=3600, pool_size=pool_size, max_overflow=max_overflow
        )
        self.base = Base
        self.base.metadata.bind = self._engine
        # Only the metadata bookkeeping tables are created eagerly; annotation
        # tables themselves are created elsewhere on demand.
        self.base.metadata.create_all(
            tables=[AnnoMetadata.__table__, SegmentationMetadata.__table__],
            checkfirst=True,
        )
        self.session = scoped_session(
            sessionmaker(bind=self.engine, autocommit=False, autoflush=False)
        )
        self.schema_client = DynamicSchemaClient()
        self._inspector = inspect(self.engine)
        # NOTE: the original code re-initialized _cached_session and
        # _cached_tables here a second time; the redundant duplicates were
        # removed (they were already set at the top of __init__).

    @property
    def inspector(self):
        """SQLAlchemy inspector bound to this engine."""
        return self._inspector

    @property
    def engine(self):
        """The underlying SQLAlchemy engine."""
        return self._engine

    @property
    def cached_session(self) -> Session:
        """Lazily created session reused until commit/close resets it."""
        if self._cached_session is None:
            self._cached_session = self.session()
        return self._cached_session

    @contextmanager
    def session_scope(self):
        """Context manager yielding the cached session.

        Rolls back and re-raises on error; always closes and clears the
        cached session on exit.
        """
        try:
            yield self.cached_session
        except Exception as e:
            self.cached_session.rollback()
            logging.exception(f"SQL Error: {e}")
            raise e
        finally:
            self.cached_session.close()
            self._cached_session = None

    def commit_session(self):
        """Commit the cached session, rolling back and re-raising on error."""
        try:
            self.cached_session.commit()
        except Exception as e:
            self.cached_session.rollback()
            logging.exception(f"SQL Error: {e}")
            raise e
        finally:
            self.cached_session.close()
            self._cached_session = None

    def get_table_sql_metadata(self, table_name: str):
        """Reflect the database and return the raw SQLAlchemy Table object."""
        self.base.metadata.reflect(bind=self.engine)
        return self.base.metadata.tables[table_name]

    def get_views(self, datastack_name: str):
        """Return all AnalysisView rows for a datastack."""
        with self.session_scope() as session:
            query = session.query(AnalysisView).filter(
                AnalysisView.datastack_name == datastack_name
            )
            return query.all()

    def get_view_metadata(self, datastack_name: str, view_name: str):
        """Return metadata dict for a single named view in a datastack."""
        with self.session_scope() as session:
            query = (
                session.query(AnalysisView)
                .filter(AnalysisView.table_name == view_name)
                .filter(AnalysisView.datastack_name == datastack_name)
            )
            result = query.one()
            if hasattr(result, "__dict__"):
                return self.get_automap_items(result)
            else:
                return result[0]

    def get_table_metadata(self, table_name: str, filter_col: str = None):
        """Return metadata for a table, optionally a single column of it.

        With ``filter_col`` set, only that AnnoMetadata attribute is
        returned; otherwise annotation metadata is outer-joined with any
        linked segmentation metadata and returned as one flattened dict.
        Returns None when no metadata row exists.
        """
        data = getattr(AnnoMetadata, filter_col) if filter_col else AnnoMetadata
        with self.session_scope() as session:
            if filter_col and data:
                query = session.query(data).filter(
                    AnnoMetadata.table_name == table_name
                )
                result = query.one()
                if hasattr(result, "__dict__"):
                    return self.get_automap_items(result)
                else:
                    return result[0]
            else:
                metadata = (
                    session.query(data, SegmentationMetadata)
                    .outerjoin(
                        SegmentationMetadata,
                        AnnoMetadata.table_name
                        == SegmentationMetadata.annotation_table,
                    )
                    .filter(
                        or_(
                            AnnoMetadata.table_name == table_name,
                            SegmentationMetadata.table_name == table_name,
                        )
                    )
                    .all()
                )
                try:
                    if metadata:
                        flatted_metadata = self.flatten_join(metadata)
                        return flatted_metadata[0]
                except NoResultFound:
                    return None

    def get_table_schema(self, table_name: str) -> str:
        """Return the schema_type recorded in the table's metadata."""
        table_metadata = self.get_table_metadata(table_name)
        return table_metadata.get("schema_type")

    def get_valid_table_names(self) -> List[str]:
        """Return names of all annotation tables flagged as valid."""
        with self.session_scope() as session:
            metadata = session.query(AnnoMetadata).all()
            return [m.table_name for m in metadata if m.valid == True]

    def get_annotation_table_size(self, table_name: str) -> int:
        """Get the number of annotations in a table
        Parameters
        ----------
        table_name : str
            name of table contained within the aligned_volume database
        Returns
        -------
        int
            number of annotations
        """
        Model = self.cached_table(table_name)
        with self.session_scope() as session:
            return session.query(Model).count()

    def get_max_id_value(self, table_name: str) -> int:
        """Return the largest primary-key id in the table (None if empty)."""
        model = self.cached_table(table_name)
        with self.session_scope() as session:
            return session.query(func.max(model.id)).scalar()

    def get_min_id_value(self, table_name: str) -> int:
        """Return the smallest primary-key id in the table (None if empty)."""
        model = self.cached_table(table_name)
        with self.session_scope() as session:
            return session.query(func.min(model.id)).scalar()

    def get_table_row_count(
        self, table_name: str, filter_valid: bool = False, filter_timestamp: str = None
    ) -> int:
        """Get row counts. Optionally can filter by row validity and
        by timestamp.
        Args:
            table_name (str): Name of table
            filter_valid (bool, optional): Filter only valid rows. Defaults to False.
            filter_timestamp (None, optional): Filter rows up to timestamp . Defaults to False.
        Returns:
            int: number of rows
        """
        model = self.cached_table(table_name)
        with self.session_scope() as session:
            sql_query = session.query(func.count(model.id))
            if filter_valid:
                sql_query = sql_query.filter(model.valid == True)
            if filter_timestamp and hasattr(model, "created"):
                sql_query = sql_query.filter(model.created <= filter_timestamp)
            return sql_query.scalar()

    @staticmethod
    def get_automap_items(result):
        """Convert an ORM row to a plain dict, dropping SQLAlchemy state."""
        return {k: v for (k, v) in result.__dict__.items() if k != "_sa_instance_state"}

    def obj_to_dict(self, obj):
        """Map an ORM object's column attributes to a dict ({} for None)."""
        if obj:
            return {
                column.key: getattr(obj, column.key)
                for column in inspect(obj).mapper.column_attrs
            }
        else:
            return {}

    def flatten_join(self, _list: List):
        """Merge each (anno, seg) row pair from a join into a single dict."""
        return [{**self.obj_to_dict(a), **self.obj_to_dict(b)} for a, b in _list]

    def drop_table(self, table_name: str) -> bool:
        """Drop a table, actually removes it from the database
        along with segmentation tables associated with it
        Parameters
        ----------
        table_name : str
            name of table to drop
        Returns
        -------
        bool
            whether drop was successful
        """
        table = self.base.metadata.tables.get(table_name)
        if table:
            logging.info(f"Deleting {table_name} table")
            self.base.metadata.drop_all(self._engine, [table], checkfirst=True)
            # BUG FIX: _cached_tables is keyed by table *name* strings (see
            # _load_table); the previous code checked and deleted with the
            # Table object, so the stale cached model was never evicted.
            if self._is_cached(table_name):
                del self._cached_tables[table_name]
            return True
        return False

    def _check_table_is_unique(self, table_name):
        """Raise TableAlreadyExists if the name is taken; return known names."""
        existing_tables = self._get_existing_table_names()
        if table_name in existing_tables:
            raise TableAlreadyExists(
                f"Table creation failed: {table_name} already exists"
            )
        return existing_tables

    def _get_existing_table_names(self, filter_valid: bool = False) -> List[str]:
        """Collects table_names keys of existing tables
        Returns
        -------
        list
            List of table_names
        """
        with self.session_scope() as session:
            stmt = session.query(AnnoMetadata)
            if filter_valid:
                stmt = stmt.filter(AnnoMetadata.valid == True)
            metadata = stmt.all()
            return [m.table_name for m in metadata]

    def _get_model_from_table_name(self, table_name: str) -> DeclarativeMeta:
        """Build the declarative model for a table from its stored metadata.

        Chooses between reference, segmentation, and plain annotation model
        factories based on which metadata fields are populated.  Raises
        TableNotInMetadata when no metadata row exists.
        """
        metadata = self.get_table_metadata(table_name)
        if metadata:
            if metadata["reference_table"]:
                return self.schema_client.create_reference_annotation_model(
                    table_name,
                    metadata["schema_type"],
                    metadata["reference_table"],
                )
            elif metadata.get("annotation_table") and table_name != metadata.get(
                "annotation_table"
            ):
                return self.schema_client.create_segmentation_model(
                    metadata["annotation_table"],
                    metadata["schema_type"],
                    metadata["pcg_table_name"],
                )
            else:
                return self.schema_client.create_annotation_model(
                    table_name, metadata["schema_type"]
                )
        else:
            raise TableNotInMetadata

    def _get_model_columns(self, table_name: str) -> List[tuple]:
        """Return list of column names and types of a given table
        Parameters
        ----------
        table_name : str
            Table name in database
        Returns
        -------
        list
            column names and types
        """
        db_columns = self.inspector.get_columns(table_name)
        if not db_columns:
            raise TableNameNotFound(table_name)
        return [(column["name"], column["type"]) for column in db_columns]

    def get_view_table(self, view_name: str) -> Table:
        """Return the sqlalchemy table object for a view"""
        if self._is_cached(view_name):
            return self._cached_tables[view_name]
        else:
            meta = MetaData(self._engine)
            meta.reflect(views=True, only=[view_name])
            table = meta.tables[view_name]
            self._cached_tables[view_name] = table
            return table

    def cached_table(self, table_name: str) -> DeclarativeMeta:
        """Returns cached table 'DeclarativeMeta' callable for querying.
        Parameters
        ----------
        table_name : str
            Table name in database
        Returns
        -------
        DeclarativeMeta
            SQLAlchemy callable.
        """
        try:
            self._load_table(table_name)
            return self._cached_tables[table_name]
        except KeyError as error:
            raise TableNameNotFound(table_name) from error

    def _load_table(self, table_name: str):
        """Load existing table into cached lookup dict instance
        Parameters
        ----------
        table_name : str
            Table name to be loaded from existing database tables
        Returns
        -------
        bool
            Returns True if table exists and is loaded into cached table dict.
        """
        if self._is_cached(table_name):
            return True
        try:
            self._cached_tables[table_name] = self._get_model_from_table_name(
                table_name
            )
            return True
        except TableNotInMetadata:
            # cant find the table so lets try the slow reflection before giving up
            self.mapped_base = automap_base()
            self.mapped_base.prepare(self._engine, reflect=True)
            try:
                model = self.mapped_base.classes[table_name]
                self._cached_tables[table_name] = model
            except KeyError as table_error:
                logging.error(f"Could not load table: {table_error}")
                return False
        except Exception as table_error:
            logging.error(f"Could not load table: {table_error}")
            return False

    def _is_cached(self, table_name: str) -> bool:
        """Check if table is loaded into cached instance dict of tables
        Parameters
        ----------
        table_name : str
            Name of table to check if loaded
        Returns
        -------
        bool
            True if table is loaded else False.
        """
        return table_name in self._cached_tables
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,969
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/migration/alembic/versions/6e7f580ff680_add_error_msg.py
|
"""Add error msg
Revision ID: 6e7f580ff680
Revises: 814d72d74e3b
Create Date: 2022-09-22 14:37:41.506933
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '6e7f580ff680'
down_revision = '814d72d74e3b'
branch_labels = None
depends_on = None
def upgrade():
    """Add a nullable free-text exception column to version_error."""
    op.add_column('version_error', sa.Column('exception', sa.String(), nullable=True))
def downgrade():
    """Remove the exception column added in upgrade()."""
    op.drop_column('version_error', 'exception')
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,970
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/tests/test_interface.py
|
import logging
def test_create_or_select_database(
    dadb_interface, database_metadata, annotation_metadata
):
    """create_or_select_database should hand back the URI it was given."""
    expected_uri = database_metadata["sql_uri"]
    volume_name = annotation_metadata["aligned_volume"]
    returned_uri = dadb_interface.create_or_select_database(expected_uri, volume_name)
    logging.info(returned_uri)
    assert str(returned_uri) == expected_uri
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,971
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/__init__.py
|
__version__ = "5.7.3"
from .interface import DynamicAnnotationInterface
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,972
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/migration/alembic/versions/975a79461cab_add_is_merged.py
|
"""Add is merged
Revision ID: 975a79461cab
Revises: 5a1d7c0ad006
Create Date: 2022-09-15 11:51:21.484964
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "975a79461cab"
down_revision = "5a1d7c0ad006"
branch_labels = None
depends_on = None
def upgrade():
    """Add a NOT NULL is_merged flag to analysisversion."""
    # Add as nullable first so existing rows don't violate the constraint,
    # backfill every row to True, then enforce NOT NULL.
    op.add_column(
        "analysisversion",
        sa.Column("is_merged", sa.Boolean(), nullable=True, default=True),
    )
    op.execute("UPDATE analysisversion SET is_merged = True")
    op.alter_column('analysisversion', 'is_merged', nullable=False)
def downgrade():
    """Remove the is_merged flag added in upgrade()."""
    op.drop_column("analysisversion", "is_merged")
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,973
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/migration/__init__.py
|
__version__ = "5.7.3"
from dynamicannotationdb.migration.migrate import DynamicMigration
from dynamicannotationdb.migration.alembic.run import run_alembic_migration
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,974
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/interface.py
|
import logging
from sqlalchemy import create_engine
from sqlalchemy.engine.url import make_url
from sqlalchemy.pool import NullPool
from .annotation import DynamicAnnotationClient
from .database import DynamicAnnotationDB
from .models import Base
from .schema import DynamicSchemaClient
from .segmentation import DynamicSegmentationClient
class DynamicAnnotationInterface:
    """An adapter layer to access all the dynamic annotation interfaces.
    Parameters
    ----------
    url : str
        URI of the database to connect to.
    aligned_volume : str
        name of aligned_volume database.
    Interface layers available
    --------------------------
    annotation :
        CRUD operations on annotation data as well as creating annotation tables.
    database :
        Database helper methods and metadata information.
    segmentation :
        CRUD operations on segmentation data as well as creating segmentation tables
        linked to annotation tables.
    schema :
        Wrapper for EMAnnotationSchemas to generate dynamic sqlalchemy models.
    """
    def __init__(
        self, url: str, aligned_volume: str, pool_size=5, max_overflow=5
    ) -> None:
        # Sub-interfaces are constructed lazily via the properties below.
        self._annotation = None
        self._database = None
        self._segmentation = None
        self._schema = None
        self.pool_size = pool_size
        self.max_overflow = max_overflow
        # Base URL is everything before the database name component.
        self._base_url = url.rpartition("/")[0]
        self._aligned_volume = aligned_volume
        # Creates the database if missing, then connects to it.
        self._sql_url = self.create_or_select_database(url, aligned_volume)
    def create_or_select_database(self, url: str, aligned_volume: str):
        """Create a new database with the name of the aligned volume. Checks if
        database exists before creating.

        NOTE(review): `aligned_volume` is interpolated directly into SQL
        statements below without escaping — callers must only pass trusted,
        validated database names.
        Parameters
        ----------
        url : str
            base path to the sql server
        aligned_volume : str
            name of aligned volume which the database name will inherent
        Returns
        -------
        sql_url instance
        """
        sql_base_uri = url.rpartition("/")[0]
        sql_uri = make_url(f"{sql_base_uri}/{aligned_volume}")
        # Throwaway autocommit engine: CREATE DATABASE cannot run inside a
        # transaction, and NullPool avoids keeping connections open.
        temp_engine = create_engine(
            sql_base_uri,
            poolclass=NullPool,
            isolation_level="AUTOCOMMIT",
            pool_pre_ping=True,
        )
        with temp_engine.connect() as connection:
            connection.execute("commit")
            database_exists = connection.execute(
                f"SELECT 1 FROM pg_catalog.pg_database WHERE datname = '{sql_uri.database}'"
            )
            if not database_exists.fetchone():
                logging.info(f"Database {aligned_volume} does not exist.")
                self._create_aligned_volume_database(sql_uri, connection)
        temp_engine.dispose()
        # Drop any sub-interfaces bound to the previous database.
        self._reset_interfaces()
        self._sql_url = sql_uri
        self._aligned_volume = sql_uri.database
        logging.info(f"Connected to {sql_uri.database}")
        return sql_uri
    def _create_aligned_volume_database(self, sql_uri, connection):
        """Create the aligned-volume database from the postgis template.

        Ensures a 'template_postgis' template database with the postgis
        extension exists, creates the new database from it, then creates
        the declarative Base tables inside it.
        """
        logging.info(f"Creating new database: {sql_uri.database}")
        # Kick off any sessions still attached to the target database name.
        connection.execute(
            f"SELECT pg_terminate_backend(pid) FROM pg_stat_activity \
                    WHERE pid <> pg_backend_pid() AND datname = '{sql_uri.database}';"
        )
        # check if template exists, create if missing
        template_exist = connection.execute(
            "SELECT 1 FROM pg_catalog.pg_database WHERE datname = 'template_postgis'"
        )
        if not template_exist.fetchone():
            # create postgis template db
            connection.execute("CREATE DATABASE template_postgis")
            # create postgis extension
            template_uri = make_url(
                f"{str(sql_uri).rpartition('/')[0]}/template_postgis"
            )
            template_engine = create_engine(
                template_uri,
                poolclass=NullPool,
                isolation_level="AUTOCOMMIT",
                pool_pre_ping=True,
            )
            with template_engine.connect() as template_connection:
                template_connection.execute("CREATE EXTENSION IF NOT EXISTS postgis")
            template_engine.dispose()
        # finally create new annotation database
        connection.execute(
            f"CREATE DATABASE {sql_uri.database} TEMPLATE template_postgis"
        )
        aligned_volume_engine = create_engine(
            sql_uri,
            poolclass=NullPool,
            isolation_level="AUTOCOMMIT",
            pool_pre_ping=True,
        )
        try:
            Base.metadata.create_all(aligned_volume_engine)
            logging.info(f"{sql_uri.database} created.")
        except Exception as e:
            raise e
        finally:
            aligned_volume_engine.dispose()
    def _reset_interfaces(self):
        # Clear lazily built sub-interfaces so they reconnect to the
        # currently selected database on next access.
        self._annotation = None
        self._database = None
        self._segmentation = None
        self._schema = None
    @property
    def url(self) -> str:
        """The SQL URL of the currently selected database."""
        return self._sql_url
    @property
    def aligned_volume(self) -> str:
        """Name of the currently selected aligned-volume database."""
        return self._aligned_volume
    @property
    def annotation(self) -> DynamicAnnotationClient:
        """Lazily constructed annotation CRUD client."""
        if not self._annotation:
            self._annotation = DynamicAnnotationClient(self._sql_url)
        return self._annotation
    @property
    def database(self) -> DynamicAnnotationDB:
        """Lazily constructed database helper client."""
        if not self._database:
            self._database = DynamicAnnotationDB(
                self._sql_url, self.pool_size, self.max_overflow
            )
        return self._database
    @property
    def segmentation(self) -> DynamicSegmentationClient:
        """Lazily constructed segmentation CRUD client."""
        if not self._segmentation:
            self._segmentation = DynamicSegmentationClient(self._sql_url)
        return self._segmentation
    @property
    def schema(self) -> DynamicSchemaClient:
        """Lazily constructed schema/model factory client."""
        if not self._schema:
            self._schema = DynamicSchemaClient()
        return self._schema
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,975
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/errors.py
|
class TableNameNotFound(KeyError):
    """Raised when a table name is missing from the metadata table."""

    def __init__(self, table_name: str):
        self.table_name = table_name
        self.message = f"No table named '{table_name}' exists."
        super().__init__(self.message)

    def __str__(self):
        return self.message
class TableAlreadyExists(KeyError):
    """Raised when attempting to create a table whose name is already taken."""
class TableNotInMetadata(KeyError):
    """Raised when a table has no row in the metadata table."""
class IdsAlreadyExists(KeyError):
    """Raised when annotation IDs already exist in the segmentation table."""
class SelfReferenceTableError(KeyError):
    """Raised when a table illegally references itself.

    NOTE(review): the previous docstring was copy-pasted from
    IdsAlreadyExists; this description is inferred from the class name —
    confirm against the call sites that raise it.
    """
class BadRequest(Exception):
    """Raised for malformed or otherwise invalid requests."""
class UpdateAnnotationError(ValueError):
    """Raised when updating an annotation that was already superseded."""

    def __init__(self, target_id: int, superseded_id: int):
        self.target_id = target_id
        self.message = (
            f"Annotation with ID {target_id} has already been superseded "
            f"by annotation ID {superseded_id}, update annotation ID "
            f"{superseded_id} instead"
        )
        super().__init__(self.message)

    def __str__(self):
        return f"Error update ID {self.target_id} -> {self.message}"
class AnnotationInsertLimitExceeded(ValueError):
    """Exception raised when amount of annotations exceeds defined limit."""

    def __init__(
        self, limit: int, length: int, message: str = "Annotation limit exceeded"
    ):
        # NOTE(review): the `message` argument is accepted but the stored
        # message is always the computed string below — confirm whether
        # callers are expected to be able to override it.
        self.limit = limit
        self.message = (
            f"The insertion limit is {limit}, {length} were attempted to be inserted"
        )
        super().__init__(self.message)

    def __str__(self):
        return f"{self.limit} -> {self.message}"
class NoAnnotationsFoundWithID(Exception):
    """Raised when no annotation exists for the requested ID."""

    def __init__(self, anno_id: int):
        self.anno_id = anno_id
        self.message = f"No annotation with {anno_id} exists"
        super().__init__(self.message)

    def __str__(self):
        return self.message
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,976
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/tests/test_segmentation.py
|
import logging
from emannotationschemas import type_mapping
from emannotationschemas.schemas.base import ReferenceAnnotation
def test_create_segmentation_table(dadb_interface, annotation_metadata):
    """Creating a segmentation table returns its '<anno>__<pcg>' name."""
    anno_table = annotation_metadata["table_name"]
    pcg_table = annotation_metadata["pcg_table_name"]

    result = dadb_interface.segmentation.create_segmentation_table(
        anno_table, "synapse", pcg_table
    )

    assert result == f"{anno_table}__{pcg_table}"
def test_create_all_schema_types(dadb_interface, annotation_metadata):
    """Every registered schema type can back a segmentation table."""
    pcg_table = annotation_metadata["pcg_table_name"]
    ref_metadata = {
        "reference_table": "anno_test",
        "track_target_id_updates": True,
    }

    for schema_name, schema_type in type_mapping.items():
        # Reference schemas need extra table metadata; plain schemas take None.
        is_reference = issubclass(schema_type, ReferenceAnnotation)
        created = dadb_interface.segmentation.create_segmentation_table(
            f"test_{schema_name}",
            schema_name,
            pcg_table,
            table_metadata=ref_metadata if is_reference else None,
        )
        assert created == f"test_{schema_name}__{pcg_table}"
def test_insert_linked_annotations(dadb_interface, annotation_metadata):
    """Inserting an annotation with linked segmentation data returns its ID."""
    payload = [
        {
            "id": 8,
            "pre_pt": {
                "position": [121, 123, 1232],
                "supervoxel_id": 2344444,
                "root_id": 4,
            },
            "ctr_pt": {"position": [121, 123, 1232]},
            "post_pt": {
                "position": [121, 123, 1232],
                "supervoxel_id": 3242424,
                "root_id": 5,
            },
            "size": 2,
        }
    ]

    inserted_ids = dadb_interface.segmentation.insert_linked_annotations(
        annotation_metadata["table_name"],
        annotation_metadata["pcg_table_name"],
        payload,
    )

    assert inserted_ids == [8]
def test_get_linked_annotations(dadb_interface, annotation_metadata):
    """Fetching by ID returns flattened supervoxel/root-id columns."""
    rows = dadb_interface.segmentation.get_linked_annotations(
        annotation_metadata["table_name"],
        annotation_metadata["pcg_table_name"],
        [8],
    )
    logging.info(rows)

    first = rows[0]
    assert first["pre_pt_supervoxel_id"] == 2344444
    assert first["pre_pt_root_id"] == 4
    assert first["post_pt_supervoxel_id"] == 3242424
    assert first["post_pt_root_id"] == 5
def test_insert_linked_segmentation(dadb_interface, annotation_metadata):
    """Segmentation-only rows can be linked to an existing annotation ID."""
    seg_rows = [
        {
            "id": 2,
            "pre_pt": {
                "supervoxel_id": 2344444,
                "root_id": 4,
            },
            "post_pt": {
                "supervoxel_id": 3242424,
                "root_id": 5,
            },
            "size": 2,
        }
    ]

    inserted = dadb_interface.segmentation.insert_linked_segmentation(
        annotation_metadata["table_name"],
        annotation_metadata["pcg_table_name"],
        seg_rows,
    )
    logging.info(inserted)

    assert inserted == [2]
def test_update_linked_annotations(dadb_interface, annotation_metadata):
    """Updating supersedes the old row and maps old ID -> new ID."""
    updated_row = {
        "id": 2,
        "pre_pt": {
            "position": [222, 223, 1232],
        },
        "ctr_pt": {"position": [121, 123, 1232]},
        "post_pt": {
            "position": [121, 123, 1232],
        },
        "size": 2,
    }

    result = dadb_interface.segmentation.update_linked_annotations(
        annotation_metadata["table_name"],
        annotation_metadata["pcg_table_name"],
        updated_row,
    )
    logging.info(result)

    assert result == {2: 4}
def test_insert_another_linked_segmentation(dadb_interface, annotation_metadata):
    """A second segmentation-only row links to the superseding annotation."""
    seg_rows = [
        {
            "id": 4,
            "pre_pt": {
                "supervoxel_id": 2344444,
                "root_id": 4,
            },
            "post_pt": {
                "supervoxel_id": 3242424,
                "root_id": 5,
            },
            "size": 2,
        }
    ]

    inserted = dadb_interface.segmentation.insert_linked_segmentation(
        annotation_metadata["table_name"],
        annotation_metadata["pcg_table_name"],
        seg_rows,
    )
    logging.info(inserted)

    assert inserted == [4]
def test_get_updated_linked_annotations(dadb_interface, annotation_metadata):
    """The superseding row carries over the linked segmentation data."""
    rows = dadb_interface.segmentation.get_linked_annotations(
        annotation_metadata["table_name"],
        annotation_metadata["pcg_table_name"],
        [4],
    )

    first = rows[0]
    assert first["pre_pt_supervoxel_id"] == 2344444
    assert first["pre_pt_root_id"] == 4
    assert first["post_pt_supervoxel_id"] == 3242424
    assert first["post_pt_root_id"] == 5
def test_delete_linked_annotation(dadb_interface, annotation_metadata):
    """Deleting marks the rows and returns the affected IDs."""
    deleted = dadb_interface.segmentation.delete_linked_annotation(
        annotation_metadata["table_name"],
        annotation_metadata["pcg_table_name"],
        [4],
    )
    logging.info(deleted)

    assert deleted == [4]
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,977
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/models.py
|
import enum
from emannotationschemas.models import Base
from sqlalchemy import (
Boolean,
CheckConstraint,
Column,
DateTime,
Float,
ForeignKey,
Integer,
String,
Text,
Enum,
JSON,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.dialects import postgresql
# Models that will be created in the 'materialized' database.
# Each declarative_base() call carries its own MetaData, so the two groups of
# tables can be created against different databases independently.
MatBase = declarative_base()
# Models that will be created in the 'annotation' database.
AnnotationBase = declarative_base()
class StatusEnum(enum.Enum):
    """Lifecycle states for a materialization version (mirrors the
    'version_status' postgres ENUM on AnalysisVersion.status)."""

    AVAILABLE = "AVAILABLE"
    RUNNING = "RUNNING"
    FAILED = "FAILED"
    EXPIRED = "EXPIRED"

    @staticmethod
    def fetch_values():
        """Return all status values as a list of strings."""
        # @staticmethod keeps StatusEnum.fetch_values() working and also
        # allows calling through a member (the original bare function did not).
        return [c.value for c in StatusEnum]
class AnalysisDataBase(AnnotationBase):
    """Registry row naming a database and whether it should be materialized."""

    __tablename__ = "analysisdatabase"
    id = Column(Integer, primary_key=True)
    database = Column(String(100), nullable=False)  # database name
    materialize = Column(Boolean, nullable=False, default=True)  # opt in/out of materialization
class AnalysisVersion(Base):
    """One materialization version of a datastack."""

    __tablename__ = "analysisversion"
    id = Column(Integer, primary_key=True)
    datastack = Column(String(100), nullable=False)
    version = Column(Integer, nullable=False)
    time_stamp = Column(DateTime, nullable=False)
    valid = Column(Boolean)
    expires_on = Column(DateTime, nullable=True)
    # Self-referential link to the version this one was derived from.
    parent_version = Column(
        Integer,
        ForeignKey("analysisversion.id"),
        nullable=True,
    )
    # Values mirror StatusEnum; stored as a native postgres ENUM.
    status = Column(
        postgresql.ENUM(
            "AVAILABLE", "RUNNING", "FAILED", "EXPIRED", name="version_status"
        ),
        nullable=False,
    )
    is_merged = Column(Boolean, default=True)

    def __repr__(self):
        # e.g. "mydatastack__mat12" — presumably doubles as the materialized
        # database name; confirm against callers.
        return f"{self.datastack}__mat{self.version}"
class AnalysisTable(Base):
    """A table belonging to a specific analysis (materialization) version."""

    __tablename__ = "analysistables"
    id = Column(Integer, primary_key=True)
    aligned_volume = Column(String(100), nullable=False)
    schema = Column(String(100), nullable=False)  # schema type backing the table
    table_name = Column(String(100), nullable=False)
    valid = Column(Boolean)
    created = Column(DateTime, nullable=False)
    analysisversion_id = Column(Integer, ForeignKey("analysisversion.id"))
    analysisversion = relationship("AnalysisVersion")
class VersionErrorTable(Base):
    """Error information recorded for an analysis version."""

    __tablename__ = "version_error"
    id = Column(Integer, primary_key=True)
    exception = Column(String, nullable=True)  # exception name/text
    error = Column(JSON, nullable=True)  # structured error payload
    analysisversion_id = Column(Integer, ForeignKey("analysisversion.id"))
    analysisversion = relationship("AnalysisVersion")
class MaterializedMetadata(MatBase):
    """Per-table metadata stored inside a materialized database (MatBase)."""

    __tablename__ = "materializedmetadata"
    id = Column(Integer, primary_key=True)
    schema = Column(String(100), nullable=False)
    table_name = Column(String(100), nullable=False)
    row_count = Column(Integer, nullable=False)
    materialized_timestamp = Column(DateTime, nullable=False)
    segmentation_source = Column(String(255), nullable=True)
    is_merged = Column(Boolean, nullable=True)
class AnnoMetadata(Base):
    """Metadata row describing an annotation table and its access rules."""

    __tablename__ = "annotation_table_metadata"
    id = Column(Integer, primary_key=True)
    schema_type = Column(String(100), nullable=False)
    table_name = Column(String(100), nullable=False, unique=True)
    valid = Column(Boolean)
    created = Column(DateTime, nullable=False)
    deleted = Column(DateTime, nullable=True)  # soft-delete timestamp
    user_id = Column(String(255), nullable=False)
    description = Column(Text, nullable=False)
    notice_text = Column(Text, nullable=True)
    reference_table = Column(String(100), nullable=True)
    flat_segmentation_source = Column(String(300), nullable=True)
    voxel_resolution_x = Column(Float, nullable=False)
    voxel_resolution_y = Column(Float, nullable=False)
    voxel_resolution_z = Column(Float, nullable=False)
    # NOTE(review): this ENUM is named "read_permission" even though it backs
    # the write_permission column — likely reusing one postgres ENUM type for
    # both columns; confirm this is intentional before renaming.
    write_permission = Column(
        postgresql.ENUM("PRIVATE", "GROUP", "PUBLIC", name="read_permission"),
        nullable=False,
    )
    read_permission = Column(
        postgresql.ENUM("PRIVATE", "GROUP", "PUBLIC", name="read_permission"),
        nullable=False,
    )
    last_modified = Column(DateTime, nullable=False)
class SegmentationMetadata(Base):
    """Metadata for a segmentation table linked to an annotation table."""

    __tablename__ = "segmentation_table_metadata"
    id = Column(Integer, primary_key=True)
    schema_type = Column(String(100), nullable=False)
    table_name = Column(String(100), nullable=False, unique=True)
    valid = Column(Boolean)
    created = Column(DateTime, nullable=False)
    deleted = Column(DateTime, nullable=True)  # soft-delete timestamp
    segmentation_source = Column(String(255), nullable=True)
    pcg_table_name = Column(String(255), nullable=False)  # chunked graph table name
    last_updated = Column(DateTime, nullable=True)
    # Links back to the annotation table this segmentation table extends.
    annotation_table = Column(
        String(100), ForeignKey("annotation_table_metadata.table_name")
    )
class CombinedTableMetadata(Base):
    """Pairs a reference table with the annotation table it targets."""

    __tablename__ = "combined_table_metadata"
    __table_args__ = (
        # A table may not reference itself (see SelfReferenceTableError).
        CheckConstraint(
            "reference_table <> annotation_table", name="not_self_referenced"
        ),
    )

    id = Column(Integer, primary_key=True)
    reference_table = Column(
        String(100), ForeignKey("annotation_table_metadata.table_name")
    )
    annotation_table = Column(
        String(100), ForeignKey("annotation_table_metadata.table_name")
    )
    valid = Column(Boolean)
    created = Column(DateTime, nullable=False)
    deleted = Column(DateTime, nullable=True)  # soft-delete timestamp
    description = Column(Text, nullable=False)
# A table that stores database views, their descriptions, and datastacks.
class AnalysisView(Base):
    """Metadata describing a database view exposed for a datastack."""

    __tablename__ = "analysisviews"
    id = Column(Integer, primary_key=True)
    table_name = Column(String(100), nullable=False)
    description = Column(Text, nullable=False)
    datastack_name = Column(String(100), nullable=False)
    voxel_resolution_x = Column(Float, nullable=False)
    voxel_resolution_y = Column(Float, nullable=False)
    voxel_resolution_z = Column(Float, nullable=False)
    notice_text = Column(Text, nullable=True)
    live_compatible = Column(Boolean, nullable=False)
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,978
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/migration/alembic/versions/7c79eff751b4_add_parent_version_column.py
|
"""Add parent_version column
Revision ID: 7c79eff751b4
Revises: ef5c2d7f96d8
Create Date: 2022-08-08 10:02:40.077429
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy import engine_from_config
from sqlalchemy.engine import reflection
# revision identifiers, used by Alembic.
revision = "7c79eff751b4"
down_revision = "ef5c2d7f96d8"
branch_labels = None
# NOTE(review): depends_on duplicates down_revision, which is normally
# redundant — confirm the dependency is intentional.
depends_on = "ef5c2d7f96d8"
def _table_has_column(table, column):
    """Return True if *table* already has a column named *column*.

    Parameters
    ----------
    table : str
        Name of the table to inspect.
    column : str
        Exact column name to look for.
    """
    config = op.get_context().config
    engine = engine_from_config(
        config.get_section(config.config_ini_section), prefix="sqlalchemy."
    )
    insp = reflection.Inspector.from_engine(engine)
    # Compare names exactly: the previous substring test
    # (`column in col["name"]`) wrongly matched partial names,
    # e.g. "version" against "parent_version".
    return any(col["name"] == column for col in insp.get_columns(table))
def upgrade():
    """Add a nullable ``parent_version`` column with a self-referential FK."""
    # NOTE(review): a batch_alter_table context is opened but the operations
    # below go through ``op`` rather than ``batch_op``, so the batch context
    # is effectively unused — confirm whether batch mode was intended.
    with op.batch_alter_table("analysisversion", schema=None) as batch_op:
        op.add_column(
            "analysisversion",
            sa.Column("parent_version", sa.Integer(), nullable=True),
        )
        # Unnamed FK (name=None): the backend's naming convention applies.
        op.create_foreign_key(
            None, "analysisversion", "analysisversion", ["parent_version"], ["id"]
        )
def downgrade():
    """Drop the ``parent_version`` foreign key and column."""
    # The constraint was created unnamed; dropping with name=None relies on
    # the backend's autogenerated constraint name — verify on downgrade.
    op.drop_constraint(None, "analysisversion", type_="foreignkey")
    op.drop_column("analysisversion", "parent_version")
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,979
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/segmentation.py
|
import datetime
import logging
from typing import List
from marshmallow import INCLUDE
from .database import DynamicAnnotationDB
from .errors import (
AnnotationInsertLimitExceeded,
IdsAlreadyExists,
UpdateAnnotationError,
)
from .key_utils import build_segmentation_table_name
from .models import SegmentationMetadata
from .schema import DynamicSchemaClient
from .errors import TableNameNotFound
class DynamicSegmentationClient:
    """Client for segmentation tables that are linked one-to-one (by ``id``)
    to annotation tables: table creation plus CRUD of the linked rows."""

    def __init__(self, sql_url: str) -> None:
        self.db = DynamicAnnotationDB(sql_url)
        self.schema = DynamicSchemaClient()

    def create_segmentation_table(
        self,
        table_name: str,
        schema_type: str,
        segmentation_source: str,
        table_metadata: dict = None,
        with_crud_columns: bool = False,
    ):
        """Create a segmentation table with the primary key as foreign key
        to the annotation table.

        Parameters
        ----------
        table_name : str
            Name of annotation table to link to.
        schema_type : str
            schema type
        segmentation_source : str
            name of segmentation data source, used to create table name.
        table_metadata : dict, optional
            metadata to extend table behavior, by default None
        with_crud_columns : bool, optional
            add additional columns to track CRUD operations on rows, by default False

        Returns
        -------
        str
            name of segmentation table.
        """
        segmentation_table_name = build_segmentation_table_name(
            table_name, segmentation_source
        )
        self.db._check_table_is_unique(segmentation_table_name)

        SegmentationModel = self.schema.create_segmentation_model(
            table_name,
            schema_type,
            segmentation_source,
            table_metadata,
            with_crud_columns,
        )

        # Only create the physical table and its metadata row if the name is
        # not already registered.
        if (
            not self.db.cached_session.query(SegmentationMetadata)
            .filter(SegmentationMetadata.table_name == segmentation_table_name)
            .scalar()
        ):
            SegmentationModel.__table__.create(bind=self.db._engine, checkfirst=True)

            creation_time = datetime.datetime.utcnow()
            metadata_dict = {
                "annotation_table": table_name,
                "schema_type": schema_type,
                "table_name": segmentation_table_name,
                "valid": True,
                "created": creation_time,
                "pcg_table_name": segmentation_source,
            }

            seg_metadata = SegmentationMetadata(**metadata_dict)
            try:
                self.db.cached_session.add(seg_metadata)
                self.db.commit_session()
            except Exception as e:
                logging.error(f"SQL ERROR: {e}")
        return segmentation_table_name

    def get_linked_tables(self, table_name: str, pcg_table_name: str) -> List:
        """Return SegmentationMetadata rows linking ``table_name`` to
        ``pcg_table_name``.

        Raises
        ------
        AttributeError
            If the lookup fails.
        """
        try:
            return (
                self.db.cached_session.query(SegmentationMetadata)
                .filter(SegmentationMetadata.annotation_table == table_name)
                .filter(SegmentationMetadata.pcg_table_name == pcg_table_name)
                .all()
            )
        except Exception as e:
            raise AttributeError(
                f"No table found with name '{table_name}'. Error: {e}"
            ) from e

    def get_segmentation_table_metadata(self, table_name: str, pcg_table_name: str):
        """Return the metadata dict for a segmentation table, or None if it
        cannot be found."""
        seg_table_name = build_segmentation_table_name(table_name, pcg_table_name)
        try:
            result = (
                self.db.cached_session.query(SegmentationMetadata)
                .filter(SegmentationMetadata.table_name == seg_table_name)
                .one()
            )
            return self.db.get_automap_items(result)
        except Exception as e:
            # Log instead of silently swallowing (the original dropped the
            # exception); roll back so the cached session stays usable, then
            # report "not found" as None.
            logging.warning(f"Could not load metadata for '{seg_table_name}': {e}")
            self.db.cached_session.rollback()
            return None

    def get_linked_annotations(
        self, table_name: str, pcg_table_name: str, annotation_ids: List[int]
    ) -> dict:
        """Get list of annotations from database by id.

        Parameters
        ----------
        table_name : str
            name of annotation table
        pcg_table_name: str
            name of chunked graph reference table
        annotation_ids : int
            annotation id

        Returns
        -------
        list
            list of annotation data dicts
        """
        metadata = self.db.get_table_metadata(table_name)
        schema_type = metadata["schema_type"]
        seg_table_name = build_segmentation_table_name(table_name, pcg_table_name)

        AnnotationModel = self.db.cached_table(table_name)
        SegmentationModel = self.db.cached_table(seg_table_name)

        annotations = (
            self.db.cached_session.query(AnnotationModel, SegmentationModel)
            .join(SegmentationModel, SegmentationModel.id == AnnotationModel.id)
            .filter(AnnotationModel.id.in_(list(annotation_ids)))
            .all()
        )

        FlatSchema = self.schema.get_flattened_schema(schema_type)
        schema = FlatSchema(unknown=INCLUDE)

        data = []
        for anno, seg in annotations:
            # Strip SQLAlchemy bookkeeping before merging the two row dicts.
            anno_data = {
                k: v for (k, v) in anno.__dict__.items() if k != "_sa_instance_state"
            }
            seg_data = {
                k: v for (k, v) in seg.__dict__.items() if k != "_sa_instance_state"
            }
            anno_data["created"] = str(anno_data.get("created"))
            anno_data["deleted"] = str(anno_data.get("deleted"))

            merged_data = {**anno_data, **seg_data}
            data.append(merged_data)

        return schema.load(data, many=True)

    def insert_linked_segmentation(
        self, table_name: str, pcg_table_name: str, segmentation_data: List[dict]
    ):
        """Insert segmentation data by linking to annotation ids.
        Limited to 10,000 inserts. If more consider using a bulk insert script.

        Parameters
        ----------
        table_name : str
            name of annotation table
        pcg_table_name: str
            name of chunked graph reference table
        segmentation_data : List[dict]
            List of dictionaries of single segmentation data.

        Returns
        -------
        List[int]
            ids of the inserted segmentation rows.

        Raises
        ------
        AnnotationInsertLimitExceeded
            If more rows than the insertion limit are supplied.
        IdsAlreadyExists
            If any of the ids are already linked in the database.
        """
        insertion_limit = 10_000

        if len(segmentation_data) > insertion_limit:
            # Argument order is (limit, length); the original call had them
            # swapped, producing an inverted error message.
            raise AnnotationInsertLimitExceeded(
                insertion_limit, len(segmentation_data)
            )

        metadata = self.db.get_table_metadata(table_name)
        schema_type = metadata["schema_type"]

        seg_table_name = build_segmentation_table_name(table_name, pcg_table_name)

        SegmentationModel = self.db.cached_table(seg_table_name)

        formatted_seg_data = []

        _, segmentation_schema = self.schema.split_flattened_schema(schema_type)

        for segmentation in segmentation_data:
            # Use a fresh name here; the original reassigned
            # ``segmentation_data`` and shadowed the parameter in the loop.
            flattened = self.schema.flattened_schema_data(segmentation)
            flat_data = self.schema._map_values_to_schema(
                flattened, segmentation_schema
            )
            flat_data["id"] = segmentation["id"]

            formatted_seg_data.append(flat_data)

        segs = [SegmentationModel(**seg_row) for seg_row in formatted_seg_data]

        ids = [data["id"] for data in formatted_seg_data]
        # Reject the whole batch if any id is already linked.
        q = self.db.cached_session.query(SegmentationModel).filter(
            SegmentationModel.id.in_(list(ids))
        )

        ids_exist = self.db.cached_session.query(q.exists()).scalar()

        if ids_exist:
            raise IdsAlreadyExists(f"Annotation IDs {ids} already linked in database ")
        self.db.cached_session.add_all(segs)
        seg_ids = [seg.id for seg in segs]
        self.db.commit_session()
        return seg_ids

    def insert_linked_annotations(
        self, table_name: str, pcg_table_name: str, annotations: List[dict]
    ):
        """Insert annotations by type and schema. Limited to 10,000
        annotations. If more consider using a bulk insert script.

        Parameters
        ----------
        table_name : str
            name of annotation table
        pcg_table_name: str
            name of chunked graph reference table
        annotations : dict
            Dictionary of single annotation data.

        Returns
        -------
        List[int]
            ids of the inserted annotations.

        Raises
        ------
        AnnotationInsertLimitExceeded
            If more rows than the insertion limit are supplied.
        """
        insertion_limit = 10_000

        if len(annotations) > insertion_limit:
            # Argument order is (limit, length); the original call had them
            # swapped, producing an inverted error message.
            raise AnnotationInsertLimitExceeded(insertion_limit, len(annotations))

        metadata = self.db.get_table_metadata(table_name)
        schema_type = metadata["schema_type"]

        seg_table_name = build_segmentation_table_name(table_name, pcg_table_name)

        formatted_anno_data = []
        formatted_seg_data = []

        AnnotationModel = self.db.cached_table(table_name)
        SegmentationModel = self.db.cached_table(seg_table_name)

        logging.info(f"{AnnotationModel.__table__.columns}")
        logging.info(f"{SegmentationModel.__table__.columns}")

        for annotation in annotations:
            anno_data, seg_data = self.schema.split_flattened_schema_data(
                schema_type, annotation
            )
            # Preserve caller-supplied ids; otherwise the DB assigns them.
            if annotation.get("id"):
                anno_data["id"] = annotation["id"]
            if hasattr(AnnotationModel, "created"):
                anno_data["created"] = datetime.datetime.utcnow()
            anno_data["valid"] = True
            formatted_anno_data.append(anno_data)
            formatted_seg_data.append(seg_data)
        logging.info(f"DATA TO BE INSERTED: {formatted_anno_data} {formatted_seg_data}")
        # (The original wrapped this in a try/except that only re-raised.)
        annos = [
            AnnotationModel(**annotation_data)
            for annotation_data in formatted_anno_data
        ]

        self.db.cached_session.add_all(annos)
        # Flush so the annotation rows receive ids to link segmentations to.
        self.db.cached_session.flush()
        segs = [
            SegmentationModel(**segmentation_data, id=anno.id)
            for segmentation_data, anno in zip(formatted_seg_data, annos)
        ]
        ids = [anno.id for anno in annos]
        self.db.cached_session.add_all(segs)
        self.db.commit_session()
        return ids

    def update_linked_annotations(
        self, table_name: str, pcg_table_name: str, annotation: dict
    ):
        """Updates an annotation by inserting a new row. The original annotation
        will refer to the new row with a superseded_id. Does not update inplace.

        Parameters
        ----------
        table_name : str
            name of annotation table
        pcg_table_name: str
            name of chunked graph reference table
        annotation : dict, annotation to update by ID

        Returns
        -------
        dict
            mapping of old annotation id -> superseding annotation id.

        Raises
        ------
        UpdateAnnotationError
            If the target row was already superseded.
        """
        anno_id = annotation.get("id")
        if not anno_id:
            return "Annotation requires an 'id' to update targeted row"

        metadata = self.db.get_table_metadata(table_name)
        schema_type = metadata["schema_type"]

        seg_table_name = build_segmentation_table_name(table_name, pcg_table_name)

        AnnotationModel = self.db.cached_table(table_name)
        SegmentationModel = self.db.cached_table(seg_table_name)

        new_annotation, __ = self.schema.split_flattened_schema_data(
            schema_type, annotation
        )

        new_annotation["created"] = datetime.datetime.utcnow()
        new_annotation["valid"] = True

        new_data = AnnotationModel(**new_annotation)

        data = (
            self.db.cached_session.query(AnnotationModel, SegmentationModel)
            .filter(AnnotationModel.id == anno_id)
            .filter(SegmentationModel.id == anno_id)
            .all()
        )
        update_map = {}
        for old_anno, old_seg in data:
            # "superceded" spelling matches the column name used elsewhere.
            if old_anno.superceded_id:
                raise UpdateAnnotationError(anno_id, old_anno.superceded_id)

            self.db.cached_session.add(new_data)
            self.db.cached_session.flush()

            # Soft-supersede: old row stays but is marked invalid and points
            # at the replacement.
            deleted_time = datetime.datetime.utcnow()
            old_anno.deleted = deleted_time
            old_anno.superceded_id = new_data.id
            old_anno.valid = False
            update_map[anno_id] = new_data.id
        self.db.commit_session()

        return update_map

    def delete_linked_annotation(
        self, table_name: str, pcg_table_name: str, annotation_ids: List[int]
    ):
        """Mark annotations for deletion by list of ids (soft delete).

        Parameters
        ----------
        table_name : str
            name of annotation table
        pcg_table_name: str
            name of chunked graph reference table
        annotation_ids : List[int]
            list of ids to delete

        Returns
        -------
        List[int] or None
            ids actually marked deleted, or None if none matched.
        """
        seg_table_name = build_segmentation_table_name(table_name, pcg_table_name)
        AnnotationModel = self.db.cached_table(table_name)
        SegmentationModel = self.db.cached_table(seg_table_name)

        annotations = (
            self.db.cached_session.query(AnnotationModel)
            .join(SegmentationModel, SegmentationModel.id == AnnotationModel.id)
            .filter(AnnotationModel.id.in_(list(annotation_ids)))
            .all()
        )
        if not annotations:
            return None
        deleted_ids = [annotation.id for annotation in annotations]
        deleted_time = datetime.datetime.utcnow()
        for annotation in annotations:
            # Soft delete: rows stay in the table, timestamped and invalidated.
            annotation.deleted = deleted_time
            annotation.valid = False
        self.db.commit_session()
        return deleted_ids
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,980
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/tests/test_database.py
|
import logging
import datetime
import pytest
from sqlalchemy import Table
from sqlalchemy.ext.declarative.api import DeclarativeMeta
from emannotationschemas import type_mapping
def test_get_table_metadata(dadb_interface, annotation_metadata):
    """Metadata lookup returns the full dict, a single column via filter_col,
    and raises AttributeError for a missing column."""
    table_name = annotation_metadata["table_name"]
    schema_type = annotation_metadata["schema_type"]

    metadata = dadb_interface.database.get_table_metadata(table_name)
    logging.info(metadata)

    assert metadata["schema_type"] == schema_type
    assert metadata["table_name"] == "anno_test"
    assert metadata["user_id"] == "foo@bar.com"
    assert metadata["description"] == "New description"
    assert metadata["voxel_resolution_x"] == 4.0

    # test with filter to get a col value
    metadata_value = dadb_interface.database.get_table_metadata(
        table_name, filter_col="valid"
    )
    # Log the filtered value (the original logged the full dict again).
    logging.info(metadata_value)
    assert metadata_value is True

    # test for missing column (no need to bind the unused return value)
    with pytest.raises(AttributeError) as e:
        dadb_interface.database.get_table_metadata(table_name, "missing_column")

    assert (
        str(e.value) == "type object 'AnnoMetadata' has no attribute 'missing_column'"
    )
def test_get_table_sql_metadata(dadb_interface, annotation_metadata):
    """SQL metadata is returned as a SQLAlchemy Table object."""
    sql_metadata = dadb_interface.database.get_table_sql_metadata(
        annotation_metadata["table_name"]
    )
    logging.info(sql_metadata)
    assert isinstance(sql_metadata, Table)
def test__get_model_from_table_name(dadb_interface):
    """Each schema-backed test table maps to a declarative model class."""
    for schema_name in type_mapping:
        model = dadb_interface.database._get_model_from_table_name(
            f"test_{schema_name}"
        )
        assert isinstance(model, DeclarativeMeta)
def test_get_model_columns(dadb_interface, annotation_metadata):
    """Model columns are reported as a list."""
    cols = dadb_interface.database._get_model_columns(
        annotation_metadata["table_name"]
    )
    logging.info(cols)
    assert isinstance(cols, list)
def test__get_existing_table_ids(dadb_interface):
    """Existing table names are reported as a list."""
    assert isinstance(dadb_interface.database._get_existing_table_names(), list)
def test_get_table_row_count(dadb_interface, annotation_metadata):
    """Unfiltered row count includes every row (valid or not)."""
    table_name = annotation_metadata["table_name"]
    count = dadb_interface.database.get_table_row_count(table_name)
    logging.info(f"{table_name} row count: {count}")
    assert count == 3
def test_get_table_valid_row_count(dadb_interface, annotation_metadata):
    """filter_valid excludes invalidated rows from the count."""
    table_name = annotation_metadata["table_name"]
    count = dadb_interface.database.get_table_row_count(table_name, filter_valid=True)
    logging.info(f"{table_name} valid row count: {count}")
    assert count == 2
def test_get_table_valid_timestamp_row_count(dadb_interface, annotation_metadata):
    """Combining filter_valid with a timestamp cutoff yields no rows here."""
    table_name = annotation_metadata["table_name"]
    cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=5)
    count = dadb_interface.database.get_table_row_count(
        table_name, filter_valid=True, filter_timestamp=str(cutoff)
    )
    logging.info(f"{table_name} valid and timestamped row count: {count}")
    assert count == 0
def test_get_annotation_table_size(dadb_interface, annotation_metadata):
table_name = annotation_metadata["table_name"]
table_size = dadb_interface.database.get_annotation_table_size(table_name)
assert table_size == 3
def test_load_table(dadb_interface, annotation_metadata):
table_name = annotation_metadata["table_name"]
is_loaded = dadb_interface.database._load_table(table_name)
assert is_loaded is True
table_name = "non_existing_table"
is_loaded = dadb_interface.database._load_table(table_name)
assert is_loaded is False
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,981
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/migration/alembic/versions/309cf493a1e2_adding_warning_field.py
|
"""adding warning field
Revision ID: 309cf493a1e2
Revises: 8fdc843fc202
Create Date: 2022-10-20 10:25:05.014779
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '309cf493a1e2'
down_revision = '8fdc843fc202'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add a nullable free-form warning/notice text column to table metadata.
    # Nullable so existing rows need no backfill.
    op.add_column('annotation_table_metadata', sa.Column('notice_text', sa.Text(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): remove the notice_text column. Any stored notice
    # text is lost on downgrade.
    op.drop_column('annotation_table_metadata', 'notice_text')
    # ### end Alembic commands ###
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,982
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/migration/alembic/versions/5a1d7c0ad006_add_status_column.py
|
"""add status column
Revision ID: 5a1d7c0ad006
Revises: 7c79eff751b4
Create Date: 2022-08-16 13:47:38.842604
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "5a1d7c0ad006"
down_revision = "7c79eff751b4"
branch_labels = None
depends_on = None
def upgrade():
    """Add a non-nullable ``status`` enum column to ``analysisversion``.

    Three-step pattern so existing rows do not violate NOT NULL:
    create the enum type, add the column as nullable, backfill every
    existing row, then tighten the column to NOT NULL.
    """
    # The enum type must exist in the database before any column can use it.
    status_enum = postgresql.ENUM(
        "AVAILABLE", "RUNNING", "FAILED", "EXPIRED", name="version_status"
    )
    status_enum.create(op.get_bind())
    op.add_column(
        "analysisversion",
        sa.Column(
            "status",
            postgresql.ENUM(
                "AVAILABLE", "RUNNING", "FAILED", "EXPIRED", name="version_status"
            ),
            nullable=True,
        ),
    )
    # Backfill: pre-existing versions are treated as EXPIRED.
    op.execute("UPDATE analysisversion SET status = 'EXPIRED'")
    # Now that every row has a value, enforce NOT NULL.
    op.alter_column('analysisversion', 'status', nullable=False)
def downgrade():
    """Remove the ``status`` column and the ``version_status`` enum type.

    The original downgrade only dropped the column and left the enum type
    behind, so running ``upgrade`` again failed when ``status_enum.create``
    tried to recreate an already-existing type. Dropping the type restores
    a clean pre-upgrade state. The column must be dropped first because the
    type cannot be removed while a column still depends on it.
    """
    op.drop_column("analysisversion", "status")
    status_enum = postgresql.ENUM(
        "AVAILABLE", "RUNNING", "FAILED", "EXPIRED", name="version_status"
    )
    status_enum.drop(op.get_bind())
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,983
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/key_utils.py
|
def build_segmentation_table_name(
    annotation_table_name: str, segmentation_source: str
) -> str:
    """Derive a segmentation table name from its annotation table.

    Parameters
    ----------
    annotation_table_name : str
        existing annotation table name
    segmentation_source : str
        name of the chunkedgraph table

    Returns
    -------
    str
        ``"<annotation_table_name>__<segmentation_source>"``
    """
    return "__".join((annotation_table_name, segmentation_source))
def get_table_name_from_table_id(table_id: str) -> str:
    """Return the final ``__``-delimited component of *table_id*.

    Parameters
    ----------
    table_id : str

    Returns
    -------
    str
        table name embedded in the table id (the whole string when no
        ``__`` separator is present)
    """
    return table_id.rpartition("__")[2]
def get_dataset_name_from_table_id(table_id: str) -> str:
    """Return the second ``__``-delimited component of *table_id*.

    Parameters
    ----------
    table_id : str

    Returns
    -------
    str
        name of the aligned volume embedded in the table id

    Raises
    ------
    IndexError
        if *table_id* contains no ``__`` separator
    """
    parts = table_id.split("__", 2)
    return parts[1]
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,984
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/tests/test_errors.py
|
import pytest
from dynamicannotationdb.errors import (
TableNameNotFound,
UpdateAnnotationError,
AnnotationInsertLimitExceeded,
NoAnnotationsFoundWithID,
)
def table_not_found():
    # Helper: raise TableNameNotFound for a fixed name so tests can assert
    # on the generated message.
    raise TableNameNotFound("test_table")
def update_annotation_error():
    # Helper: raise UpdateAnnotationError with fixed ids (original=1,
    # superseding=3) for message assertions.
    raise UpdateAnnotationError(1, 3)
def annotation_insert_limit():
    # Helper: raise AnnotationInsertLimitExceeded with a fixed limit (100)
    # and attempted count (1000) for message assertions.
    raise AnnotationInsertLimitExceeded(100, 1000)
def no_annotation_found_with_id():
    # Helper: raise NoAnnotationsFoundWithID for a fixed annotation id.
    raise NoAnnotationsFoundWithID(1)
def test_table_name_not_found():
    """TableNameNotFound carries a message naming the missing table."""
    with pytest.raises(TableNameNotFound) as err:
        table_not_found()
    expected = "No table named 'test_table' exists."
    assert err.value.message == expected
def test_update_annotation_error():
    """UpdateAnnotationError names both the stale and the superseding id."""
    with pytest.raises(UpdateAnnotationError) as err:
        update_annotation_error()
    expected = (
        "Annotation with ID 1 has already been superseded by annotation ID 3, "
        "update annotation ID 3 instead"
    )
    assert err.value.message == expected
def test_annotation_insert_limit_exceeded():
    """AnnotationInsertLimitExceeded reports the limit and the attempted count."""
    with pytest.raises(AnnotationInsertLimitExceeded) as err:
        annotation_insert_limit()
    expected = "The insertion limit is 100, 1000 were attempted to be inserted"
    assert err.value.message == expected
def test_no_annotations_found_with_id():
    """NoAnnotationsFoundWithID names the id that had no annotation."""
    with pytest.raises(NoAnnotationsFoundWithID) as err:
        no_annotation_found_with_id()
    assert err.value.message == "No annotation with 1 exists"
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,985
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/schema.py
|
from typing import Sequence, Tuple
from emannotationschemas import get_schema
from emannotationschemas import models as em_models
from emannotationschemas.flatten import create_flattened_schema, flatten_dict
from emannotationschemas.schemas.base import ReferenceAnnotation, SegmentationField
from marshmallow import EXCLUDE, Schema
from .errors import SelfReferenceTableError, TableNameNotFound
class DynamicSchemaClient:
    """Thin wrapper around the ``emannotationschemas`` model/schema factories.

    Centralizes creation of SQLAlchemy models from EM annotation schemas and
    provides helpers to split flattened annotation data into its annotation
    and segmentation parts.
    """

    @staticmethod
    def get_schema(schema_type: str):
        """Return the schema class registered for *schema_type*."""
        return get_schema(schema_type)

    @staticmethod
    def get_flattened_schema(schema_type: str):
        """Return the flattened (non-nested) schema for *schema_type*."""
        Schema = get_schema(schema_type)
        return em_models.create_flattened_schema(Schema)

    @staticmethod
    def create_annotation_model(
        table_name: str,
        schema_type: str,
        table_metadata: dict = None,
        with_crud_columns: bool = True,
        reset_cache: bool = False,
    ):
        """Build the annotation-side SQLAlchemy model for a table."""
        return em_models.make_model_from_schema(
            table_name=table_name,
            schema_type=schema_type,
            table_metadata=table_metadata,
            with_crud_columns=with_crud_columns,
            reset_cache=reset_cache,
        )

    @staticmethod
    def create_segmentation_model(
        table_name: str,
        schema_type: str,
        segmentation_source: str,
        table_metadata: dict = None,
        reset_cache: bool = False,
    ):
        """Build the segmentation-side SQLAlchemy model for a table."""
        return em_models.make_model_from_schema(
            table_name=table_name,
            schema_type=schema_type,
            segmentation_source=segmentation_source,
            table_metadata=table_metadata,
            reset_cache=reset_cache,
        )

    @staticmethod
    def create_reference_annotation_model(
        table_name: str,
        schema_type: str,
        target_table: str,
        segmentation_source: str = None,
        with_crud_columns: bool = True,
        reset_cache: bool = False,
    ):
        """Build an annotation model whose rows reference *target_table*."""
        return em_models.make_model_from_schema(
            table_name=table_name,
            schema_type=schema_type,
            segmentation_source=segmentation_source,
            table_metadata={"reference_table": target_table},
            with_crud_columns=with_crud_columns,
            reset_cache=reset_cache,
        )

    @staticmethod
    def create_flat_model(
        table_name: str,
        schema_type: str,
        table_metadata: dict = None,
        with_crud_columns: bool = False,
        reset_cache: bool = False,
    ):
        """Build a single flat (merged annotation+segmentation) model."""
        return em_models.make_flat_model(
            table_name=table_name,
            schema_type=schema_type,
            table_metadata=table_metadata,
            with_crud_columns=with_crud_columns,
            reset_cache=reset_cache
        )

    @staticmethod
    def create_dataset_models(
        aligned_volume: str,
        schemas_and_tables: Sequence[tuple],
        segmentation_source: str = None,
        include_contacts: bool = False,
        metadata_dict: dict = None,
        with_crud_columns: bool = True,
        reset_cache: bool = False,
    ):
        """Build models for every (schema, table) pair in a dataset.

        NOTE(review): ``reset_cache`` is accepted but not forwarded to
        ``make_dataset_models`` — confirm against the emannotationschemas
        signature before relying on it.
        """
        return em_models.make_dataset_models(
            aligned_volume,
            schemas_and_tables,
            segmentation_source,
            include_contacts,
            metadata_dict,
            with_crud_columns,
        )

    @staticmethod
    def get_split_models(
        table_name: str,
        schema_type: str,
        segmentation_source: str,
        table_metadata: dict = None,
        anno_crud_columns: bool = True,
        seg_crud_columns: bool = False,
        reset_cache: bool = False,
    ):
        """Return the annotation and segmentation models from a
        supplied schema. If the schema type requires no segmentation fields
        return only the annotation model and None for the segmentation model.
        Parameters
        ----------
        table_name : str
            name of the table
        schema_type :
            schema type, must be a valid type (hint see :func:`emannotationschemas.get_types`)
        segmentation_source : str, optional
            pcg table to use for root id lookups will return the
            segmentation model if not None, by default None
        table_metadata : dict, optional
            optional metadata dict, by default None
        anno_crud_columns : bool, optional
            add additional created, deleted and superceded_id columns on
            the annotation table model, by default True
        seg_crud_columns : bool, optional
            add additional created, deleted and superceded_id columns on
            the segmentation table model, by default False
        """
        anno_model = em_models.make_model_from_schema(
            table_name=table_name,
            schema_type=schema_type,
            segmentation_source=None,
            table_metadata=table_metadata,
            with_crud_columns=anno_crud_columns,
            reset_cache=reset_cache,
        )
        # Only schemas containing SegmentationField columns get a companion
        # segmentation model.
        if DynamicSchemaClient.is_segmentation_table_required(schema_type):
            seg_model = em_models.make_model_from_schema(
                table_name=table_name,
                schema_type=schema_type,
                segmentation_source=segmentation_source,
                table_metadata=table_metadata,
                with_crud_columns=seg_crud_columns,
                reset_cache=reset_cache,
            )
            return anno_model, seg_model
        return anno_model, None

    @staticmethod
    def flattened_schema_data(data):
        """Flatten nested annotation data into a single-level dict."""
        return flatten_dict(data)

    @staticmethod
    def is_segmentation_table_required(schema_type: str) -> bool:
        """Check if schema contains any 'Segmentation Fields' column
        types and returns boolean"""
        schema = get_schema(schema_type)
        flat_schema = create_flattened_schema(schema)
        segmentation_columns = {
            key: field
            for key, field in flat_schema._declared_fields.items()
            if isinstance(field, SegmentationField)
        }
        return bool(segmentation_columns)

    @staticmethod
    def split_flattened_schema(schema_type: str):
        """Return the (annotation, segmentation) halves of a flattened schema."""
        schema_type = get_schema(schema_type)
        (
            flat_annotation_schema,
            flat_segmentation_schema,
        ) = em_models.split_annotation_schema(schema_type)
        return flat_annotation_schema, flat_segmentation_schema

    def split_flattened_schema_data(
        self, schema_type: str, data: dict
    ) -> Tuple[dict, dict]:
        """Validate *data* against the schema, flatten it if nested, and
        split it into annotation and segmentation dicts."""
        schema_type = get_schema(schema_type)
        schema = schema_type(context={"postgis": True})
        data = schema.load(data, unknown=EXCLUDE)
        # Loaded data may still contain nested dicts (e.g. point fields);
        # flatten before mapping onto the flat schemas.
        check_is_nested = any(isinstance(i, dict) for i in data.values())
        if check_is_nested:
            data = flatten_dict(data)
        (
            flat_annotation_schema,
            flat_segmentation_schema,
        ) = em_models.split_annotation_schema(schema_type)
        return (
            self._map_values_to_schema(data, flat_annotation_schema),
            self._map_values_to_schema(data, flat_segmentation_schema),
        )

    @staticmethod
    def _map_values_to_schema(data: dict, schema: Schema):
        """Project *data* down to the keys declared on *schema*."""
        # Iterate keys only: the declared field objects themselves are unused.
        return {key: data[key] for key in schema._declared_fields if key in data}

    def _parse_schema_metadata_params(
        self,
        schema_type: str,
        table_name: str,
        table_metadata: dict,
        existing_tables: list,
    ):
        """Validate table metadata and extract reference-table parameters.

        Returns
        -------
        tuple
            (reference_table, track_updates); either may be None when the
            corresponding key is absent from *table_metadata*.

        Raises
        ------
        TypeError
            if a reference_table is given for a non-reference schema
        SelfReferenceTableError
            if the table references itself
        TableNameNotFound
            if the referenced table does not exist
        """
        reference_table = None
        track_updates = None
        for param, value in table_metadata.items():
            if param == "reference_table":
                Schema = self.get_schema(schema_type)
                if not issubclass(Schema, ReferenceAnnotation):
                    raise TypeError(
                        "Reference table must be a ReferenceAnnotation schema type"
                    )
                if table_name == value:
                    # BUG FIX: the original message interpolated
                    # `reference_table`, which is still None at this point;
                    # report the offending target table instead.
                    raise SelfReferenceTableError(
                        f"{value} must target a different table not {table_name}"
                    )
                if value not in existing_tables:
                    raise TableNameNotFound(value)
                reference_table = value
            elif param == "track_target_id_updates":
                track_updates = value
        return reference_table, track_updates
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,986
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/migration/migrate.py
|
import logging
from geoalchemy2.types import Geometry
from psycopg2.errors import DuplicateSchema
from sqlalchemy import MetaData, create_engine, ForeignKeyConstraint
from sqlalchemy.engine.url import make_url
from sqlalchemy.pool import NullPool
from sqlalchemy import MetaData, Table
from sqlalchemy.sql.ddl import AddConstraint
from sqlalchemy.schema import DropConstraint
from sqlalchemy.exc import ProgrammingError
from dynamicannotationdb.database import DynamicAnnotationDB
from dynamicannotationdb.models import AnnoMetadata
from dynamicannotationdb.schema import DynamicSchemaClient
from emannotationschemas.errors import UnknownAnnotationTypeException
from emannotationschemas.migrations.run import run_migration
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# SQL commands
def alter_column_name(table_name: str, current_col_name: str, new_col_name: str) -> str:
    """Build the SQL that renames *current_col_name* to *new_col_name* on *table_name*."""
    return "ALTER TABLE {} RENAME {} TO {}".format(
        table_name, current_col_name, new_col_name
    )
def add_column(table_name: str, column_spec: str) -> str:
    """Build the SQL that adds *column_spec* to *table_name* if not already present."""
    return " ".join(["ALTER TABLE", table_name, "ADD IF NOT EXISTS", column_spec])
def add_primary_key(table_name: str, column_name: str):
    """Build the SQL that declares *column_name* as the primary key of *table_name*."""
    return "ALTER TABLE {} add primary key({})".format(table_name, column_name)
def add_index(table_name: str, column_name: str, is_spatial=False):
    """Build a CREATE INDEX statement for *column_name* on *table_name*.

    Spatial columns get a GIST n-d geometry index named ``idx_<table>_<col>``;
    all other columns get a default index named ``ix_<table>_<col>``.
    """
    if is_spatial:
        index_name = f"idx_{table_name}_{column_name}"
        index_target = (
            f"{table_name} USING GIST ({column_name} gist_geometry_ops_nd)"
        )
    else:
        index_name = f"ix_{table_name}_{column_name}"
        index_target = f"{table_name} ({column_name})"
    return f"CREATE INDEX IF NOT EXISTS {index_name} ON {index_target}"
def add_foreign_key(
    table_name: str,
    foreign_key_name: str,
    foreign_key_column: str,
    foreign_key_table: str,
    target_column: str,
):
    # Build the SQL that adds a named foreign-key constraint from
    # table_name.foreign_key_column to foreign_key_table.target_column.
    # Identifiers are double-quoted; constraint name is not.
    return f"""ALTER TABLE "{table_name}"
            ADD CONSTRAINT {foreign_key_name}
            FOREIGN KEY ("{foreign_key_column}")
            REFERENCES "{foreign_key_table}" ("{target_column}");"""
class DynamicMigration:
"""Migrate schemas with new columns and handle index creation."""
    def __init__(
        self, sql_uri: str, target_db: str, schema_db: str = "schemas"
    ) -> None:
        """Connect to the target and schema databases, creating and
        migrating the schema database if it does not exist yet.

        Parameters
        ----------
        sql_uri : str
            base SQL URI; its trailing database name is stripped and replaced
        target_db : str
            database holding the live annotation tables
        schema_db : str, optional
            database holding the reference schema models, by default "schemas"
        """
        # Strip the database name off the URI to get the server base URI.
        self._base_uri = sql_uri.rpartition("/")[0]
        self.target_db_sql_uri = make_url(f"{self._base_uri}/{target_db}")
        self.schema_sql_uri = make_url(f"{self._base_uri}/{schema_db}")
        self.target_database, self.target_inspector = self.setup_inspector(
            self.target_db_sql_uri
        )
        self.schema_client = DynamicSchemaClient()
        # AUTOCOMMIT is required because CREATE DATABASE cannot run inside a
        # transaction block in PostgreSQL.
        temp_engine = create_engine(
            self._base_uri,
            poolclass=NullPool,
            isolation_level="AUTOCOMMIT",
            pool_pre_ping=True,
        )
        with temp_engine.connect() as connection:
            connection.execute("commit")
            # NOTE(review): schema_db is interpolated into raw SQL; it is
            # deployment configuration, not user input — confirm callers
            # never pass untrusted values.
            database_exists = connection.execute(
                f"SELECT 1 FROM pg_catalog.pg_database WHERE datname = '{schema_db}'"
            )
            if not database_exists.fetchone():
                logging.warning(f"Cannot connect to {schema_db}, attempting to create")
                connection.execute(f"CREATE DATABASE {schema_db}")
                logging.info(f"{schema_db} created")
        temp_engine.dispose()
        try:
            logging.info("Running migrations")
            run_migration(str(self.schema_sql_uri))
        except DuplicateSchema as e:
            # Schema database already migrated; not fatal.
            logging.warning(f"Error migrating schema database: {e}")
        self.schema_database, self.schema_inspector = self.setup_inspector(
            self.schema_sql_uri
        )
def setup_inspector(self, sql_uri: str):
database_client = DynamicAnnotationDB(sql_uri)
database_inspector = database_client.inspector
return database_client, database_inspector
def get_table_info(self):
target_tables = sorted(set(self.target_inspector.get_table_names()))
schema_tables = sorted(set(self.schema_inspector.get_table_names()))
return target_tables, schema_tables
    def _get_target_schema_types(self, schema_type: str):
        """Return (table_name, schema_type) rows for every annotation table
        registered with *schema_type*.

        The ``get_schema`` call only validates that *schema_type* is a known
        EM annotation schema; its return value is unused.
        """
        try:
            schema = self.schema_client.get_schema(schema_type)
        except UnknownAnnotationTypeException as e:
            # NOTE(review): execution deliberately(?) falls through to the
            # query even for unknown schema types — confirm this should not
            # return early instead.
            logging.info(f"Table {schema_type} is not an em annotation schemas: {e}")
        return (
            self.target_database.cached_session.query(
                AnnoMetadata.table_name, AnnoMetadata.schema_type
            )
            .filter(AnnoMetadata.schema_type == schema_type)
            .all()
        )
def _get_table_schema_type(self, table_name: str):
schema_type = (
self.target_database.cached_session.query(AnnoMetadata.schema_type)
.filter(AnnoMetadata.table_name == table_name)
.one()
)
return schema_type[0]
    def get_target_schema(self, table_name: str):
        """Return SQL metadata for *table_name* from the target database."""
        return self.target_database.get_table_sql_metadata(table_name)
    def get_schema_from_migration(self, schema_table_name: str):
        """Return SQL metadata for *schema_table_name* from the schema database."""
        return self.schema_database.get_table_sql_metadata(schema_table_name)
def upgrade_table_from_schema(self, table_name: str, dry_run: bool = True):
"""Migrate a schema if the schema model is present in the database.
If there are missing columns in the database it will add new
columns.
Parameters
----------
table_name : str
table to migrate.
dry_run : bool
return a map of columns to add, does not affect the database.
"""
if table_name not in self.target_database._get_existing_table_names():
raise f"{table_name} not found."
db_table, model_table, columns_to_create = self.get_table_diff(table_name)
ddl_client = self.target_database.engine.dialect.ddl_compiler(
self.target_database.engine.dialect, None
)
migrations = {}
for column in columns_to_create:
model_column = model_table.c.get(column)
col_spec = ddl_client.get_column_specification(model_column)
sql = add_column(db_table.name, col_spec)
sql = self.set_default_non_nullable(db_table, column, model_column, sql)
col_to_migrate = f"{db_table.name}.{model_column.name}"
logging.info(f"Adding column {col_to_migrate}")
migrations[col_to_migrate] = sql
# get missing table indexes
index_sql_commands = self.get_missing_indexes(table_name)
migration_map = {}
if migrations:
migration_map = {"Table": table_name, "Columns": migrations}
if index_sql_commands:
migration_map["Indexes"] = index_sql_commands
if dry_run:
logging.info(
"Dry run mode. Set dry run to False to apply changes to the db."
)
return migration_map
try:
engine = self.target_database.engine
with engine.connect() as conn:
if migrations:
for command in migrations.values():
logging.info(f"Running command: {command}")
conn.execute(command)
if index_sql_commands:
for index_name, sql_command in index_sql_commands.items():
logging.info(f"Creating index: {index_name}")
conn.execute(sql_command)
self.target_database.base.metadata.reflect()
return migration_map
except Exception as e:
self.target_database.cached_session.rollback()
raise e
def apply_cascade_option_to_tables(self, dry_run: bool = True):
metadata = MetaData(bind=self.target_database.engine)
metadata.reflect(bind=self.target_database.engine)
fkey_mappings = []
for table in metadata.tables:
table_metadata = self.target_database.get_table_metadata(table)
if table_metadata:
table = metadata.tables[table]
try:
fkey_mapping = self.add_cascade_delete_to_fkey(table, dry_run)
if fkey_mapping:
fkey_mappings.append(fkey_mapping)
except Exception as error:
raise error
if not fkey_mappings:
logging.info("No tables to migrate fkey constraints")
return None
return fkey_mappings
    def add_cascade_delete_to_fkey(self, table: Table, dry_run: bool = True):
        """Recreate every foreign key on *table* that lacks an ``ondelete``
        option so it uses ON DELETE CASCADE.

        When ``dry_run`` is False each constraint is dropped and re-added
        per foreign key inside its own transaction. Returns a map of
        dropped/added constraint DDL, or None when nothing needed changing.
        """
        table_name = table.name
        fkeys_to_drop = {}
        fkey_to_add = {}
        for fk in self.target_inspector.get_foreign_keys(table_name):
            # check if the foreign key has no 'ondelete' option
            if not fk["options"].get("ondelete"):
                # drop the foreign key constraint
                fkey = ForeignKeyConstraint(
                    [table.c[c] for c in fk["constrained_columns"]],
                    [fk["referred_table"] + "." + c for c in fk["referred_columns"]],
                    name=fk["name"],
                )
                drop_constraint = DropConstraint(fkey)
                fkeys_to_drop[fkey.name] = str(drop_constraint)
                # create a new foreign key constraint with the specified 'ondelete' option
                new_fkey = ForeignKeyConstraint(
                    [table.c[c] for c in fk["constrained_columns"]],
                    [fk["referred_table"] + "." + c for c in fk["referred_columns"]],
                    name=fk["name"],
                    ondelete="CASCADE",
                )
                add_constraint = AddConstraint(new_fkey)
                fkey_to_add[new_fkey.name] = str(add_constraint)
                if not dry_run:
                    # Drop and re-add atomically for this one constraint.
                    with self.target_database.engine.begin() as conn:
                        conn.execute(drop_constraint)
                        conn.execute(add_constraint)
                    logging.info(f"Table {table_name} altered with CASCADE DELETE")
        return (
            {
                f"Table Name: {table_name}": {
                    "Fkeys to drop": fkeys_to_drop,
                    "Fkeys to add": fkey_to_add,
                }
            }
            if fkeys_to_drop or fkey_to_add
            else None
        )
def upgrade_annotation_models(self, dry_run: bool = True):
"""Upgrades annotation models present in the database
if underlying schemas have changed.
Raises
------
e
SQL Error
"""
tables = self.target_database._get_existing_table_names(filter_valid=True)
migrations = []
for table in tables:
migration_map = self.upgrade_table_from_schema(table, dry_run)
if migration_map:
migrations.append(migration_map)
return migrations
def get_table_diff(self, table_name):
target_model_schema = (
self.target_database.cached_session.query(AnnoMetadata.schema_type)
.filter(AnnoMetadata.table_name == table_name)
.one()
)
schema = target_model_schema[0]
db_cols = self.target_inspector.get_columns(table_name)
schema_cols = self.schema_inspector.get_columns(schema)
formatted_schema_columns = self._column_names(schema_cols)
formatted_db_columns = self._column_names(db_cols)
db_model = self.target_database.get_table_sql_metadata(table_name)
schema_model = self.schema_database.get_table_sql_metadata(schema)
columns_to_create = set(formatted_schema_columns) - set(formatted_db_columns)
return db_model, schema_model, columns_to_create
    def set_default_non_nullable(self, db_table, column, model_column, sql):
        """Adjust an ADD COLUMN statement for a non-nullable model column.

        For the ``created`` column, appends a DEFAULT of the table's
        AnnoMetadata creation time so existing rows satisfy NOT NULL.
        For any other non-nullable column, relaxes the constraint by
        mutating ``model_column.nullable`` in place (side effect) so the
        generated spec becomes nullable. Returns the (possibly amended) SQL.
        """
        if not model_column.nullable:
            if column == "created":
                table_name = db_table.name
                creation_time = (
                    self.target_database.cached_session.query(AnnoMetadata.created)
                    .filter(AnnoMetadata.table_name == table_name)
                    .one()
                )
                sql += f" DEFAULT '{creation_time[0].strftime('%Y-%m-%d %H:%M:%S')}'"
            else:
                # No sensible default available: allow NULLs instead.
                model_column.nullable = True
        return sql
def extract_target_id(self, indexes: dict) -> dict:
return {
"reference_table": index.get("foreign_key_table")
for index in indexes.values()
if index.get("foreign_key_column") == "target_id"
}
    def get_table_indexes(self, table_name: str, db: str = "target"):
        """Reflect current indexes, primary key(s) and foreign keys
        on given target table using SQLAlchemy inspector method.
        Args:
            table_name (str): target table to reflect
            db (str): which inspector to use, "target" or "schema"
        Returns:
            dict: Map of reflected indices on given table, keyed by
            lowercased index/constraint name; None if reflection fails.
        """
        inspector = getattr(self, f"{db}_inspector")
        try:
            pk_columns = inspector.get_pk_constraint(table_name)
            indexed_columns = inspector.get_indexes(table_name)
            foreign_keys = inspector.get_foreign_keys(table_name)
        except Exception as e:
            logging.error(f"No table named '{table_name}', error: {e}")
            return None
        index_map = {}
        if pk_columns:
            # Names are lowercased throughout so maps compare consistently
            # against names generated in get_index_from_model.
            pkey_name = pk_columns.get("name").lower()
            pk_name = {"primary_key_name": pkey_name}
            if pk_name["primary_key_name"]:
                pk = {
                    "column_name": pk_columns["constrained_columns"][0],
                    "index_name": pkey_name,
                    "type": "primary_key",
                }
                index_map[pkey_name] = pk
        if indexed_columns:
            for index in indexed_columns:
                dialect_options = index.get("dialect_options", None)
                index_name = index["name"].lower()
                # NOTE(review): only the first column of a composite index
                # is recorded — confirm multi-column indexes are not used.
                indx_map = {
                    "column_name": index["column_names"][0],
                    "index_name": index_name,
                }
                if dialect_options:
                    if "gist" in dialect_options.values():
                        indx_map.update(
                            {
                                "type": "spatial_index",
                                "dialect_options": index.get("dialect_options"),
                            }
                        )
                else:
                    indx_map.update({"type": "index", "dialect_options": None})
                index_map[index_name] = indx_map
        if foreign_keys:
            for foreign_key in foreign_keys:
                foreign_key_name = foreign_key["name"].lower()
                fk_data = {
                    "column_name": foreign_key["referred_columns"][0],
                    "type": "foreign_key",
                    "foreign_key_name": foreign_key_name,
                    "foreign_key_table": foreign_key["referred_table"],
                    "foreign_key_column": foreign_key["constrained_columns"][0],
                    "target_column": foreign_key["referred_columns"][0],
                }
                index_map[foreign_key_name] = fk_data
        return index_map
def get_index_from_model(self, table_name: str, model):
"""Generate index mapping, primary key and foreign keys(s)
from supplied SQLAlchemy model. Returns an index map.
Args:
model (SqlAlchemy Model): database model to reflect indices
Returns:
dict: Index map
"""
model = model.__table__
index_map = {}
for column in model.columns:
if column.primary_key:
pk_index_name = f"{table_name}_pkey".lower()
pk = {
"column_name": column.name,
"index_name": pk_index_name,
"type": "primary_key",
}
index_map[pk_index_name] = pk
if column.index:
index_name = f"ix_{table_name}_{column.name}"
indx_map = {
"column_name": column.name,
"index_name": index_name,
"type": "index",
"dialect_options": None,
}
index_map[index_name] = indx_map
if isinstance(column.type, Geometry):
sptial_index_name = f"idx_{table_name}_{column.name}".lower()
spatial_index_map = {
"column_name": column.name,
"index_name": sptial_index_name,
"type": "spatial_index",
"dialect_options": {"postgresql_using": "gist"},
}
index_map[sptial_index_name] = spatial_index_map
if column.foreign_keys:
metadata_obj = MetaData()
metadata_obj.reflect(bind=self.target_database.engine)
target_table = metadata_obj.tables.get(table_name)
foreign_keys = list(target_table.foreign_keys)
for foreign_key in foreign_keys:
(
target_table_name,
target_column,
) = foreign_key.target_fullname.split(".")
foreign_key_name = foreign_key.name.lower()
foreign_key_map = {
"type": "foreign_key",
"column_name": foreign_key.constraint.column_keys[0],
"foreign_key_name": foreign_key_name,
"foreign_key_table": target_table_name,
"foreign_key_column": foreign_key.constraint.column_keys[0],
"target_column": target_column,
}
index_map[foreign_key_name] = foreign_key_map
return index_map
def drop_table_indexes(self, table_name: str):
    """Drop all indexes and constraints on the target table.

    Args:
        table_name (str): target table to drop constraints and indices from

    Returns:
        bool | str: True if constraints/indices were dropped, or an
        informational message when the table has none.
    """
    indices = self.get_table_indexes(table_name)
    if not indices:
        return f"No indices on '{table_name}' found."
    alter_prefix = f"ALTER TABLE {table_name}"
    constraints_list = []
    for column_info in indices.values():
        if "foreign_key" in column_info["type"]:
            constraints_list.append(
                f"{alter_prefix} DROP CONSTRAINT IF EXISTS {column_info['foreign_key_name']}"
            )
        if "primary_key" in column_info["type"]:
            constraints_list.append(
                f"{alter_prefix} DROP CONSTRAINT IF EXISTS {column_info['index_name']}"
            )
    statements = []
    if constraints_list:
        # BUG FIX: only emit the ALTER TABLE ... CASCADE statement when at
        # least one constraint exists; the original produced invalid SQL
        # (" CASCADE;") for tables that have plain indexes only.
        statements.append(f"{'; '.join(constraints_list)} CASCADE;")
    # "index" also matches "spatial_index", so both are dropped here.
    index_list = [
        col["index_name"] for col in indices.values() if "index" in col["type"]
    ]
    if index_list:
        statements.append(f"DROP INDEX {', '.join(index_list)};")
    command = " ".join(statements)
    engine = self.target_database.engine
    with engine.connect() as conn:
        conn.execute(command)
    return True
def get_missing_indexes(self, table_name: str, model=None):
    """Find indexes declared on the model but missing from the live table.

    Compares the indexes currently present on ``table_name`` against those
    generated from the (supplied or reconstructed) SQLAlchemy model and
    returns the SQL commands required to create the missing ones.

    Args:
        table_name (str): target table to inspect
        model (SQLAlchemy model, optional): model to compare against; when
            omitted, one is built from the table's schema metadata.

    Returns:
        dict: mapping of "<column_name>_<index_type>" -> SQL command string
    """
    current_indexes = self.get_table_indexes(table_name, "target")
    table_schema_type = self._get_table_schema_type(table_name)
    table_metadata = self.extract_target_id(current_indexes)
    if not model:
        # NOTE(review): result was never used by the original either —
        # confirm whether this call is needed for side effects.
        schema_model = self.get_schema_from_migration(table_schema_type)
        model = self.schema_client.create_annotation_model(
            f"ref_{table_name}", table_schema_type, table_metadata, True
        )
    model_indexes = self.get_index_from_model(table_name, model)
    # An index is "missing" when its column has no index of any kind yet.
    indexed_columns = {index["column_name"] for index in current_indexes.values()}
    missing_indexes = [
        key
        for key, value in model_indexes.items()
        if value["column_name"] not in indexed_columns
    ]
    commands = {}
    for index in missing_indexes:
        index_info = model_indexes[index]
        index_type = index_info["type"]
        column_name = index_info["column_name"]
        if index_type == "primary_key":
            command = add_primary_key(table_name, column_name)
        elif index_type == "index":
            command = add_index(table_name, column_name, is_spatial=False)
        elif index_type == "spatial_index":
            command = add_index(table_name, column_name, is_spatial=True)
        elif index_type == "foreign_key":
            # BUG FIX: the original looked these up via
            # model_indexes[column_name] (a KeyError, since the map is keyed
            # by index name) and appended to the list being iterated, which
            # could loop forever.
            command = add_foreign_key(
                table_name,
                index_info["foreign_key_name"],
                index_info["foreign_key_column"],
                index_info["foreign_key_table"],
                index_info["target_column"],
            )
        else:
            # Unknown index type: skip instead of reusing a stale command.
            continue
        commands[f"{column_name}_{index_type}"] = command
    return commands
@staticmethod
def _column_names(tables):
    """Extract column names from a model, a Table, or reflection output.

    Accepts a declarative model (via ``__table__``), a SQLAlchemy Table
    (via ``.columns``), or an iterable of reflection dicts; note the
    dict-iterable case returns a list while the others return a set.
    """
    if hasattr(tables, "__table__"):
        return {column.name for column in tables.__table__.columns}
    if hasattr(tables, "columns"):
        return {column.name for column in tables.columns}
    # Fallback: an iterable of reflection dicts (e.g. inspector output).
    return [entry.get("name") for entry in tables]
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,987
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/tests/conftest.py
|
import logging
import time
import uuid
import warnings
import docker
import psycopg2
import pytest
from dynamicannotationdb import DynamicAnnotationInterface
logging.basicConfig(level=logging.DEBUG)
test_logger = logging.getLogger()
def pytest_addoption(parser):
    """Register the ``--docker`` command line flag for the test session."""
    option_kwargs = {
        "action": "store",
        "default": False,
        "help": "Use docker for postgres testing",
    }
    parser.addoption("--docker", **option_kwargs)
@pytest.fixture(scope="session")
def docker_mode(request):
return request.config.getoption("--docker")
def pytest_configure(config):
    """Register the custom ``docker`` marker with pytest."""
    marker_line = "docker: use postgres in docker"
    config.addinivalue_line("markers", marker_line)
@pytest.fixture(scope="session")
def database_metadata() -> dict:
yield {
"postgis_docker_image": "postgis/postgis:13-master",
"db_host": "localhost",
"sql_uri": "postgresql://postgres:postgres@localhost:5432/test_volume",
}
@pytest.fixture(scope="session")
def annotation_metadata():
yield {
"aligned_volume": "test_volume",
"table_name": "anno_test",
"schema_type": "synapse",
"pcg_table_name": "test_pcg",
"voxel_resolution_x": 4.0,
"voxel_resolution_y": 4.0,
"voxel_resolution_z": 40.0,
}
@pytest.fixture(scope="session", autouse=True)
def postgis_server(docker_mode, database_metadata: dict) -> None:
postgis_docker_image = database_metadata["postgis_docker_image"]
sql_uri = database_metadata["sql_uri"]
if docker_mode:
test_logger.info(f"PULLING {postgis_docker_image} IMAGE")
docker_client = docker.from_env()
try:
docker_client.images.pull(repository=postgis_docker_image)
except Exception as e:
test_logger.exception(f"Failed to pull postgres image {e}")
container_name = f"test_postgis_server_{uuid.uuid4()}"
test_container = docker_client.containers.run(
image=postgis_docker_image,
detach=True,
hostname="test_postgres",
auto_remove=True,
name=container_name,
environment=[
"POSTGRES_USER=postgres",
"POSTGRES_PASSWORD=postgres",
"POSTGRES_DB=test_volume",
],
ports={"5432/tcp": 5432},
)
test_logger.info("STARTING IMAGE")
try:
time.sleep(10)
check_database(sql_uri)
except Exception as e:
raise e
yield
if docker_mode:
warnings.filterwarnings(
action="ignore", message="unclosed", category=ResourceWarning
)
container = docker_client.containers.get(container_name)
container.stop()
@pytest.fixture(scope="session")
def dadb_interface(postgis_server, database_metadata, annotation_metadata):
sql_uri = database_metadata["sql_uri"]
aligned_volume = annotation_metadata["aligned_volume"]
yield DynamicAnnotationInterface(sql_uri, aligned_volume)
def check_database(sql_uri: str) -> None:  # pragma: no cover
    """Probe the database at ``sql_uri`` with a trivial query.

    NOTE(review): exceptions are logged and swallowed (as in the
    original), so a failed connection never propagates to the caller —
    confirm whether postgis_server should actually fail the session here.
    """
    conn = None
    try:
        test_logger.info("ATTEMPT TO CONNECT")
        conn = psycopg2.connect(sql_uri)
        with conn.cursor() as cur:
            cur.execute("SELECT 1")
        test_logger.info("CONNECTED")
    except Exception as e:
        test_logger.info(e)
    finally:
        # BUG FIX: close the connection even when connect/execute raises
        # partway; the original leaked both cursor and connection on error.
        if conn is not None:
            conn.close()
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
29,988
|
seung-lab/DynamicAnnotationDB
|
refs/heads/master
|
/dynamicannotationdb/migration/alembic/versions/ef5c2d7f96d8_initial_live_db_models.py
|
"""Initial Live DB models
Revision ID: ef5c2d7f96d8
Revises:
Create Date: 2022-08-08 09:59:29.189065
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection
from sqlalchemy import engine_from_config
# revision identifiers, used by Alembic.
revision = "ef5c2d7f96d8"
# down_revision of None marks this as the first migration in the chain.
down_revision = None
branch_labels = None
depends_on = None
def get_tables():
    """Return the table names present in the migration's target database."""
    config = op.get_context().config
    section = config.get_section(config.config_ini_section)
    engine = engine_from_config(section, prefix="sqlalchemy.")
    inspector = reflection.Inspector.from_engine(engine)
    return inspector.get_table_names()
def upgrade():
    """Create the live-db metadata tables if they do not already exist.

    Each table is guarded by an existence check so this migration can be
    stamped onto a database that already contains some of the tables.
    """
    tables = get_tables()
    # Materialization versions of a datastack.
    if "analysisversion" not in tables:
        op.create_table(
            "analysisversion",
            sa.Column("id", sa.Integer(), nullable=False),
            sa.Column("datastack", sa.String(length=100), nullable=False),
            sa.Column("version", sa.Integer(), nullable=False),
            sa.Column("time_stamp", sa.DateTime(), nullable=False),
            sa.Column("valid", sa.Boolean(), nullable=True),
            sa.Column("expires_on", sa.DateTime(), nullable=True),
            sa.PrimaryKeyConstraint("id"),
        )
    # Tables included in a given materialization version (FK to analysisversion).
    if "analysistables" not in tables:
        op.create_table(
            "analysistables",
            sa.Column("id", sa.Integer(), nullable=False),
            sa.Column("aligned_volume", sa.String(length=100), nullable=False),
            sa.Column("schema", sa.String(length=100), nullable=False),
            sa.Column("table_name", sa.String(length=100), nullable=False),
            sa.Column("valid", sa.Boolean(), nullable=True),
            sa.Column("created", sa.DateTime(), nullable=False),
            sa.Column("analysisversion_id", sa.Integer(), nullable=True),
            sa.ForeignKeyConstraint(
                ["analysisversion_id"],
                ["analysisversion.id"],
            ),
            sa.PrimaryKeyConstraint("id"),
        )
    # Metadata describing each annotation table (unique by table_name).
    if "annotation_table_metadata" not in tables:
        op.create_table(
            "annotation_table_metadata",
            sa.Column("id", sa.Integer(), nullable=False),
            sa.Column("schema_type", sa.String(length=100), nullable=False),
            sa.Column("table_name", sa.String(length=100), nullable=False),
            sa.Column("valid", sa.Boolean(), nullable=True),
            sa.Column("created", sa.DateTime(), nullable=False),
            sa.Column("deleted", sa.DateTime(), nullable=True),
            sa.Column("user_id", sa.String(length=255), nullable=False),
            sa.Column("description", sa.Text(), nullable=False),
            sa.Column("reference_table", sa.String(length=100), nullable=True),
            sa.Column("flat_segmentation_source", sa.String(length=300), nullable=True),
            sa.Column("voxel_resolution_x", sa.Float(), nullable=False),
            sa.Column("voxel_resolution_y", sa.Float(), nullable=False),
            sa.Column("voxel_resolution_z", sa.Float(), nullable=False),
            sa.PrimaryKeyConstraint("id"),
            sa.UniqueConstraint("table_name"),
        )
    # Metadata for segmentation tables; each references its annotation table.
    if "segmentation_table_metadata" not in tables:
        op.create_table(
            "segmentation_table_metadata",
            sa.Column("id", sa.Integer(), nullable=False),
            sa.Column("schema_type", sa.String(length=100), nullable=False),
            sa.Column("table_name", sa.String(length=100), nullable=False),
            sa.Column("valid", sa.Boolean(), nullable=True),
            sa.Column("created", sa.DateTime(), nullable=False),
            sa.Column("deleted", sa.DateTime(), nullable=True),
            sa.Column("segmentation_source", sa.String(length=255), nullable=True),
            sa.Column("pcg_table_name", sa.String(length=255), nullable=False),
            sa.Column("last_updated", sa.DateTime(), nullable=True),
            sa.Column("annotation_table", sa.String(length=100), nullable=True),
            sa.ForeignKeyConstraint(
                ["annotation_table"],
                ["annotation_table_metadata.table_name"],
            ),
            sa.PrimaryKeyConstraint("id"),
            sa.UniqueConstraint("table_name"),
        )
def downgrade():
    """Drop the live-db metadata tables, FK children before their parents."""
    for table in (
        "analysistables",
        "analysisversion",
        "segmentation_table_metadata",
        "annotation_table_metadata",
    ):
        op.drop_table(table)
|
{"/dynamicannotationdb/database.py": ["/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/dynamicannotationdb/__init__.py": ["/dynamicannotationdb/interface.py"], "/dynamicannotationdb/migration/__init__.py": ["/dynamicannotationdb/migration/migrate.py"], "/dynamicannotationdb/interface.py": ["/dynamicannotationdb/annotation.py", "/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py", "/dynamicannotationdb/segmentation.py"], "/dynamicannotationdb/segmentation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/key_utils.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/test_errors.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/schema.py": ["/dynamicannotationdb/errors.py"], "/dynamicannotationdb/migration/migrate.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"], "/tests/conftest.py": ["/dynamicannotationdb/__init__.py"], "/dynamicannotationdb/annotation.py": ["/dynamicannotationdb/database.py", "/dynamicannotationdb/errors.py", "/dynamicannotationdb/models.py", "/dynamicannotationdb/schema.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.