hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
38466c62ca75deaf1eb22c2b078ae07bc0a46e7f
| 1,794
|
py
|
Python
|
bookshelf/views.py
|
CleysonPH/django-bookshelf
|
aa979738c695a79d16a2beb1df7a9e4fafc696f9
|
[
"MIT"
] | 1
|
2020-04-01T21:25:06.000Z
|
2020-04-01T21:25:06.000Z
|
bookshelf/views.py
|
CleysonPH/django-bookshelf
|
aa979738c695a79d16a2beb1df7a9e4fafc696f9
|
[
"MIT"
] | 3
|
2021-03-30T13:09:30.000Z
|
2021-06-10T18:49:36.000Z
|
bookshelf/views.py
|
CleysonPH/django-bookshelf
|
aa979738c695a79d16a2beb1df7a9e4fafc696f9
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from .models import BookshelfItem
from catalog.models import Book
@login_required
def mark_book_has_read(request, pk):
    """Mark the book identified by *pk* as 'read' on the current user's shelf.

    Creates the BookshelfItem if the user does not have one for this book
    yet, otherwise updates its status, then redirects to the book detail
    page.
    """
    book = get_object_or_404(Book, pk=pk)
    # update_or_create collapses the original try/get-save/except-create
    # branches into one idiomatic (and less race-prone) call.
    BookshelfItem.objects.update_or_create(
        user=request.user,
        book=book,
        defaults={'status': 'read'},
    )
    return redirect('catalog:book-detail', pk=book.pk)
@login_required
def mark_book_has_reading(request, pk):
    """Mark the book identified by *pk* as 'reading' on the current user's shelf.

    Creates the BookshelfItem if the user does not have one for this book
    yet, otherwise updates its status, then redirects to the book detail
    page.
    """
    book = get_object_or_404(Book, pk=pk)
    # update_or_create collapses the original try/get-save/except-create
    # branches into one idiomatic (and less race-prone) call.
    BookshelfItem.objects.update_or_create(
        user=request.user,
        book=book,
        defaults={'status': 'reading'},
    )
    return redirect('catalog:book-detail', pk=book.pk)
@login_required
def mark_book_has_want_read(request, pk):
    """Mark the book identified by *pk* as 'want' (want-to-read) for the user.

    Creates the BookshelfItem if the user does not have one for this book
    yet, otherwise updates its status, then redirects to the book detail
    page.
    """
    book = get_object_or_404(Book, pk=pk)
    # update_or_create collapses the original try/get-save/except-create
    # branches into one idiomatic (and less race-prone) call.
    BookshelfItem.objects.update_or_create(
        user=request.user,
        book=book,
        defaults={'status': 'want'},
    )
    return redirect('catalog:book-detail', pk=book.pk)
| 27.181818
| 64
| 0.635452
| 202
| 1,794
| 5.455446
| 0.193069
| 0.141561
| 0.141561
| 0.179673
| 0.818512
| 0.818512
| 0.794011
| 0.794011
| 0.758621
| 0.758621
| 0
| 0.009224
| 0.274805
| 1,794
| 65
| 65
| 27.6
| 0.837817
| 0
| 0
| 0.654545
| 0
| 0
| 0.048495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.072727
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
69a43e9bc2268269b6955400c7f8a63b7e4eb578
| 269
|
py
|
Python
|
tests/parser/aggregates.count.assignment.11.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.count.assignment.11.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.count.assignment.11.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
# Parser round-trip fixture: the grammar below is fed to the parser and the
# exact same text is expected back, so both variables share one literal.
_program = """
r(Y,Z) :- Z = #count{ X: d(X,Y) }, Y = #count{ A: e(A,Z) }.
d(a,1). d(b,1).
e(a,2).
d(a,3).
e(a,1). e(b,1). e(c,1).
"""

input = _program
output = _program
| 14.157895
| 59
| 0.36803
| 74
| 269
| 1.337838
| 0.202703
| 0.121212
| 0.121212
| 0.080808
| 0.888889
| 0.888889
| 0.888889
| 0.888889
| 0.888889
| 0.888889
| 0
| 0.065116
| 0.200743
| 269
| 18
| 60
| 14.944444
| 0.395349
| 0
| 0
| 0.857143
| 0
| 0.142857
| 0.884758
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
69b5674e247350b9a9c9db9d1de673093656d727
| 7,760
|
py
|
Python
|
ai_demos/haystackai.py
|
OpenJarbas/ai_demos
|
103db27318c8ed413197318de176207e56c28584
|
[
"MIT"
] | 1
|
2021-03-15T06:34:56.000Z
|
2021-03-15T06:34:56.000Z
|
ai_demos/haystackai.py
|
OpenJarbas/ai_demos
|
103db27318c8ed413197318de176207e56c28584
|
[
"MIT"
] | null | null | null |
ai_demos/haystackai.py
|
OpenJarbas/ai_demos
|
103db27318c8ed413197318de176207e56c28584
|
[
"MIT"
] | 2
|
2020-02-27T08:22:59.000Z
|
2020-08-16T16:38:47.000Z
|
import requests
# self hosted https://github.com/itoolset/nsfw

# NOTE(review): this API key is hard-coded in the original source and sent with
# every request. It looks like a public demo key, but confirm before shipping
# — secrets should not live in source.
_API_KEY = "c1ee6e8f5a8a2a26935e38d211a0e327"

# Browser-like headers the demo endpoint expects; identical for every call.
_HEADERS = {
    "user-agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36",
    "Host": "api.haystack.ai",
    "Origin": "https://www.haystack.ai",
    "Referer": "https://www.haystack.ai/demos/Yahoo-NSFW-Demo"}


def _build_url(endpoint, model=None):
    """Return the haystack.ai request URL for *endpoint* ('custom' or 'analyze').

    When *model* is given it is appended as the ``model`` query parameter,
    matching the parameter order used by the original hard-coded URLs.
    """
    model_part = "&model=" + model if model else ""
    return ("https://api.haystack.ai/api/image/" + endpoint
            + "?output=json&limit=10000" + model_part + "&apikey=" + _API_KEY)


def _post_image(picture_path, url):
    """POST the image file at *picture_path* to *url*; return the JSON reply."""
    with open(picture_path, 'rb') as f:
        files = {'image': (picture_path, f.read(), 'image/jpeg')}
    r = requests.post(url, files=files, headers=_HEADERS)
    return r.json()


def open_nsfw(picture_path, engine="haystackai_demo"):
    """Score *picture_path* with the Yahoo open-nsfw model.

    *engine* is accepted for API compatibility but unused (as in the original).
    """
    return _post_image(picture_path, _build_url("custom", "yahoonsfw"))


def analyze(picture_path, engine="haystackai_demo"):
    """Run the generic image-analysis endpoint on *picture_path*.

    *engine* is accepted for API compatibility but unused (as in the original).
    """
    return _post_image(picture_path, _build_url("analyze"))


def hotness(picture_path):
    """Score *picture_path* with the attractiveness model."""
    return _post_image(picture_path, _build_url("analyze", "attractiveness"))


def emotion(picture_path):
    """Score *picture_path* with the emotion model."""
    return _post_image(picture_path, _build_url("analyze", "emotion"))


def nudity(picture_path):
    """Score *picture_path* with the nudity model."""
    return _post_image(picture_path, _build_url("analyze", "nudity"))


def flower_demo(picture_path):
    """Classify *picture_path* with the Oxford flower model."""
    return _post_image(picture_path, _build_url("custom", "oxfordflower"))


def yearbook_demo(picture_path):
    """Classify *picture_path* with the yearbook model."""
    return _post_image(picture_path, _build_url("custom", "yearbook"))


def ethnicity(picture_path):
    """Score *picture_path* with the ethnicity model."""
    return _post_image(picture_path, _build_url("analyze", "ethnicity"))


def gender(picture_path):
    """Score *picture_path* with the gender model."""
    return _post_image(picture_path, _build_url("analyze", "gender"))


def age(picture_path):
    """Score *picture_path* with the age model."""
    return _post_image(picture_path, _build_url("analyze", "age"))


def inception(picture_path):
    """Classify *picture_path* with the Inception model."""
    return _post_image(picture_path, _build_url("custom", "inception"))


def places(picture_path):
    """Classify *picture_path* with the Places205-AlexNet scene model."""
    return _post_image(picture_path, _build_url("custom", "places205alexnet"))
| 48.5
| 139
| 0.658505
| 1,060
| 7,760
| 4.782075
| 0.075472
| 0.094693
| 0.061551
| 0.085224
| 0.953837
| 0.953837
| 0.953837
| 0.953837
| 0.953837
| 0.953837
| 0
| 0.094747
| 0.163531
| 7,760
| 159
| 140
| 48.805031
| 0.686181
| 0.00567
| 0
| 0.81203
| 0
| 0.180451
| 0.545702
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090226
| false
| 0
| 0.007519
| 0
| 0.18797
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
69d05e16cfb25396ae127ff9c97327238b3484e8
| 6,553
|
py
|
Python
|
clouds/io/dataset.py
|
jchen42703/understanding-clouds-kaggle
|
6972deb25cdf363ae0d9a9ad26d538280613fc94
|
[
"Apache-2.0"
] | 1
|
2019-10-26T16:33:40.000Z
|
2019-10-26T16:33:40.000Z
|
clouds/io/dataset.py
|
jchen42703/understanding-clouds-kaggle
|
6972deb25cdf363ae0d9a9ad26d538280613fc94
|
[
"Apache-2.0"
] | 1
|
2019-11-08T02:50:25.000Z
|
2019-11-19T03:36:54.000Z
|
clouds/io/dataset.py
|
jchen42703/understanding-clouds-kaggle
|
6972deb25cdf363ae0d9a9ad26d538280613fc94
|
[
"Apache-2.0"
] | null | null | null |
import albumentations as albu
from albumentations import pytorch as AT
import pandas as pd
import numpy as np
import os
import cv2
from torch.utils.data import Dataset
from .utils import make_mask, make_mask_resized_dset, get_classification_label
class CloudDataset(Dataset):
    """Segmentation dataset yielding (image, mask) pairs for cloud imagery.

    Attributes
        data_folder (str): path to the image directory
        df (pd.DataFrame): dataframe with the labels
        im_ids (np.ndarray): of image names.
        masks_folder (str): path to the masks directory;
            a string here switches on `use_resized_dataset`
        transforms (albumentations.augmentation): transforms to apply
            before preprocessing. Defaults to HFlip and ToTensor
        preprocessing: ops to perform after transforms, such as
            z-score standardization. Defaults to None.
        mask_shape (tuple): mask shape (numpy format, not cv2)
    """

    def __init__(self, data_folder: str, df: pd.DataFrame, im_ids: np.array,
                 masks_folder: str=None,
                 transforms=albu.Compose([albu.HorizontalFlip(), AT.ToTensor()]),
                 preprocessing=None, mask_shape=(320, 640)):
        self.df = df
        self.data_folder = data_folder
        self.masks_folder = masks_folder
        # A string masks_folder signals that pre-resized masks are on disk.
        self.use_resized_dataset = isinstance(masks_folder, str)
        if self.use_resized_dataset:
            print(f"Using resized masks in {masks_folder}...")
        self.img_ids = im_ids
        self.transforms = transforms
        self.preprocessing = preprocessing
        self.mask_shape = mask_shape

    def __getitem__(self, idx):
        image_name = self.img_ids[idx]
        if self.use_resized_dataset:
            mask = make_mask_resized_dset(self.df, image_name,
                                          self.masks_folder,
                                          shape=self.mask_shape)
            # Re-binarize: resized masks come back with interpolated values.
            # NOTE(review): the flattened source made this line's indentation
            # ambiguous; it is placed with the resized-mask branch — confirm.
            mask = (mask > 0.9) * 1
        else:
            mask = make_mask(self.df, image_name)
        # loading image (cv2 reads BGR; convert to RGB)
        img = cv2.imread(os.path.join(self.data_folder, image_name))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # apply augmentations
        augmented = self.transforms(image=img, mask=mask)
        img, mask = augmented["image"], augmented["mask"]
        if self.preprocessing:
            preprocessed = self.preprocessing(image=img, mask=mask)
            img, mask = preprocessed["image"], preprocessed["mask"]
        return img, mask

    def __len__(self):
        return len(self.img_ids)
class ClassificationCloudDataset(Dataset):
    """Classification dataset yielding (image, label) pairs.

    Attributes
        data_folder (str): path to the image directory
        df (pd.DataFrame): dataframe with the labels
        im_ids (np.ndarray): of image names.
        transforms (albumentations.augmentation): transforms to apply
            before preprocessing. Defaults to HFlip and ToTensor
        preprocessing: ops to perform after transforms, such as
            z-score standardization. Defaults to None.
    """

    def __init__(self, data_folder: str, df: pd.DataFrame, im_ids: np.array,
                 transforms=albu.Compose([albu.HorizontalFlip(), AT.ToTensor()]),
                 preprocessing=None):
        # Derive a binary "hasMask" column from the RLE annotations so the
        # classification label helper can read it.
        df["hasMask"] = ~ df["EncodedPixels"].isna()
        self.df = df
        self.data_folder = data_folder
        self.img_ids = im_ids
        self.transforms = transforms
        self.preprocessing = preprocessing

    def __getitem__(self, idx):
        image_name = self.img_ids[idx]
        # loading image (cv2 reads BGR; convert to RGB)
        img = cv2.imread(os.path.join(self.data_folder, image_name))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        class_label = get_classification_label(self.df, image_name)
        # apply augmentations
        transformed = self.transforms(image=img)
        img = transformed["image"]
        if self.preprocessing:
            img = self.preprocessing(image=img, mask=None)["image"]
        return img, class_label

    def __len__(self):
        return len(self.img_ids)
class ClfSegCloudDataset(CloudDataset):
    """Joint segmentation + classification dataset.

    Same pipeline as CloudDataset, but each item is returned as a dict
    {"features": img, "seg_targets": mask, "clf_targets": class_label}.

    Attributes
        data_folder (str): path to the image directory
        df (pd.DataFrame): dataframe with the labels
        im_ids (np.ndarray): of image names.
        masks_folder (str): path to the masks directory;
            a string here switches on `use_resized_dataset`
        transforms (albumentations.augmentation): transforms to apply
            before preprocessing. Defaults to HFlip and ToTensor
        preprocessing: ops to perform after transforms, such as
            z-score standardization. Defaults to None.
        mask_shape (tuple): mask shape (numpy format, not cv2)
    """

    def __init__(self, data_folder: str, df: pd.DataFrame, im_ids: np.array,
                 masks_folder: str=None,
                 transforms=albu.Compose([albu.HorizontalFlip(), AT.ToTensor()]),
                 preprocessing=None, mask_shape=(320, 640)):
        # Derive the binary classification label column before handing the
        # dataframe to the parent constructor.
        df["hasMask"] = ~ df["EncodedPixels"].isna()
        super().__init__(data_folder=data_folder, df=df, im_ids=im_ids,
                         masks_folder=masks_folder, transforms=transforms,
                         preprocessing=preprocessing, mask_shape=mask_shape)

    def __getitem__(self, idx):
        # The original duplicated CloudDataset.__getitem__ wholesale; reuse
        # the parent's mask/image/augment/preprocess pipeline and attach the
        # classification label for the same image (label lookup is
        # independent of the augmentation, so the result is unchanged).
        img, mask = super().__getitem__(idx)
        class_label = get_classification_label(self.df, self.img_ids[idx])
        return {"features": img, "seg_targets": mask,
                "clf_targets": class_label}
| 42.277419
| 81
| 0.605829
| 739
| 6,553
| 5.173207
| 0.151556
| 0.039236
| 0.029296
| 0.023542
| 0.837824
| 0.817159
| 0.817159
| 0.817159
| 0.817159
| 0.748627
| 0
| 0.007264
| 0.30673
| 6,553
| 154
| 82
| 42.551948
| 0.834251
| 0.242942
| 0
| 0.735294
| 0
| 0
| 0.033405
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0
| 0.078431
| 0.019608
| 0.235294
| 0.009804
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
69e00c557053d42bfb28e12e43d69afd78b9ed48
| 232
|
py
|
Python
|
probeye/definition/__init__.py
|
BAMresearch/probeye
|
ff018ef629f7d5ce4a263b6656b363f90ab6be02
|
[
"MIT"
] | null | null | null |
probeye/definition/__init__.py
|
BAMresearch/probeye
|
ff018ef629f7d5ce4a263b6656b363f90ab6be02
|
[
"MIT"
] | 42
|
2021-08-24T06:50:17.000Z
|
2022-03-25T09:05:41.000Z
|
probeye/definition/__init__.py
|
BAMresearch/probeye
|
ff018ef629f7d5ce4a263b6656b363f90ab6be02
|
[
"MIT"
] | 2
|
2021-11-14T22:30:54.000Z
|
2022-02-28T13:39:00.000Z
|
# module imports
from probeye.definition import inference_problem
from probeye.definition import forward_model
from probeye.definition import noise_model
from probeye.definition import parameter
from probeye.definition import prior
| 33.142857
| 48
| 0.875
| 30
| 232
| 6.666667
| 0.433333
| 0.275
| 0.525
| 0.675
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099138
| 232
| 6
| 49
| 38.666667
| 0.956938
| 0.060345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
69ff1afaf94764d1730bc4acd8b3eb2b14b48b01
| 141
|
py
|
Python
|
sumo_rl/agents/__init__.py
|
joaovitorblabres/sumo-rl
|
ec9d178cd0289366ba0a8648da52972d31d1026e
|
[
"MIT"
] | null | null | null |
sumo_rl/agents/__init__.py
|
joaovitorblabres/sumo-rl
|
ec9d178cd0289366ba0a8648da52972d31d1026e
|
[
"MIT"
] | null | null | null |
sumo_rl/agents/__init__.py
|
joaovitorblabres/sumo-rl
|
ec9d178cd0289366ba0a8648da52972d31d1026e
|
[
"MIT"
] | null | null | null |
from sumo_rl.agents.ql_agent import QLAgent
from sumo_rl.agents.pql_agent import PQLAgent
from sumo_rl.agents.pql_agent_non import mPQLAgent
| 35.25
| 50
| 0.87234
| 25
| 141
| 4.64
| 0.48
| 0.206897
| 0.258621
| 0.413793
| 0.413793
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 141
| 3
| 51
| 47
| 0.899225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0e1011bdc3b35f954f825e46f1bfda88de98d04e
| 4,016
|
py
|
Python
|
test/pyaz/sig/image_definition/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
test/pyaz/sig/image_definition/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | 9
|
2021-09-24T16:37:24.000Z
|
2021-12-24T00:39:19.000Z
|
test/pyaz/sig/image_definition/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def _run_image_definition(subcommand, params):
    """Run ``az sig image-definition <subcommand> <params>`` and parse stdout.

    Prints the command first (preserving the original behavior), returns the
    JSON-decoded stdout when the CLI wrote anything to stdout, and raises
    ``Exception`` carrying the captured stderr otherwise. The original
    functions each had unreachable ``print`` calls after their return/raise;
    those are dropped here.
    """
    command = "az sig image-definition " + subcommand + " " + params
    print(command)
    # NOTE(review): shell=True with a concatenated command string is shell-
    # injection-prone if any parameter is attacker-controlled. Kept as-is to
    # preserve behavior; a list argv with shell=False would be safer.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    raise Exception(stderr)


def create(resource_group, gallery_name, gallery_image_definition, os_type, publisher, offer, sku, os_state=None, end_of_life_date=None, privacy_statement_uri=None, release_note_uri=None, eula=None, description=None, location=None, minimum_cpu_core=None, maximum_cpu_core=None, minimum_memory=None, maximum_memory=None, disallowed_disk_types=None, plan_name=None, plan_publisher=None, plan_product=None, tags=None, hyper_v_generation=None, features=None):
    """Create a gallery image definition; returns the CLI's JSON output."""
    params = get_params(locals())
    return _run_image_definition("create", params)


def list(resource_group, gallery_name):
    """List gallery image definitions; returns the CLI's JSON output.

    Note: intentionally shadows the builtin ``list`` to mirror the az verb.
    """
    params = get_params(locals())
    return _run_image_definition("list", params)


def show(resource_group, gallery_name, gallery_image_definition):
    """Show one gallery image definition; returns the CLI's JSON output."""
    params = get_params(locals())
    return _run_image_definition("show", params)


def delete(resource_group, gallery_name, gallery_image_definition):
    """Delete a gallery image definition; returns the CLI's JSON output."""
    params = get_params(locals())
    return _run_image_definition("delete", params)


def update(resource_group, gallery_name, gallery_image_definition, set=None, add=None, remove=None, force_string=None):
    """Update a gallery image definition; returns the CLI's JSON output."""
    params = get_params(locals())
    return _run_image_definition("update", params)


def list_shared(location, gallery_unique_name, shared_to=None):
    """List image definitions shared to the caller; returns JSON output."""
    params = get_params(locals())
    return _run_image_definition("list-shared", params)


def show_shared(location, gallery_unique_name, gallery_image_definition):
    """Show a shared gallery image definition; returns JSON output."""
    params = get_params(locals())
    return _run_image_definition("show-shared", params)
| 39.372549
| 455
| 0.68999
| 504
| 4,016
| 5.376984
| 0.166667
| 0.072325
| 0.051661
| 0.054244
| 0.828782
| 0.80738
| 0.80738
| 0.773432
| 0.773432
| 0.773432
| 0
| 0.004351
| 0.198705
| 4,016
| 101
| 456
| 39.762376
| 0.837787
| 0
| 0
| 0.827957
| 0
| 0
| 0.072958
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075269
| false
| 0
| 0.021505
| 0
| 0.172043
| 0.225806
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
38ac3fcd72c0df7773268b07168e72ee2990615d
| 515,776
|
py
|
Python
|
labman/db/tests/test_process.py
|
antgonza/labman
|
c3bb7a15cbfdbbf60a7b2b176fff207f99af0002
|
[
"BSD-3-Clause"
] | null | null | null |
labman/db/tests/test_process.py
|
antgonza/labman
|
c3bb7a15cbfdbbf60a7b2b176fff207f99af0002
|
[
"BSD-3-Clause"
] | null | null | null |
labman/db/tests/test_process.py
|
antgonza/labman
|
c3bb7a15cbfdbbf60a7b2b176fff207f99af0002
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-, labman development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import main
from datetime import datetime, timezone
from io import StringIO
from re import escape, search
import numpy as np
import numpy.testing as npt
import pandas as pd
from labman.db.testing import LabmanTestCase
from labman.db.container import Tube, Well
from labman.db.composition import (
ReagentComposition, SampleComposition, GDNAComposition,
LibraryPrep16SComposition, Composition, PoolComposition,
PrimerComposition, PrimerSetComposition, LibraryPrepShotgunComposition,
PrimerSet)
from labman.db.user import User
from labman.db.plate import Plate, PlateConfiguration
from labman.db.equipment import Equipment
from labman.db.process import (
Process, SamplePlatingProcess, ReagentCreationProcess,
PrimerWorkingPlateCreationProcess, GDNAExtractionProcess,
LibraryPrep16SProcess, QuantificationProcess, PoolingProcess,
SequencingProcess, GDNAPlateCompressionProcess, NormalizationProcess,
LibraryPrepShotgunProcess)
from labman.db.study import Study
def _help_compare_timestamps(input_datetime):
# can't really check that the timestamp is an exact value,
# so instead check that current time (having just created process)
# is within 60 seconds of time at which process was created.
# This is a heuristic--may fail if you e.g. put a breakpoint
# between create call and assertLess call.
time_diff = datetime.now() - input_datetime
is_close = time_diff.total_seconds() < 60
return is_close
def _help_make_datetime(input_datetime_str):
# input_datetime_str should be in format '2017-10-25 19:10:25'
return datetime.strptime(input_datetime_str, '%Y-%m-%d %H:%M:%S')
class TestProcess(LabmanTestCase):
    def test_factory(self):
        """Process.factory returns the concrete subclass instance that
        corresponds to each known process id."""
        cases = [
            (11, SamplePlatingProcess(11)),
            (6, ReagentCreationProcess(6)),
            (4, PrimerWorkingPlateCreationProcess(1)),
            (12, GDNAExtractionProcess(1)),
            (19, GDNAPlateCompressionProcess(1)),
            (13, LibraryPrep16SProcess(1)),
            (21, NormalizationProcess(1)),
            (22, LibraryPrepShotgunProcess(1)),
            (14, QuantificationProcess(1)),
            (15, QuantificationProcess(2)),
            (16, PoolingProcess(1)),
            (18, SequencingProcess(1))]
        for process_id, exp_process in cases:
            self.assertEqual(Process.factory(process_id), exp_process)
class TestSamplePlatingProcess(LabmanTestCase):
    def test_attributes(self):
        """Check stored attributes of an existing sample plating process."""
        tester = SamplePlatingProcess(11)
        self.assertEqual(tester.date,
                         _help_make_datetime('2017-10-25 19:10:25'))
        self.assertEqual(tester.personnel, User('test@foo.bar'))
        self.assertEqual(tester.process_id, 11)
        self.assertEqual(tester.plate, Plate(21))
    def test_create(self):
        """A newly created plating process fills every well with a blank."""
        user = User('test@foo.bar')
        # 1 -> 96-well deep-well plate
        plate_config = PlateConfiguration(1)
        obs = SamplePlatingProcess.create(
            user, plate_config, 'unittest Plate 1', 10)
        self.assertTrue(_help_compare_timestamps(obs.date))
        self.assertEqual(obs.personnel, user)
        # Check that the plate has been created with the correct values
        obs_plate = obs.plate
        self.assertIsInstance(obs_plate, Plate)
        self.assertEqual(obs_plate.external_id, 'unittest Plate 1')
        self.assertEqual(obs_plate.plate_configuration, plate_config)
        self.assertFalse(obs_plate.discarded)
        self.assertIsNone(obs_plate.notes)
        # Check that all the wells in the plate contain blanks
        plate_layout = obs_plate.layout
        for i, row in enumerate(plate_layout):
            for j, well in enumerate(row):
                self.assertIsInstance(well, Well)
                self.assertEqual(well.plate, obs_plate)
                self.assertEqual(well.row, i + 1)
                self.assertEqual(well.column, j + 1)
                self.assertEqual(well.latest_process, obs)
                obs_composition = well.composition
                self.assertIsInstance(obs_composition, SampleComposition)
                self.assertEqual(obs_composition.sample_composition_type,
                                 'blank')
                self.assertIsNone(obs_composition.sample_id)
                # blank content is 'blank.<plate name, dotted>.<well id>'
                self.assertEqual(obs_composition.content,
                                 'blank.%s.%s' % ("unittest.Plate.1",
                                                  well.well_id))
                self.assertEqual(obs_composition.upstream_process, obs)
                self.assertEqual(obs_composition.container, well)
                self.assertEqual(obs_composition.total_volume, 10)
    def test_update_well(self):
        """update_well transitions a well through all sample/control states.

        NOTE: the assertions below are order-dependent -- each update
        builds on the state left by the previous one.
        """
        tester = SamplePlatingProcess(11)
        obs = SampleComposition(8)
        # well starts out as a blank (control)
        self.assertEqual(obs.sample_composition_type, 'blank')
        self.assertIsNone(obs.sample_id)
        self.assertEqual(obs.content, 'blank.Test.plate.1.H1')
        # Update a well from CONTROL -> EXPERIMENTAL SAMPLE
        self.assertEqual(
            tester.update_well(8, 1, '1.SKM8.640201'), ('1.SKM8.640201', True))
        self.assertEqual(obs.sample_composition_type, 'experimental sample')
        self.assertEqual(obs.sample_id, '1.SKM8.640201')
        self.assertEqual(obs.content, '1.SKM8.640201')
        # Update a well from EXPERIMENTAL SAMPLE -> EXPERIMENTAL SAMPLE
        self.assertEqual(
            tester.update_well(8, 1, '1.SKB6.640176'),
            ('1.SKB6.640176.Test.plate.1.H1', True))
        self.assertEqual(obs.sample_composition_type, 'experimental sample')
        self.assertEqual(obs.sample_id, '1.SKB6.640176')
        self.assertEqual(obs.content, '1.SKB6.640176.Test.plate.1.H1')
        # Update a well from EXPERIMENTAL SAMPLE -> CONTROL
        self.assertEqual(tester.update_well(8, 1, 'vibrio.positive.control'),
                         ('vibrio.positive.control.Test.plate.1.H1', True))
        self.assertEqual(obs.sample_composition_type,
                         'vibrio.positive.control')
        self.assertIsNone(obs.sample_id)
        self.assertEqual(obs.content, 'vibrio.positive.control.Test.plate.1.H1')
        # Update a well from CONTROL -> CONTROL
        self.assertEqual(tester.update_well(8, 1, 'blank'),
                         ('blank.Test.plate.1.H1', True))
        self.assertEqual(obs.sample_composition_type, 'blank')
        self.assertIsNone(obs.sample_id)
        self.assertEqual(obs.content, 'blank.Test.plate.1.H1')
    def test_comment_well(self):
        """comment_well sets a well's notes; passing None clears them."""
        tester = SamplePlatingProcess(11)
        obs = SampleComposition(8)
        self.assertIsNone(obs.notes)
        tester.comment_well(8, 1, 'New notes')
        self.assertEqual(obs.notes, 'New notes')
        tester.comment_well(8, 1, None)
        self.assertIsNone(obs.notes)
    def test_notes(self):
        """The notes property is settable on the process itself."""
        tester = SamplePlatingProcess(11)
        self.assertIsNone(tester.notes)
        tester.notes = 'This note was set in a test'
        self.assertEqual(tester.notes, 'This note was set in a test')
class TestReagentCreationProcess(LabmanTestCase):
    def test_attributes(self):
        """Check stored attributes of an existing reagent creation process."""
        proc = ReagentCreationProcess(6)
        self.assertEqual(proc.date,
                         _help_make_datetime('2017-10-23 09:10:25'))
        self.assertEqual(proc.personnel, User('test@foo.bar'))
        self.assertEqual(proc.process_id, 6)
        self.assertEqual(proc.tube, Tube(2))
    def test_create(self):
        """Creating a reagent also creates its tube and its composition."""
        user = User('test@foo.bar')
        obs = ReagentCreationProcess.create(user, 'Reagent external id', 10,
                                            'extraction kit')
        self.assertTrue(_help_compare_timestamps(obs.date))
        self.assertEqual(obs.personnel, user)
        # The tube should have been created with the correct values
        tube = obs.tube
        self.assertIsInstance(tube, Tube)
        self.assertEqual(tube.external_id, 'Reagent external id')
        self.assertEqual(tube.remaining_volume, 10)
        self.assertIsNone(tube.notes)
        self.assertEqual(tube.latest_process, obs)
        # Perform the reagent composition checks
        comp = tube.composition
        self.assertIsInstance(comp, ReagentComposition)
        self.assertEqual(comp.container, tube)
        self.assertEqual(comp.total_volume, 10)
        self.assertIsNone(comp.notes)
        self.assertEqual(comp.external_lot_id, 'Reagent external id')
        self.assertEqual(comp.reagent_type, 'extraction kit')
class TestPrimerWorkingPlateCreationProcess(LabmanTestCase):
    def test_attributes(self):
        """Check stored attributes of an existing working-plate process."""
        tester = PrimerWorkingPlateCreationProcess(1)
        self.assertEqual(tester.date,
                         _help_make_datetime('2017-10-23 19:10:25'))
        self.assertEqual(tester.personnel, User('test@foo.bar'))
        self.assertEqual(tester.process_id, 4)
        exp_plates = [Plate(11), Plate(12), Plate(13), Plate(14),
                      Plate(15), Plate(16), Plate(17), Plate(18)]
        self.assertEqual(tester.primer_set, PrimerSet(1))
        self.assertEqual(tester.master_set_order, 'EMP PRIMERS MSON 1')
        self.assertEqual(tester.plates, exp_plates)
    def test_create(self):
        """create() generates 8 working plates named after the primer set
        and the creation date, disambiguating duplicate names."""
        test_date = _help_make_datetime('2018-01-18 00:00:00')
        user = User('test@foo.bar')
        primer_set = PrimerSet(1)
        obs = PrimerWorkingPlateCreationProcess.create(
            user, primer_set, 'Master Set Order 1',
            creation_date=test_date)
        self.assertEqual(obs.date, test_date)
        self.assertEqual(obs.personnel, user)
        self.assertEqual(obs.primer_set, primer_set)
        self.assertEqual(obs.master_set_order, 'Master Set Order 1')
        obs_plates = obs.plates
        obs_date = datetime.strftime(obs.date, Process.get_date_format())
        self.assertEqual(len(obs_plates), 8)
        self.assertEqual(obs_plates[0].external_id,
                         'EMP 16S V4 primer plate 1 ' + obs_date)
        self.assertEqual(
            obs_plates[0].get_well(1, 1).composition.primer_set_composition,
            PrimerSetComposition(1))
        # This tests the edge case in which a plate already exists that has
        # the external id that would usually be generated by the create
        # process, in which case a 4-digit random number is added as a
        # disambiguator.
        obs = PrimerWorkingPlateCreationProcess.create(
            user, primer_set, 'Master Set Order 1',
            creation_date=str(obs.date))
        obs_ext_id = obs.plates[0].external_id
        # NB: both pattern fragments must be raw strings; the original
        # continuation (' \d\d\d\d$') was a plain literal, and \d in a
        # non-raw string is an invalid escape sequence on modern Pythons.
        regex = (r'EMP 16S V4 primer plate 1 ' + escape(obs_date) +
                 r' \d\d\d\d$')
        matches = search(regex, obs_ext_id)
        self.assertIsNotNone(matches)
class TestGDNAExtractionProcess(LabmanTestCase):
    def test_attributes(self):
        """Check stored attributes of an existing gDNA extraction process."""
        tester = GDNAExtractionProcess(1)
        self.assertEqual(tester.date,
                         _help_make_datetime('2017-10-25 19:10:25'))
        self.assertEqual(tester.personnel, User('test@foo.bar'))
        self.assertEqual(tester.process_id, 12)
        self.assertEqual(tester.kingfisher, Equipment(11))
        self.assertEqual(tester.epmotion, Equipment(5))
        self.assertEqual(tester.epmotion_tool, Equipment(15))
        self.assertEqual(tester.extraction_kit, ReagentComposition(2))
        self.assertEqual(tester.sample_plate.id, 21)
        self.assertEqual(tester.volume, 10)
        self.assertEqual(tester.notes, None)
    def test_create(self):
        """Extraction creates a gDNA plate mirroring the sample plate."""
        test_date = _help_make_datetime('2018-01-01 00:00:01')
        user = User('test@foo.bar')
        ep_robot = Equipment(6)
        kf_robot = Equipment(11)
        tool = Equipment(15)
        kit = ReagentComposition(1)
        plate = Plate(21)
        notes = 'test note'
        obs = GDNAExtractionProcess.create(
            user, plate, kf_robot, ep_robot, tool, kit, 10,
            'gdna - Test plate 1',
            extraction_date=test_date, notes=notes)
        self.assertEqual(obs.date, test_date)
        self.assertEqual(obs.personnel, user)
        self.assertEqual(obs.kingfisher, Equipment(11))
        self.assertEqual(obs.epmotion, Equipment(6))
        self.assertEqual(obs.epmotion_tool, Equipment(15))
        self.assertEqual(obs.extraction_kit, ReagentComposition(1))
        self.assertEqual(obs.sample_plate, Plate(21))
        self.assertEqual(obs.volume, 10)
        self.assertEqual(obs.notes, 'test note')
        # Check the extracted plate
        obs_plates = obs.plates
        self.assertEqual(len(obs_plates), 1)
        obs_plate = obs_plates[0]
        self.assertIsInstance(obs_plate, Plate)
        self.assertEqual(obs_plate.external_id, 'gdna - Test plate 1')
        self.assertEqual(obs_plate.plate_configuration,
                         plate.plate_configuration)
        self.assertFalse(obs_plate.discarded)
        # Check the wells in the plate
        plate_layout = obs_plate.layout
        for i, row in enumerate(plate_layout):
            for j, well in enumerate(row):
                if i == 7 and j == 11:
                    # The last well of the plate is an empty well
                    self.assertIsNone(well)
                else:
                    self.assertIsInstance(well, Well)
                    self.assertEqual(well.plate, obs_plate)
                    self.assertEqual(well.row, i + 1)
                    self.assertEqual(well.column, j + 1)
                    self.assertEqual(well.latest_process, obs)
                    obs_composition = well.composition
                    self.assertIsInstance(obs_composition, GDNAComposition)
                    self.assertEqual(obs_composition.upstream_process, obs)
                    self.assertEqual(obs_composition.container, well)
                    self.assertEqual(obs_composition.total_volume, 10)
        # The sample compositions of the gDNA compositions change depending on
        # the well. Spot check a few sample and controls
        self.assertEqual(
            plate_layout[0][0].composition.sample_composition.sample_id,
            '1.SKB1.640202')
        self.assertEqual(
            plate_layout[1][1].composition.sample_composition.sample_id,
            '1.SKB2.640194')
        self.assertIsNone(
            plate_layout[6][0].composition.sample_composition.sample_id)
        self.assertEqual(
            plate_layout[
                6][0].composition.sample_composition.sample_composition_type,
            'vibrio.positive.control')
        self.assertIsNone(
            plate_layout[7][0].composition.sample_composition.sample_id)
        self.assertEqual(
            plate_layout[
                7][0].composition.sample_composition.sample_composition_type,
            'blank')
class TestGDNAPlateCompressionProcess(LabmanTestCase):
    def test_get_interleaved_quarters_position_generator(self):
        """Validate argument checking and output of the position generator.

        INTERLEAVED_POSITIONS is a module-level constant holding the
        expected 384-position interleaved ordering (defined elsewhere in
        this file; each quarter contributes 96 positions).
        """
        # ensure error thrown for invalid number of quarters
        exp_err = "Expected number of quarters to be an integer between 1 " \
                  "and 4 but received 5"
        with self.assertRaisesRegex(ValueError, exp_err):
            x = GDNAPlateCompressionProcess\
                .get_interleaved_quarters_position_generator(5, 2, 2)
            # next() is needed because the generator is lazy: the
            # validation only runs once iteration starts
            next(x)
        exp_err = "Expected number of quarters to be an integer between 1 " \
                  "and 4 but received 1.5"
        with self.assertRaisesRegex(ValueError, exp_err):
            x = GDNAPlateCompressionProcess\
                .get_interleaved_quarters_position_generator(1.5, 2, 2)
            next(x)
        # ensure error thrown for invalid total rows, cols
        exp_err = "Expected number of rows and columns to be positive " \
                  "integers evenly divisible by two but received 0 rows and " \
                  "2 columns"
        with self.assertRaisesRegex(ValueError, exp_err):
            x = GDNAPlateCompressionProcess \
                .get_interleaved_quarters_position_generator(4, 0, 2)
            next(x)
        exp_err = "Expected number of rows and columns to be positive " \
                  "integers evenly divisible by two but received 2 rows and " \
                  "1 columns"
        with self.assertRaisesRegex(ValueError, exp_err):
            x = GDNAPlateCompressionProcess \
                .get_interleaved_quarters_position_generator(4, 2, 1)
            next(x)
        # ensure correct results returned for all numbers of quarters
        x = GDNAPlateCompressionProcess \
            .get_interleaved_quarters_position_generator(1, 16, 24)
        self.assertListEqual(list(x), INTERLEAVED_POSITIONS[:96])
        x = GDNAPlateCompressionProcess \
            .get_interleaved_quarters_position_generator(2, 16, 24)
        self.assertListEqual(list(x), INTERLEAVED_POSITIONS[:192])
        x = GDNAPlateCompressionProcess \
            .get_interleaved_quarters_position_generator(3, 16, 24)
        self.assertListEqual(list(x), INTERLEAVED_POSITIONS[:288])
        x = GDNAPlateCompressionProcess \
            .get_interleaved_quarters_position_generator(4, 16, 24)
        self.assertListEqual(list(x), INTERLEAVED_POSITIONS)
    def test_attributes(self):
        """Check stored attributes of an existing compression process."""
        tester = GDNAPlateCompressionProcess(1)
        self.assertEqual(tester.date,
                         _help_make_datetime('2017-10-25 19:10:25'))
        self.assertEqual(tester.personnel, User('test@foo.bar'))
        self.assertEqual(tester.process_id, 19)
        self.assertEqual(tester.plates, [Plate(24)])
        self.assertEqual(tester.robot, Equipment(1))
        self.assertEqual(tester.gdna_plates, [Plate(22), Plate(28), Plate(31),
                                              Plate(34)])
    def test_create(self):
        """Two 96-well gDNA plates compress into one interleaved 384-well
        plate: plate A on odd columns, plate B on even columns, with every
        other destination row left empty (no origin well)."""
        user = User('test@foo.bar')
        # Create a couple of new plates so it is easy to test the interleaving
        spp = SamplePlatingProcess.create(
            user, PlateConfiguration(1), 'Compression Test 1', 1)
        spp.update_well(1, 1, '1.SKM7.640188')
        spp.update_well(1, 2, '1.SKD9.640182')
        spp.update_well(1, 3, '1.SKM8.640201')
        spp.update_well(1, 4, '1.SKB8.640193')
        spp.update_well(1, 5, '1.SKD2.640178')
        spp.update_well(1, 6, '1.SKM3.640197')
        spp.update_well(1, 7, '1.SKM4.640180')
        spp.update_well(1, 8, '1.SKB9.640200')
        spp.update_well(2, 1, '1.SKB4.640189')
        spp.update_well(2, 2, '1.SKB5.640181')
        spp.update_well(2, 3, '1.SKB6.640176')
        spp.update_well(2, 4, '1.SKM2.640199')
        spp.update_well(2, 5, '1.SKM5.640177')
        spp.update_well(2, 6, '1.SKB1.640202')
        spp.update_well(2, 7, '1.SKD8.640184')
        spp.update_well(2, 8, '1.SKD4.640185')
        plateA = spp.plates[0]
        spp = SamplePlatingProcess.create(
            user, PlateConfiguration(1), 'Compression Test 2', 1)
        spp.update_well(1, 1, '1.SKB4.640189')
        spp.update_well(1, 2, '1.SKB5.640181')
        spp.update_well(1, 3, '1.SKB6.640176')
        spp.update_well(1, 4, '1.SKM2.640199')
        spp.update_well(1, 5, '1.SKM5.640177')
        spp.update_well(1, 6, '1.SKB1.640202')
        spp.update_well(1, 7, '1.SKD8.640184')
        spp.update_well(1, 8, '1.SKD4.640185')
        spp.update_well(2, 1, '1.SKB3.640195')
        spp.update_well(2, 2, '1.SKM1.640183')
        spp.update_well(2, 3, '1.SKB7.640196')
        spp.update_well(2, 4, '1.SKD3.640198')
        spp.update_well(2, 5, '1.SKD7.640191')
        spp.update_well(2, 6, '1.SKD6.640190')
        spp.update_well(2, 7, '1.SKB2.640194')
        spp.update_well(2, 8, '1.SKM9.640192')
        plateB = spp.plates[0]
        # Extract the plates
        ep_robot = Equipment(6)
        tool = Equipment(15)
        kit = ReagentComposition(1)
        ep1 = GDNAExtractionProcess.create(
            user, plateA, Equipment(11), ep_robot, tool, kit, 100,
            'gdna - Test Comp 1')
        ep2 = GDNAExtractionProcess.create(
            user, plateB, Equipment(12), ep_robot, tool, kit, 100,
            'gdna - Test Comp 2')
        obs = GDNAPlateCompressionProcess.create(
            user, [ep1.plates[0], ep2.plates[0]], 'Compressed plate AB',
            Equipment(1))
        self.assertTrue(_help_compare_timestamps(obs.date))
        self.assertEqual(obs.personnel, user)
        obs_plates = obs.plates
        self.assertEqual(len(obs_plates), 1)
        obs_layout = obs_plates[0].layout
        # (dest_row, dest_col, sample_id) triples for the spot checks below
        exp_positions = [
            # Row 1 plate A
            (1, 1, '1.SKM7.640188'), (1, 3, '1.SKD9.640182'),
            (1, 5, '1.SKM8.640201'), (1, 7, '1.SKB8.640193'),
            (1, 9, '1.SKD2.640178'), (1, 11, '1.SKM3.640197'),
            (1, 13, '1.SKM4.640180'), (1, 15, '1.SKB9.640200'),
            # Row 1 plate B
            (1, 2, '1.SKB4.640189'), (1, 4, '1.SKB5.640181'),
            (1, 6, '1.SKB6.640176'), (1, 8, '1.SKM2.640199'),
            (1, 10, '1.SKM5.640177'), (1, 12, '1.SKB1.640202'),
            (1, 14, '1.SKD8.640184'), (1, 16, '1.SKD4.640185'),
            # Row 2 plate A
            (3, 1, '1.SKB4.640189'), (3, 3, '1.SKB5.640181'),
            (3, 5, '1.SKB6.640176'), (3, 7, '1.SKM2.640199'),
            (3, 9, '1.SKM5.640177'), (3, 11, '1.SKB1.640202'),
            (3, 13, '1.SKD8.640184'), (3, 15, '1.SKD4.640185'),
            # Row 2 plate B
            (3, 2, '1.SKB3.640195'), (3, 4, '1.SKM1.640183'),
            (3, 6, '1.SKB7.640196'), (3, 8, '1.SKD3.640198'),
            (3, 10, '1.SKD7.640191'), (3, 12, '1.SKD6.640190'),
            (3, 14, '1.SKB2.640194'), (3, 16, '1.SKM9.640192')]
        for row, col, sample_id in exp_positions:
            well = obs_layout[row - 1][col - 1]
            self.assertEqual(well.row, row)
            self.assertEqual(well.column, col)
            self.assertEqual(
                well.composition.gdna_composition.sample_composition.sample_id,
                sample_id)
        # In these positions we did not have an origin plate, do not store
        # anything, this way we can differentiate from blanks and save
        # reagents during library prep
        for col in range(0, 15):
            self.assertIsNone(obs_layout[1][col])
        self.assertEqual(obs.robot, Equipment(1))
        self.assertEqual(obs.gdna_plates, [ep1.plates[0], ep2.plates[0]])
class TestLibraryPrep16SProcess(LabmanTestCase):
    def test_attributes(self):
        """Check stored attributes of an existing 16S library prep."""
        tester = LibraryPrep16SProcess(1)
        self.assertEqual(tester.date,
                         _help_make_datetime('2017-10-25 02:10:25'))
        self.assertEqual(tester.personnel, User('test@foo.bar'))
        self.assertEqual(tester.process_id, 13)
        self.assertEqual(tester.mastermix, ReagentComposition(3))
        self.assertEqual(tester.water_lot, ReagentComposition(4))
        self.assertEqual(tester.epmotion, Equipment(8))
        self.assertEqual(tester.epmotion_tm300_tool, Equipment(16))
        self.assertEqual(tester.epmotion_tm50_tool, Equipment(17))
        self.assertEqual(tester.gdna_plate.id, 22)  # Plate(22))
        self.assertEqual(tester.primer_plate, Plate(11))
        self.assertEqual(tester.volume, 10)
    def test_create(self):
        """16S prep pairs a gDNA plate with a primer plate well-by-well."""
        user = User('test@foo.bar')
        master_mix = ReagentComposition(2)
        water = ReagentComposition(3)
        robot = Equipment(8)
        tm300_8_tool = Equipment(16)
        tm50_8_tool = Equipment(17)
        volume = 75
        plates = [(Plate(22), Plate(11))]
        obs = LibraryPrep16SProcess.create(
            user, Plate(22), Plate(11), 'New 16S plate', robot,
            tm300_8_tool, tm50_8_tool, master_mix, water, volume)
        self.assertTrue(_help_compare_timestamps(obs.date))
        self.assertEqual(obs.personnel, user)
        self.assertEqual(obs.mastermix, ReagentComposition(2))
        self.assertEqual(obs.water_lot, ReagentComposition(3))
        self.assertEqual(obs.epmotion, Equipment(8))
        self.assertEqual(obs.epmotion_tm300_tool, Equipment(16))
        self.assertEqual(obs.epmotion_tm50_tool, Equipment(17))
        self.assertEqual(obs.gdna_plate, Plate(22))
        self.assertEqual(obs.primer_plate, Plate(11))
        self.assertEqual(obs.volume, 75)
        # Check the generated plates
        obs_plates = obs.plates
        self.assertEqual(len(obs_plates), 1)
        obs_plate = obs_plates[0]
        self.assertIsInstance(obs_plate, Plate)
        self.assertEqual(obs_plate.external_id, 'New 16S plate')
        self.assertEqual(obs_plate.plate_configuration,
                         plates[0][0].plate_configuration)
        # Check the well in the plate
        plate_layout = obs_plate.layout
        for i, row in enumerate(plate_layout):
            for j, well in enumerate(row):
                if i == 7 and j == 11:
                    # the last well of the source plate is empty
                    self.assertIsNone(well)
                else:
                    self.assertIsInstance(well, Well)
                    self.assertEqual(well.plate, obs_plate)
                    self.assertEqual(well.row, i + 1)
                    self.assertEqual(well.column, j + 1)
                    self.assertEqual(well.latest_process, obs)
                    obs_composition = well.composition
                    self.assertIsInstance(obs_composition,
                                          LibraryPrep16SComposition)
                    self.assertEqual(obs_composition.upstream_process, obs)
                    self.assertEqual(obs_composition.container, well)
                    self.assertEqual(obs_composition.total_volume, 75)
        # spot check a couple of elements
        sample_id = plate_layout[0][
            0].composition.gdna_composition.sample_composition.sample_id
        self.assertEqual(sample_id, '1.SKB1.640202')
        barcode = plate_layout[0][
            0].composition.primer_composition.primer_set_composition.barcode
        self.assertEqual(barcode, 'AGCCTTCGTCGC')
class TestNormalizationProcess(LabmanTestCase):
    def test_calculate_norm_vol(self):
        """_calculate_norm_vol maps DNA concentrations to transfer volumes;
        NaN and 0 concentrations get the maximum (3500 nL) volume."""
        dna_concs = np.array([[2, 7.89], [np.nan, .0]])
        exp_vols = np.array([[2500., 632.5], [3500., 3500.]])
        obs_vols = NormalizationProcess._calculate_norm_vol(dna_concs)
        np.testing.assert_allclose(exp_vols, obs_vols)
    def test_attributes(self):
        """Check stored attributes of an existing normalization process."""
        tester = NormalizationProcess(1)
        self.assertEqual(tester.date,
                         _help_make_datetime('2017-10-25 19:10:25'))
        self.assertEqual(tester.personnel, User('test@foo.bar'))
        self.assertEqual(tester.process_id, 21)
        self.assertEqual(tester.quantification_process,
                         QuantificationProcess(3))
        self.assertEqual(tester.water_lot, ReagentComposition(4))
        # the function/parameters used for normalization are stored as data
        exp = {'function': 'default',
               'parameters' : {'total_volume': 3500, 'target_dna': 5,
                               'min_vol': 2.5, 'max_volume': 3500,
                               'resolution': 2.5, 'reformat': False}}
        self.assertEqual(tester.normalization_function_data, exp)
        self.assertEqual(tester.compressed_plate, Plate(24))
    def test_create(self):
        """Normalization creates a plate with per-well DNA/water volumes."""
        user = User('test@foo.bar')
        water = ReagentComposition(3)
        obs = NormalizationProcess.create(
            user, QuantificationProcess(3), water, 'Create-Norm plate 1')
        self.assertTrue(_help_compare_timestamps(obs.date))
        self.assertEqual(obs.personnel, user)
        self.assertEqual(obs.quantification_process,
                         QuantificationProcess(3))
        self.assertEqual(obs.water_lot, ReagentComposition(3))
        # Check the generated plates
        obs_plates = obs.plates
        self.assertEqual(len(obs_plates), 1)
        obs_plate = obs_plates[0]
        self.assertEqual(obs_plate.external_id, 'Create-Norm plate 1')
        # Spot check some wells in the plate
        plate_layout = obs_plate.layout
        self.assertEqual(plate_layout[0][0].composition.dna_volume, 415)
        self.assertEqual(plate_layout[0][0].composition.water_volume, 3085)
    def test_format_picklist(self):
        """_format_picklist emits an Echo picklist: one Water transfer row
        per well followed by one Sample transfer row per well."""
        exp_picklist = (
            'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t'
            'Concentration\tTransfer Volume\tDestination Plate Name\t'
            'Destination Well\n'
            'sam1\tWater\t384PP_AQ_BP2_HT\tA1\t2.0\t1000.0\tNormalizedDNA\t'
            'A1\n'
            'sam2\tWater\t384PP_AQ_BP2_HT\tA2\t7.89\t2867.5\tNormalizedDNA\t'
            'A2\n'
            'blank1\tWater\t384PP_AQ_BP2_HT\tB1\tnan\t0.0\tNormalizedDNA\tB1\n'
            'sam3\tWater\t384PP_AQ_BP2_HT\tB2\t0.0\t0.0\tNormalizedDNA\tB2\n'
            'sam1\tSample\t384PP_AQ_BP2_HT\tA1\t2.0\t2500.0\tNormalizedDNA\t'
            'A1\n'
            'sam2\tSample\t384PP_AQ_BP2_HT\tA2\t7.89\t632.5\tNormalizedDNA\t'
            'A2\n'
            'blank1\tSample\t384PP_AQ_BP2_HT\tB1\tnan\t3500.0\tNormalizedDNA\t'
            'B1\n'
            'sam3\tSample\t384PP_AQ_BP2_HT\tB2\t0.0\t3500.0\tNormalizedDNA\t'
            'B2')
        dna_vols = np.array([[2500., 632.5], [3500., 3500.]])
        water_vols = 3500 - dna_vols
        wells = np.array([['A1', 'A2'], ['B1', 'B2']])
        sample_names = np.array([['sam1', 'sam2'], ['blank1', 'sam3']])
        dna_concs = np.array([[2, 7.89], [np.nan, .0]])
        obs_picklist = NormalizationProcess._format_picklist(
            dna_vols, water_vols, wells, sample_names=sample_names,
            dna_concs=dna_concs)
        self.assertEqual(exp_picklist, obs_picklist)
        # test if switching dest wells
        exp_picklist = (
            'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t'
            'Concentration\tTransfer Volume\tDestination Plate Name\t'
            'Destination Well\n'
            'sam1\tWater\t384PP_AQ_BP2_HT\tA1\t2.0\t1000.0\tNormalizedDNA\t'
            'D1\n'
            'sam2\tWater\t384PP_AQ_BP2_HT\tA2\t7.89\t2867.5\tNormalizedDNA\t'
            'D2\n'
            'blank1\tWater\t384PP_AQ_BP2_HT\tB1\tnan\t0.0\tNormalizedDNA\tE1\n'
            'sam3\tWater\t384PP_AQ_BP2_HT\tB2\t0.0\t0.0\tNormalizedDNA\tE2\n'
            'sam1\tSample\t384PP_AQ_BP2_HT\tA1\t2.0\t2500.0\tNormalizedDNA\t'
            'D1\n'
            'sam2\tSample\t384PP_AQ_BP2_HT\tA2\t7.89\t632.5\tNormalizedDNA\t'
            'D2\n'
            'blank1\tSample\t384PP_AQ_BP2_HT\tB1\tnan\t3500.0\tNormalizedDNA\t'
            'E1\n'
            'sam3\tSample\t384PP_AQ_BP2_HT\tB2\t0.0\t3500.0\tNormalizedDNA\t'
            'E2')
        dna_vols = np.array([[2500., 632.5], [3500., 3500.]])
        water_vols = 3500 - dna_vols
        wells = np.array([['A1', 'A2'], ['B1', 'B2']])
        dest_wells = np.array([['D1', 'D2'], ['E1', 'E2']])
        sample_names = np.array([['sam1', 'sam2'], ['blank1', 'sam3']])
        dna_concs = np.array([[2, 7.89], [np.nan, .0]])
        obs_picklist = NormalizationProcess._format_picklist(
            dna_vols, water_vols, wells, dest_wells=dest_wells,
            sample_names=sample_names, dna_concs=dna_concs)
        self.assertEqual(exp_picklist, obs_picklist)
    def test_generate_echo_picklist(self):
        """Full picklist generation; NORM_PROCESS_PICKLIST is a module-level
        expected-output constant defined elsewhere in this file."""
        obs = NormalizationProcess(2).generate_echo_picklist()
        self.assertEqual(obs, NORM_PROCESS_PICKLIST)
class TestQuantificationProcess(LabmanTestCase):
def test_compute_pico_concentration(self):
dna_vals = np.array([[10.14, 7.89, 7.9, 15.48],
[7.86, 8.07, 8.16, 9.64],
[12.29, 7.64, 7.32, 13.74]])
obs = QuantificationProcess._compute_pico_concentration(
dna_vals, size=400)
exp = np.array([[38.4090909, 29.8863636, 29.9242424, 58.6363636],
[29.7727273, 30.5681818, 30.9090909, 36.5151515],
[46.5530303, 28.9393939, 27.7272727, 52.0454545]])
npt.assert_allclose(obs, exp)
    def test_make_2D_array(self):
        """_make_2D_array reshapes a well-keyed dataframe into a 2D grid."""
        # default column names: 'Sample DNA Concentration' / 'Well'
        example_qpcr_df = pd.DataFrame(
            {'Sample DNA Concentration': [12, 0, 5, np.nan],
             'Well': ['A1', 'A2', 'A3', 'A4']})
        exp_cp_array = np.array([[12.0, 0.0, 5.0, np.nan]])
        obs = QuantificationProcess._make_2D_array(
            example_qpcr_df, rows=1, cols=4).astype(float)
        np.testing.assert_allclose(obs, exp_cp_array)
        # custom data/well column names ('Cp'/'Pos') and a 2x4 grid
        example2_qpcr_df = pd.DataFrame({'Cp': [12, 0, 1, np.nan,
                                                12, 0, 5, np.nan],
                                         'Pos': ['A1', 'A2', 'A3', 'A4',
                                                 'B1', 'B2', 'B3', 'B4']})
        exp2_cp_array = np.array([[12.0, 0.0, 1.0, np.nan],
                                  [12.0, 0.0, 5.0, np.nan]])
        obs = QuantificationProcess._make_2D_array(
            example2_qpcr_df, data_col='Cp', well_col='Pos', rows=2,
            cols=4).astype(float)
        np.testing.assert_allclose(obs, exp2_cp_array)
    def test_rationalize_pico_csv_string(self):
        """_rationalize_pico_csv_string normalizes line endings to \\n and
        strips the '<'/'>' out-of-range markers from values."""
        # input with bare \r line endings and </> qualified concentrations
        pico_csv1 = ('Results \r'
                     ' \r'
                     'Well ID\tWell\t[Blanked-RFU]\t[Concentration] \r'
                     'SPL1\tA1\t<0.000\t3.432 \r'
                     'SPL2\tA2\t4949.000\t3.239 \r'
                     'SPL3\tB1\t>15302.000\t10.016 \r'
                     'SPL4\tB2\t4039.000\t2.644 \r'
                     ' \r'
                     'Curve2 Fitting Results \r'
                     ' \r'
                     'Curve Name\tCurve Formula\tA\tB\tR2\tFit F Prob\r'
                     'Curve2\tY=A*X+B\t1.53E+003\t0\t0.995\t?????')
        expected_output = (
            'Results \n'
            ' \n'
            'Well ID\tWell\t[Blanked-RFU]\t[Concentration] \n'
            'SPL1\tA1\t0.000\t3.432 \n'
            'SPL2\tA2\t4949.000\t3.239 \n'
            'SPL3\tB1\t15302.000\t10.016 \n'
            'SPL4\tB2\t4039.000\t2.644 \n'
            ' \n'
            'Curve2 Fitting Results \n'
            ' \n'
            'Curve Name\tCurve Formula\tA\tB\tR2\tFit F Prob\n'
            'Curve2\tY=A*X+B\t1.53E+003\t0\t0.995\t?????')
        output1 = QuantificationProcess._rationalize_pico_csv_string(pico_csv1)
        self.assertEqual(output1, expected_output)
        # same input but with \r\n line endings; same expected output
        pico_csv2 = ('Results \r\n'
                     ' \r\n'
                     'Well ID\tWell\t[Blanked-RFU]\t[Concentration] \r\n'
                     'SPL1\tA1\t<0.000\t3.432 \r\n'
                     'SPL2\tA2\t4949.000\t3.239 \r\n'
                     'SPL3\tB1\t>15302.000\t10.016 \r\n'
                     'SPL4\tB2\t4039.000\t2.644 \r\n'
                     ' \r\n'
                     'Curve2 Fitting Results \r\n'
                     ' \r\n'
                     'Curve Name\tCurve Formula\tA\tB\tR2\tFit F Prob\r\n'
                     'Curve2\tY=A*X+B\t1.53E+003\t0\t0.995\t?????')
        output2 = QuantificationProcess._rationalize_pico_csv_string(pico_csv2)
        self.assertEqual(output2, expected_output)
    def test_parse_pico_csv(self):
        """_parse_pico_csv extracts Well/[Concentration] columns from a
        pico sheet; </> markers are stripped and ????? values are filled
        from surrounding data, while other non-numeric values raise."""
        # Test a normal sheet
        pico_csv1 = '''Results
Well ID\tWell\t[Blanked-RFU]\t[Concentration]
SPL1\tA1\t5243.000\t3.432
SPL2\tA2\t4949.000\t3.239
SPL3\tB1\t15302.000\t10.016
SPL4\tB2\t4039.000\t2.644
Curve2 Fitting Results
Curve Name\tCurve Formula\tA\tB\tR2\tFit F Prob
Curve2\tY=A*X+B\t1.53E+003\t0\t0.995\t?????
'''
        exp_pico_df1 = pd.DataFrame({'Well': ['A1', 'A2', 'B1', 'B2'],
                                     'Sample DNA Concentration':
                                     [3.432, 3.239, 10.016, 2.644]})
        obs_pico_df1 = QuantificationProcess._parse_pico_csv(pico_csv1)
        pd.testing.assert_frame_equal(obs_pico_df1, exp_pico_df1,
                                      check_like=True)
        # Test a sheet that has some ????, <, and > values
        pico_csv2 = '''Results
Well ID\tWell\t[Blanked-RFU]\t[Concentration]
SPL1\tA1\t5243.000\t>3.432
SPL2\tA2\t4949.000\t<0.000
SPL3\tB1\t15302.000\t10.016
SPL4\tB2\t\t?????
Curve2 Fitting Results
Curve Name\tCurve Formula\tA\tB\tR2\tFit F Prob
Curve2\tY=A*X+B\t1.53E+003\t0\t0.995\t?????
'''
        exp_pico_df2 = pd.DataFrame({'Well': ['A1', 'A2', 'B1', 'B2'],
                                     'Sample DNA Concentration':
                                     [3.432, 0.000, 10.016, 10.016]})
        obs_pico_df2 = QuantificationProcess._parse_pico_csv(pico_csv2)
        pd.testing.assert_frame_equal(obs_pico_df2, exp_pico_df2,
                                      check_like=True)
        # Test a sheet that has unexpected value that can't be converted to #
        pico_csv3 = '''Results
Well ID\tWell\t[Blanked-RFU]\t[Concentration]
SPL1\tA1\t5243.000\t3.432
SPL2\tA2\t4949.000\t3.239
SPL3\tB1\t15302.000\t10.016
SPL4\tB2\t\tfail
Curve2 Fitting Results
Curve Name\tCurve Formula\tA\tB\tR2\tFit F Prob
Curve2\tY=A*X+B\t1.53E+003\t0\t0.995\t?????
'''
        with self.assertRaises(ValueError):
            QuantificationProcess._parse_pico_csv(pico_csv3)
def test_parse(self):
# Test a normal sheet
# Note that the pico output file sometimes has \r (NOT \r\n)
# line endings
pico_csv1 = ('Results \r'
' \r'
'Well ID\tWell\t[Blanked-RFU]\t[Concentration] \r'
'SPL1\tA1\t5243.000\t3.432 \r'
'SPL2\tA2\t4949.000\t3.239 \r'
'SPL3\tB1\t15302.000\t10.016 \r'
'SPL4\tB2\t4039.000\t2.644 \r'
' \r'
'Curve2 Fitting Results \r'
' \r'
'Curve Name\tCurve Formula\tA\tB\tR2\tFit F Prob\r'
'Curve2\tY=A*X+B\t1.53E+003\t0\t0.995\t?????')
obs1 = QuantificationProcess.parse(pico_csv1)
exp = np.asarray(
[[3.432, 3.239, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan],
[10.016, 2.644, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan]])
npt.assert_allclose(obs1, exp)
# other times (maybe using other plate readers/machines?) the
# line endings are \r\n
pico_csv2 = ('Results \r\n'
' \r\n'
'Well ID\tWell\t[Blanked-RFU]\t[Concentration] \r\n'
'SPL1\tA1\t5243.000\t3.432 \r\n'
'SPL2\tA2\t4949.000\t3.239 \r\n'
'SPL3\tB1\t15302.000\t10.016 \r\n'
'SPL4\tB2\t4039.000\t2.644 \r\n'
' \r\n'
'Curve2 Fitting Results \r\n'
' \r\n'
'Curve Name\tCurve Formula\tA\tB\tR2\tFit F Prob\r\n'
'Curve2\tY=A*X+B\t1.53E+003\t0\t0.995\t?????')
obs2 = QuantificationProcess.parse(pico_csv2)
npt.assert_allclose(obs2, exp)
def test_attributes(self):
tester = QuantificationProcess(1)
self.assertEqual(tester.date,
_help_make_datetime('2017-10-25 19:10:05'))
self.assertEqual(tester.personnel, User('test@foo.bar'))
self.assertEqual(tester.process_id, 14)
self.assertEqual(tester.notes,None)
obs = tester.concentrations
# 380 because quantified 4 96-well plates in one process (and each
# plate has one empty well, hence 380 rather than 384)
self.assertEqual(len(obs), 380)
self.assertEqual(obs[0],
(LibraryPrep16SComposition(1), 20.0, 60.606))
self.assertEqual(obs[36],
(LibraryPrep16SComposition(37), 20.0, 60.606))
self.assertEqual(obs[7],
(LibraryPrep16SComposition(8), 1.0, 3.0303)) # blank
tester = QuantificationProcess(4)
self.assertEqual(tester.date,
_help_make_datetime('2017-10-25 19:10:25'))
self.assertEqual(tester.personnel, User('test@foo.bar'))
self.assertEqual(tester.process_id, 23)
self.assertEqual(tester.notes,None)
obs = tester.concentrations
self.assertEqual(len(obs), 380)
self.assertEqual( # experimental sample
obs[0], (LibraryPrepShotgunComposition(1), 12.068, 36.569))
self.assertEqual( # vibrio
obs[6], (LibraryPrepShotgunComposition(7), 8.904, 26.981))
self.assertEqual( # blank
obs[7], (LibraryPrepShotgunComposition(8), 0.342, 1.036))
tester = QuantificationProcess(5)
self.assertEqual(tester.date,
_help_make_datetime('2017-10-26 03:10:25'))
self.assertEqual(tester.personnel, User('test@foo.bar'))
self.assertEqual(tester.process_id, 27)
self.assertEqual(tester.notes,"Requantification--oops")
obs = tester.concentrations
self.assertEqual(len(obs), 380)
self.assertEqual(
obs[0], (LibraryPrepShotgunComposition(1), 13.068, 38.569))
self.assertEqual(
obs[6], (LibraryPrepShotgunComposition(7), 9.904, 28.981))
self.assertEqual(
obs[7], (LibraryPrepShotgunComposition(8), 1.342, 3.036))
def test_create(self):
    """Create quantification processes for a 96- and a 384-well plate.

    Only a handful of cells are seeded with known DNA concentrations;
    the rest are random, so assertions are limited to the known cells,
    the blanks row, and structural properties of the result.
    """
    user = User('test@foo.bar')
    plate = Plate(23)
    concentrations = np.around(np.random.rand(8, 12), 6)
    # Add some known values for DNA concentration
    concentrations[0][0] = 3
    concentrations[0][1] = 4
    concentrations[0][2] = 40
    # Set blank wells to zero DNA concentrations
    concentrations[7] = np.zeros_like(concentrations[7])
    # add DNA concentrations to plate and check for sanity
    obs = QuantificationProcess.create(user, plate, concentrations)
    self.assertTrue(_help_compare_timestamps(obs.date))
    self.assertEqual(obs.personnel, user)
    obs_c = obs.concentrations
    # 95 = 96-well plate minus its one empty well
    self.assertEqual(len(obs_c), 95)
    self.assertEqual(obs_c[0][0], LibraryPrep16SComposition(1))
    npt.assert_almost_equal(obs_c[0][1], concentrations[0][0])
    # Library concentration (third tuple element) is not computed yet
    self.assertIsNone(obs_c[0][2])
    self.assertEqual(obs_c[12][0], LibraryPrep16SComposition(2))  # B1
    npt.assert_almost_equal(obs_c[12][1], concentrations[1][0])
    self.assertIsNone(obs_c[12][2])
    # compute library concentrations (nM) from DNA concentrations (ng/uL)
    obs.compute_concentrations()
    obs_c = obs.concentrations
    # Check the values that we know
    npt.assert_almost_equal(obs_c[0][2], 9.09091)
    npt.assert_almost_equal(obs_c[1][2], 12.1212)
    npt.assert_almost_equal(obs_c[2][2], 121.212)
    # Last row are all 0 because they're blanks
    for i in range(84, 95):
        npt.assert_almost_equal(obs_c[i][2], 0)

    # Repeat for a 384-well (16x24) shotgun plate, this time with a note
    # and an explicit fragment size for the concentration computation.
    note = "a test note"
    concentrations = np.around(np.random.rand(16, 24), 6)
    # Add some known values
    concentrations[0][0] = 10.14
    concentrations[0][1] = 7.89
    plate = Plate(26)
    obs = QuantificationProcess.create(user, plate, concentrations, note)
    self.assertTrue(_help_compare_timestamps(obs.date))
    self.assertEqual(obs.personnel, user)
    obs_c = obs.concentrations
    self.assertEqual(len(obs_c), 380)
    self.assertEqual(obs_c[0][0], LibraryPrepShotgunComposition(1))
    npt.assert_almost_equal(obs_c[0][1], concentrations[0][0])
    self.assertIsNone(obs_c[0][2])
    obs.compute_concentrations(size=400)
    obs_c = obs.concentrations
    # Make sure that the known values are the ones that we expect
    npt.assert_almost_equal(obs_c[0][2], 38.4091)
    npt.assert_almost_equal(obs_c[1][2], 29.8864)
    # Test empty concentrations
    with self.assertRaises(ValueError):
        QuantificationProcess.create(user, plate, [])
    with self.assertRaises(ValueError):
        QuantificationProcess.create(user, plate, [[]])
class TestLibraryPrepShotgunProcess(LabmanTestCase):
    """Tests for shotgun (metagenomics) library prep process records."""

    def test_attributes(self):
        # Process 1 is a pre-existing shotgun library prep in the test DB.
        tester = LibraryPrepShotgunProcess(1)
        self.assertEqual(tester.date,
                         _help_make_datetime('2017-10-25 19:10:25'))
        self.assertEqual(tester.personnel, User('test@foo.bar'))
        self.assertEqual(tester.process_id, 22)
        self.assertEqual(tester.kappa_hyper_plus_kit, ReagentComposition(5))
        self.assertEqual(tester.stub_lot, ReagentComposition(6))
        self.assertEqual(tester.normalization_process, NormalizationProcess(1))
        self.assertEqual(tester.normalized_plate, Plate(25))
        self.assertEqual(tester.i5_primer_plate, Plate(19))
        self.assertEqual(tester.i7_primer_plate, Plate(20))
        self.assertEqual(tester.volume, 4000)

    def test_create(self):
        """Create a shotgun library prep and verify its primer layout."""
        user = User('test@foo.bar')
        plate = Plate(25)
        kappa = ReagentComposition(4)
        stub = ReagentComposition(5)
        obs = LibraryPrepShotgunProcess.create(
            user, plate, 'Test Shotgun Library 1', kappa, stub, 4000,
            Plate(19), Plate(20))
        self.assertTrue(_help_compare_timestamps(obs.date))
        self.assertEqual(obs.personnel, user)
        self.assertEqual(obs.kappa_hyper_plus_kit, kappa)
        self.assertEqual(obs.stub_lot, stub)
        self.assertEqual(obs.normalization_process, NormalizationProcess(1))
        self.assertEqual(obs.normalized_plate, Plate(25))
        self.assertEqual(obs.i5_primer_plate, Plate(19))
        self.assertEqual(obs.i7_primer_plate, Plate(20))
        self.assertEqual(obs.volume, 4000)
        plates = obs.plates
        self.assertEqual(len(plates), 1)
        # The code below is not generating a layout, just reading the layout
        # generated by LibraryPrepShotgunProcess.create into a
        # convenience format.
        # When LibraryPrepShotgunProcess.create makes obs, it fills the
        # obs.plates[0].layout property with a list of lists of lists of Wells.
        # This makes it very hard to create a test case: you have to know the
        # database id of each of the wells to instantiate all the expected Well
        # objects, and it is prohibitively time-consuming for the code to
        # instantiate them in the test case and compare them to all the Well
        # objects in obs.plates[0].layout.
        # Because of this, I chose to set up the part of the test that checks
        # whether the correct i5 and i7 primer has been assigned to the correct
        # well using a known-good as a list of lists of lists of strings: the
        # human-readable well id--A1, etc, the i7 primer name, and the
        # i5 primer name. The known-good, in this format, is stored in
        # SHOTGUN_PRIMER_LAYOUT. The code below is simply
        # extracting those strings from obs.plates[0].layout (into a
        # convenience variable obs_primer_layout) so that they can be compared
        # with the known-good via assertListEqual.
        # In the cases where a given Well object in obs.plates[0].layout is
        # None, of course you can't access its various nested string
        # properties, so None is returned instead of the strings.
        obs_primer_layout = []
        for row in obs.plates[0].layout:
            row_detail = []
            for well in row:
                well_detail = [None, None, None]
                if well is not None:
                    well_detail = [well.well_id,
                                   well.composition.i7_composition.
                                   primer_set_composition.external_id,
                                   well.composition.i5_composition.
                                   primer_set_composition.external_id]
                # end if well is not None
                row_detail.append(well_detail)
            obs_primer_layout.append(row_detail)
        self.assertListEqual(obs_primer_layout, SHOTGUN_PRIMER_LAYOUT)

    def test_format_picklist(self):
        """Check the Echo picklist text built from a small index frame."""
        # Expected output: one i5 transfer row per sample followed by one
        # i7 transfer row per sample, tab-separated with a header line.
        exp_picklist = (
            'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t'
            'Transfer Volume\tIndex Name\tIndex Sequence\t'
            'Destination Plate Name\tDestination Well\n'
            'sam1\tiTru5_plate\t384LDV_AQ_B2_HT\tA1\t250\tiTru5_01_A\tACCGACAA'
            '\tIndexPCRPlate\tA1\n'
            'sam2\tiTru5_plate\t384LDV_AQ_B2_HT\tB1\t250\tiTru5_01_B\tAGTGGCAA'
            '\tIndexPCRPlate\tA2\n'
            'blank1\tiTru5_plate\t384LDV_AQ_B2_HT\tC1\t250\tiTru5_01_C'
            '\tCACAGACT\tIndexPCRPlate\tB1\n'
            'sam3\tiTru5_plate\t384LDV_AQ_B2_HT\tD1\t250\tiTru5_01_D\tCGACACTT'
            '\tIndexPCRPlate\tB2\n'
            'sam1\tiTru7_plate\t384LDV_AQ_B2_HT\tA1\t250\tiTru7_101_01\t'
            'ACGTTACC\tIndexPCRPlate\tA1\n'
            'sam2\tiTru7_plate\t384LDV_AQ_B2_HT\tA2\t250\tiTru7_101_02\t'
            'CTGTGTTG\tIndexPCRPlate\tA2\n'
            'blank1\tiTru7_plate\t384LDV_AQ_B2_HT\tA3\t250\tiTru7_101_03\t'
            'TGAGGTGT\tIndexPCRPlate\tB1\n'
            'sam3\tiTru7_plate\t384LDV_AQ_B2_HT\tA4\t250\tiTru7_101_04\t'
            'GATCCATG\tIndexPCRPlate\tB2')
        sample_wells = np.array(['A1', 'A2', 'B1', 'B2'])
        sample_names = np.array(['sam1', 'sam2', 'blank1', 'sam3'])
        indices = pd.DataFrame({
            'i5 name': {0: 'iTru5_01_A', 1: 'iTru5_01_B', 2: 'iTru5_01_C',
                        3: 'iTru5_01_D'},
            'i5 plate': {0: 'iTru5_plate', 1: 'iTru5_plate', 2: 'iTru5_plate',
                         3: 'iTru5_plate'},
            'i5 sequence': {0: 'ACCGACAA', 1: 'AGTGGCAA', 2: 'CACAGACT',
                            3: 'CGACACTT'},
            'i5 well': {0: 'A1', 1: 'B1', 2: 'C1', 3: 'D1'},
            'i7 name': {0: 'iTru7_101_01', 1: 'iTru7_101_02',
                        2: 'iTru7_101_03', 3: 'iTru7_101_04'},
            'i7 plate': {0: 'iTru7_plate', 1: 'iTru7_plate', 2: 'iTru7_plate',
                         3: 'iTru7_plate'},
            'i7 sequence': {0: 'ACGTTACC', 1: 'CTGTGTTG', 2: 'TGAGGTGT',
                            3: 'GATCCATG'},
            'i7 well': {0: 'A1', 1: 'A2', 2: 'A3', 3: 'A4'},
            'index combo seq': {0: 'ACCGACAAACGTTACC', 1: 'AGTGGCAACTGTGTTG',
                                2: 'CACAGACTTGAGGTGT', 3: 'CGACACTTGATCCATG'}})
        obs_picklist = LibraryPrepShotgunProcess._format_picklist(
            sample_names, sample_wells, indices)
        self.assertEqual(exp_picklist, obs_picklist)

    def test_generate_echo_picklist(self):
        # Full picklist is large; only check header, first, and last rows.
        obs = LibraryPrepShotgunProcess(1).generate_echo_picklist()
        obs_lines = obs.splitlines()
        self.assertEqual(
            obs_lines[0],
            'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t'
            'Transfer Volume\tIndex Name\tIndex Sequence\t'
            'Destination Plate Name\tDestination Well')
        self.assertEqual(
            obs_lines[1],
            '1.SKB1.640202.Test.plate.1.A1\tiTru_5_primer\t384LDV_AQ_B2_HT\tA1\t250\t'
            'iTru5_01_A\tACCGACAA\tIndexPCRPlate\tA1')
        self.assertEqual(
            obs_lines[-1],
            'blank.Test.plate.4.H11\tiTru_7_primer\t384LDV_AQ_B2_HT\tP2\t250\t'
            'iTru7_115_01\tCAAGGTCT\tIndexPCRPlate\tP22')
class TestPoolingProcess(LabmanTestCase):
    """Tests for PoolingProcess: pooling math, DB records, and file output."""

    def test_compute_pooling_values_eqvol(self):
        # With equal-volume pooling the input concentrations are irrelevant:
        # every well gets the same share of total_vol (here 5000 nL each).
        qpcr_conc = np.array(
            [[98.14626462, 487.8121413, 484.3480866, 2.183406934],
             [498.3536649, 429.0839787, 402.4270321, 140.1601735],
             [21.20533391, 582.9456031, 732.2655041, 7.545145988]])
        obs_sample_vols = PoolingProcess.compute_pooling_values_eqvol(
            qpcr_conc, total_vol=60.0)
        exp_sample_vols = np.zeros([3, 4]) + 5000
        npt.assert_allclose(obs_sample_vols, exp_sample_vols)
        # An integer total_vol must behave exactly like the float form.
        obs_sample_vols = PoolingProcess.compute_pooling_values_eqvol(
            qpcr_conc, total_vol=60)
        npt.assert_allclose(obs_sample_vols, exp_sample_vols)

    def test_compute_pooling_values_minvol(self):
        # Samples below floor_conc get floor_vol; others get volume
        # inversely proportional to concentration.
        sample_concs = np.array([[1, 12, 400], [200, 40, 1]])
        exp_vols = np.array([[100, 100, 4166.6666666666],
                             [8333.33333333333, 41666.666666666, 100]])
        obs_vols = PoolingProcess.compute_pooling_values_minvol(
            sample_concs, total=.01, floor_vol=100, floor_conc=40,
            total_each=False, vol_constant=10**9)
        npt.assert_allclose(exp_vols, obs_vols)

    def test_compute_pooling_values_minvol_amplicon(self):
        # Same function with its amplicon-oriented default parameters.
        sample_concs = np.array([[1, 12, 40], [200, 40, 1]])
        exp_vols = np.array([[2, 2, 6],
                             [1.2, 6, 2]])
        obs_vols = PoolingProcess.compute_pooling_values_minvol(
            sample_concs)
        npt.assert_allclose(exp_vols, obs_vols)

    def test_adjust_blank_vols(self):
        # Wells flagged as blanks are clamped to blank_vol; others untouched.
        pool_vols = np.array([[2, 2, 6],
                              [1.2, 6, 2]])
        pool_blanks = np.array([[True, False, False],
                                [False, False, True]])
        blank_vol = 1
        exp_vols = np.array([[1, 2, 6],
                             [1.2, 6, 1]])
        obs_vols = PoolingProcess.adjust_blank_vols(pool_vols,
                                                    pool_blanks,
                                                    blank_vol)
        npt.assert_allclose(obs_vols, exp_vols)

    def test_select_blanks(self):
        # Keep only the top-N blanks (by concentration); zero out the rest.
        pool_vols = np.array([[2, 2, 6],
                              [1.2, 6, 2]])
        pool_concs = np.array([[3, 2, 6],
                               [1.2, 6, 2]])
        pool_blanks = np.array([[True, False, False],
                                [False, False, True]])
        # N=1: only the higher-concentration blank survives.
        exp_vols1 = np.array([[2, 2, 6],
                              [1.2, 6, 0]])
        obs_vols1 = PoolingProcess.select_blanks(pool_vols,
                                                 pool_concs,
                                                 pool_blanks,
                                                 1)
        npt.assert_allclose(obs_vols1, exp_vols1)
        # N=2: both blanks kept, volumes unchanged.
        exp_vols2 = np.array([[2, 2, 6],
                              [1.2, 6, 2]])
        obs_vols2 = PoolingProcess.select_blanks(pool_vols,
                                                 pool_concs,
                                                 pool_blanks,
                                                 2)
        npt.assert_allclose(obs_vols2, exp_vols2)
        # N=0: all blank volumes zeroed.
        exp_vols0 = np.array([[0, 2, 6],
                              [1.2, 6, 0]])
        obs_vols0 = PoolingProcess.select_blanks(pool_vols,
                                                 pool_concs,
                                                 pool_blanks,
                                                 0)
        npt.assert_allclose(obs_vols0, exp_vols0)

    def test_select_blanks_num_errors(self):
        # A negative blank count must raise.
        pool_vols = np.array([[2, 2, 6],
                              [1.2, 6, 2]])
        pool_concs = np.array([[3, 2, 6],
                               [1.2, 6, 2]])
        pool_blanks = np.array([[True, False, False],
                                [False, False, True]])
        # NOTE(review): the parentheses form a regex group, not a literal
        # match; the pattern still matches "passed: -1" so the test works.
        with self.assertRaisesRegex(ValueError, "(passed: -1)"):
            PoolingProcess.select_blanks(pool_vols,
                                         pool_concs,
                                         pool_blanks,
                                         -1)

    def test_select_blanks_shape_errors(self):
        # Mismatched array shapes must raise.
        pool_vols = np.array([[2, 2, 6],
                              [1.2, 6, 2],
                              [1.2, 6, 2]])
        pool_concs = np.array([[3, 2, 6],
                               [1.2, 6, 2]])
        pool_blanks = np.array([[True, False, False],
                                [False, False, True]])
        with self.assertRaisesRegex(ValueError, "all input arrays"):
            PoolingProcess.select_blanks(pool_vols,
                                         pool_concs,
                                         pool_blanks,
                                         2)

    def test_attributes(self):
        # Process 1 is a pre-existing 16S pooling process in the test DB.
        tester = PoolingProcess(1)
        self.assertEqual(tester.date,
                         _help_make_datetime('2017-10-25 19:10:25'))
        self.assertEqual(tester.personnel, User('test@foo.bar'))
        self.assertEqual(tester.process_id, 16)
        self.assertEqual(tester.quantification_process,
                         QuantificationProcess(1))
        self.assertEqual(tester.robot, Equipment(8))
        self.assertEqual(tester.destination, '1')
        self.assertEqual(tester.pool, PoolComposition(1))
        components = tester.components
        self.assertEqual(len(components), 95)
        self.assertEqual(
            components[0], (LibraryPrep16SComposition(1), 1.0))
        self.assertEqual(
            components[36], (LibraryPrep16SComposition(37), 1.0))
        self.assertEqual(
            components[94], (LibraryPrep16SComposition(95), 1.0))

    def test_create(self):
        """Create a pooling process and verify the stored metadata."""
        user = User('test@foo.bar')
        quant_proc = QuantificationProcess(1)
        robot = Equipment(8)
        input_compositions = [
            {'composition': Composition.factory(1544), 'input_volume': 1,
             'percentage_of_output': 0.25},
            {'composition': Composition.factory(1547), 'input_volume': 1,
             'percentage_of_output': 0.25},
            {'composition': Composition.factory(1550), 'input_volume': 1,
             'percentage_of_output': 0.25},
            {'composition': Composition.factory(1553), 'input_volume': 1,
             'percentage_of_output': 0.25}]
        # Record of which pooling function and parameters were used.
        func_data = {"function": "amplicon",
                     "parameters": {"dna_amount": 240, "min_val": 1,
                                    "max_val": 15, "blank_volume": 2}}
        obs = PoolingProcess.create(user, quant_proc, 'New test pool name', 4,
                                    input_compositions, func_data, robot, '1')
        self.assertTrue(_help_compare_timestamps(obs.date))
        self.assertEqual(obs.personnel, user)
        self.assertEqual(obs.quantification_process, quant_proc)
        self.assertEqual(obs.robot, robot)
        self.assertEqual(obs.pooling_function_data, func_data)

    def test_format_picklist(self):
        # A NaN volume is written out as 0.00; wells roll over to the next
        # destination well once max_vol_per_well is reached.
        vol_sample = np.array([[10.00, 10.00, np.nan, 5.00, 10.00, 10.00]])
        header = ['Source Plate Name,Source Plate Type,Source Well,'
                  'Concentration,Transfer Volume,Destination Plate Name,'
                  'Destination Well']
        exp_values = ['1,384LDV_AQ_B2_HT,A1,,10.00,NormalizedDNA,A1',
                      '1,384LDV_AQ_B2_HT,A2,,10.00,NormalizedDNA,A1',
                      '1,384LDV_AQ_B2_HT,A3,,0.00,NormalizedDNA,A1',
                      '1,384LDV_AQ_B2_HT,A4,,5.00,NormalizedDNA,A1',
                      '1,384LDV_AQ_B2_HT,A5,,10.00,NormalizedDNA,A2',
                      '1,384LDV_AQ_B2_HT,A6,,10.00,NormalizedDNA,A2']
        exp_str = '\n'.join(header + exp_values)
        obs_str = PoolingProcess._format_picklist(
            vol_sample, max_vol_per_well=26, dest_plate_shape=[16, 24])
        self.assertEqual(exp_str, obs_str)

    def test_generate_echo_picklist(self):
        # Only check header, first, and last rows of the generated picklist.
        obs = PoolingProcess(3).generate_echo_picklist()
        obs_lines = obs.splitlines()
        self.assertEqual(
            obs_lines[0],
            'Source Plate Name,Source Plate Type,Source Well,Concentration,'
            'Transfer Volume,Destination Plate Name,Destination Well')
        self.assertEqual(obs_lines[1],
                         '1,384LDV_AQ_B2_HT,A1,,1.00,NormalizedDNA,A1')
        self.assertEqual(obs_lines[-1],
                         '1,384LDV_AQ_B2_HT,P24,,0.00,NormalizedDNA,A1')

    def test_generate_epmotion_file(self):
        obs = PoolingProcess(1).generate_epmotion_file()
        obs_lines = obs.splitlines()
        self.assertEqual(
            obs_lines[0], 'Rack,Source,Rack,Destination,Volume,Tool')
        self.assertEqual(obs_lines[1], '1,A1,1,1,1.000,1')
        self.assertEqual(obs_lines[-1], '1,G12,1,1,1.000,1')

    def test_generate_pool_file(self):
        # The file format depends on the pooled composition type:
        # EpMotion format for 16S pools, Echo picklist for shotgun pools,
        # and a ValueError for anything else.
        self.assertTrue(PoolingProcess(1).generate_pool_file().startswith(
            'Rack,Source,Rack,Destination,Volume,Tool'))
        self.assertTrue(PoolingProcess(3).generate_pool_file().startswith(
            'Source Plate Name,Source Plate Type,Source Well,Concentration,'))
        with self.assertRaises(ValueError):
            PoolingProcess(2).generate_pool_file()
class TestSequencingProcess(LabmanTestCase):
    """Tests for SequencingProcess: run records, sample sheets, and preps."""

    def test_attributes(self):
        # Process 1 is a pre-existing amplicon sequencing run in the test DB.
        tester = SequencingProcess(1)
        self.assertEqual(tester.date,
                         _help_make_datetime('2017-10-25 19:10:25'))
        self.assertEqual(tester.personnel, User('test@foo.bar'))
        self.assertEqual(tester.process_id, 18)
        self.assertEqual(tester.pools, [[PoolComposition(2), 1]])
        self.assertEqual(tester.run_name, 'Test Run.1')
        self.assertEqual(tester.experiment, 'TestExperiment1')
        self.assertEqual(tester.sequencer, Equipment(18))
        self.assertEqual(tester.fwd_cycles, 151)
        self.assertEqual(tester.rev_cycles, 151)
        self.assertEqual(tester.assay, 'Amplicon')
        self.assertEqual(tester.principal_investigator, User('test@foo.bar'))
        # Contacts are returned sorted by email.
        self.assertEqual(
            tester.contacts,
            [User('admin@foo.bar'), User('demo@microbio.me'),
             User('shared@foo.bar')])

    def test_list_sequencing_runs(self):
        obs = SequencingProcess.list_sequencing_runs()
        obs[0] is the amplicon run, obs[1] the shotgun run.
        self.assertEqual(obs[0], {'process_id': 18,
                                  'run_name': 'Test Run.1',
                                  'sequencing_process_id': 1,
                                  'experiment': 'TestExperiment1',
                                  'sequencer_id': 18,
                                  'fwd_cycles': 151,
                                  'rev_cycles': 151,
                                  'assay': 'Amplicon',
                                  'principal_investigator': 'test@foo.bar'})
        self.assertEqual(obs[1], {'process_id': 25,
                                  'run_name': 'TestShotgunRun1',
                                  'sequencing_process_id': 2,
                                  'experiment': 'TestExperimentShotgun1',
                                  'sequencer_id': 19,
                                  'fwd_cycles': 151,
                                  'rev_cycles': 151,
                                  'assay': 'Metagenomics',
                                  'principal_investigator': 'test@foo.bar'})

    def test_create(self):
        """Create a sequencing run and verify its stored attributes."""
        user = User('test@foo.bar')
        pool = PoolComposition(2)
        sequencer = Equipment(19)
        obs = SequencingProcess.create(
            user, [pool], 'TestCreateRun1', 'TestCreateExperiment1', sequencer,
            151, 151, user, contacts=[
                User('shared@foo.bar'), User('admin@foo.bar'),
                User('demo@microbio.me')])
        self.assertTrue(_help_compare_timestamps(obs.date))
        self.assertEqual(obs.personnel, user)
        self.assertEqual(obs.pools, [[PoolComposition(2), 1]])
        self.assertEqual(obs.run_name, 'TestCreateRun1')
        self.assertEqual(obs.experiment, 'TestCreateExperiment1')
        self.assertEqual(obs.sequencer, Equipment(19))
        self.assertEqual(obs.fwd_cycles, 151)
        self.assertEqual(obs.rev_cycles, 151)
        self.assertEqual(obs.assay, 'Amplicon')
        self.assertEqual(obs.principal_investigator, User('test@foo.bar'))
        # Contacts come back sorted by email, regardless of input order.
        self.assertEqual(
            obs.contacts,
            [User('admin@foo.bar'), User('demo@microbio.me'),
             User('shared@foo.bar')])

    def test_bcl_scrub_name(self):
        # bcl2fastq-safe names: dots become underscores; dashes and
        # underscores pass through unchanged.
        self.assertEqual(SequencingProcess._bcl_scrub_name('test.1'), 'test_1')
        self.assertEqual(SequencingProcess._bcl_scrub_name('test-1'), 'test-1')
        self.assertEqual(SequencingProcess._bcl_scrub_name('test_1'), 'test_1')

    def test_reverse_complement(self):
        self.assertEqual(
            SequencingProcess._reverse_complement('AGCCT'), 'AGGCT')

    def test_sequencer_i5_index(self):
        # HiSeq4000 and NextSeq use i5 reverse-complement workflows;
        # HiSeq2500 uses the forward i5 sequence as-is.
        indices = ['AGCT', 'CGGA', 'TGCC']
        exp_rc = ['AGCT', 'TCCG', 'GGCA']
        obs_hiseq4k = SequencingProcess._sequencer_i5_index(
            'HiSeq4000', indices)
        self.assertListEqual(obs_hiseq4k, exp_rc)
        obs_hiseq25k = SequencingProcess._sequencer_i5_index(
            'HiSeq2500', indices)
        self.assertListEqual(obs_hiseq25k, indices)
        obs_nextseq = SequencingProcess._sequencer_i5_index(
            'NextSeq', indices)
        self.assertListEqual(obs_nextseq, exp_rc)
        # An unrecognized sequencer type must raise.
        with self.assertRaises(ValueError):
            SequencingProcess._sequencer_i5_index('foo', indices)

    def test_format_sample_sheet_data(self):
        # test that single lane works
        exp_data = (
            'Lane,Sample_ID,Sample_Name,Sample_Plate'
            ',Sample_Well,I7_Index_ID,index,I5_Index_ID'
            ',index2,Sample_Project,Well_Description\n'
            '1,blank1,blank1,example,B1,iTru7_101_03,TGAGGTGT,'
            'iTru5_01_C,CACAGACT,,\n'
            '1,sam1,sam1,example,A1,iTru7_101_01,ACGTTACC,'
            'iTru5_01_A,ACCGACAA,labperson1_pi1_studyId1,\n'
            '1,sam2,sam2,example,A2,iTru7_101_02,CTGTGTTG,'
            'iTru5_01_B,AGTGGCAA,labperson1_pi1_studyId1,\n'
            '1,sam3,sam3,example,B2,iTru7_101_04,GATCCATG,'
            'iTru5_01_D,CGACACTT,labperson1_pi1_studyId1,'
        )
        wells = ['A1', 'A2', 'B1', 'B2']
        sample_ids = ['sam1', 'sam2', 'blank1', 'sam3']
        sample_projs = ["labperson1_pi1_studyId1", "labperson1_pi1_studyId1",
                        "", "labperson1_pi1_studyId1"]
        i5_name = ['iTru5_01_A', 'iTru5_01_B', 'iTru5_01_C', 'iTru5_01_D']
        i5_seq = ['ACCGACAA', 'AGTGGCAA', 'CACAGACT', 'CGACACTT']
        i7_name = ['iTru7_101_01', 'iTru7_101_02',
                   'iTru7_101_03', 'iTru7_101_04']
        i7_seq = ['ACGTTACC', 'CTGTGTTG', 'TGAGGTGT', 'GATCCATG']
        sample_plates = ['example'] * 4
        obs_data = SequencingProcess._format_sample_sheet_data(
            sample_ids, i7_name, i7_seq, i5_name, i5_seq, sample_projs,
            wells=wells, sample_plates=sample_plates, lanes=[1])
        self.assertEqual(obs_data, exp_data)
        # test that two lanes works
        exp_data_2 = (
            'Lane,Sample_ID,Sample_Name,Sample_Plate,'
            'Sample_Well,I7_Index_ID,index,I5_Index_ID,'
            'index2,Sample_Project,Well_Description\n'
            '1,blank1,blank1,example,B1,iTru7_101_03,TGAGGTGT,'
            'iTru5_01_C,CACAGACT,,\n'
            '1,sam1,sam1,example,A1,iTru7_101_01,ACGTTACC,'
            'iTru5_01_A,ACCGACAA,labperson1_pi1_studyId1,\n'
            '1,sam2,sam2,example,A2,iTru7_101_02,CTGTGTTG,'
            'iTru5_01_B,AGTGGCAA,labperson1_pi1_studyId1,\n'
            '1,sam3,sam3,example,B2,iTru7_101_04,GATCCATG,'
            'iTru5_01_D,CGACACTT,labperson1_pi1_studyId1,\n'
            '2,blank1,blank1,example,B1,iTru7_101_03,TGAGGTGT'
            ',iTru5_01_C,CACAGACT,,\n'
            '2,sam1,sam1,example,A1,iTru7_101_01,ACGTTACC,'
            'iTru5_01_A,ACCGACAA,labperson1_pi1_studyId1,\n'
            '2,sam2,sam2,example,A2,iTru7_101_02,CTGTGTTG,'
            'iTru5_01_B,AGTGGCAA,labperson1_pi1_studyId1,\n'
            '2,sam3,sam3,example,B2,iTru7_101_04,GATCCATG'
            ',iTru5_01_D,CGACACTT,labperson1_pi1_studyId1,')
        obs_data_2 = SequencingProcess._format_sample_sheet_data(
            sample_ids, i7_name, i7_seq, i5_name, i5_seq, sample_projs, wells=wells,
            sample_plates=sample_plates,
            lanes=[1, 2])
        self.assertEqual(obs_data_2, exp_data_2)
        # test with r/c i5 barcodes
        exp_data = (
            'Lane,Sample_ID,Sample_Name,Sample_Plate'
            ',Sample_Well,I7_Index_ID,index,I5_Index_ID'
            ',index2,Sample_Project,Well_Description\n'
            '1,blank1,blank1,example,B1,iTru7_101_03,TGAGGTGT,'
            'iTru5_01_C,CACAGACT,,\n'
            '1,sam1,sam1,example,A1,iTru7_101_01,ACGTTACC,'
            'iTru5_01_A,ACCGACAA,labperson1_pi1_studyId1,\n'
            '1,sam2,sam2,example,A2,iTru7_101_02,CTGTGTTG,'
            'iTru5_01_B,AGTGGCAA,labperson1_pi1_studyId1,\n'
            '1,sam3,sam3,example,B2,iTru7_101_04,GATCCATG,'
            'iTru5_01_D,CGACACTT,labperson1_pi1_studyId1,')
        i5_seq = ['ACCGACAA', 'AGTGGCAA', 'CACAGACT', 'CGACACTT']
        obs_data = SequencingProcess._format_sample_sheet_data(
            sample_ids, i7_name, i7_seq, i5_name, i5_seq, sample_projs, wells=wells,
            sample_plates=sample_plates, lanes=[1])
        self.assertEqual(obs_data, exp_data)
        # Test without header
        exp_data = (
            '1,blank1,blank1,example,B1,iTru7_101_03,TGAGGTGT,'
            'iTru5_01_C,CACAGACT,,\n'
            '1,sam1,sam1,example,A1,iTru7_101_01,ACGTTACC,'
            'iTru5_01_A,ACCGACAA,labperson1_pi1_studyId1,\n'
            '1,sam2,sam2,example,A2,iTru7_101_02,CTGTGTTG,'
            'iTru5_01_B,AGTGGCAA,labperson1_pi1_studyId1,\n'
            '1,sam3,sam3,example,B2,iTru7_101_04,GATCCATG,'
            'iTru5_01_D,CGACACTT,labperson1_pi1_studyId1,')
        obs_data = SequencingProcess._format_sample_sheet_data(
            sample_ids, i7_name, i7_seq, i5_name, i5_seq, sample_projs, wells=wells,
            sample_plates=sample_plates, lanes=[1],
            include_header=False)
        self.assertEqual(obs_data, exp_data)
        # Test without lane index (for single-lane sequencers)
        exp_data = (
            'Sample_ID,Sample_Name,Sample_Plate'
            ',Sample_Well,I7_Index_ID,index,I5_Index_ID'
            ',index2,Sample_Project,Well_Description\n'
            'blank1,blank1,example,B1,iTru7_101_03,TGAGGTGT,'
            'iTru5_01_C,CACAGACT,,\n'
            'sam1,sam1,example,A1,iTru7_101_01,ACGTTACC,'
            'iTru5_01_A,ACCGACAA,labperson1_pi1_studyId1,\n'
            'sam2,sam2,example,A2,iTru7_101_02,CTGTGTTG,'
            'iTru5_01_B,AGTGGCAA,labperson1_pi1_studyId1,\n'
            'sam3,sam3,example,B2,iTru7_101_04,GATCCATG,'
            'iTru5_01_D,CGACACTT,labperson1_pi1_studyId1,')
        obs_data = SequencingProcess._format_sample_sheet_data(
            sample_ids, i7_name, i7_seq, i5_name, i5_seq, sample_projs, wells=wells,
            sample_plates=sample_plates, lanes=[1],
            include_lane=False)
        self.assertEqual(obs_data, exp_data)

    def test_format_sample_sheet_comments(self):
        # Contacts are emitted sorted by contact name; names on one line,
        # emails on the next.
        contacts = {'Test User': 'tuser@fake.com',
                    'Another User': 'anuser@fake.com',
                    'Jon Jonny': 'jonjonny@foo.com',
                    'Gregorio Orio': 'gregOrio@foo.com'}
        principal_investigator = {'Knight': 'theknight@fake.com'}
        other = None
        sep = '\t'
        exp_comment = (
            'PI\tKnight\ttheknight@fake.com\n'
            'Contact\tAnother User\tGregorio Orio'
            '\tJon Jonny\tTest User\n'
            'Contact emails\tanuser@fake.com\tgregOrio@foo.com'
            '\tjonjonny@foo.com\ttuser@fake.com\n')
        obs_comment = SequencingProcess._format_sample_sheet_comments(
            principal_investigator, contacts, other, sep)
        self.assertEqual(exp_comment, obs_comment)

    def test_format_sample_sheet(self):
        tester2 = SequencingProcess(2)
        tester2_date = datetime.strftime(
            tester2.date, Process.get_date_format())
        # Note: cannot hard-code the date in the below known-good text
        # because date string representation is specific to time-zone in
        # which system running the tests is located!
        exp2 = (
            '# PI,Dude,test@foo.bar',
            '# Contact,Demo,Shared',
            '# Contact emails,demo@microbio.me,shared@foo.bar',
            '[Header]',
            'IEMFileVersion\t4',
            'Investigator Name\tDude',
            'Experiment Name\tTestExperimentShotgun1',
            'Date\t' + tester2_date,
            'Workflow\tGenerateFASTQ',
            'Application\tFASTQ Only',
            'Assay\tMetagenomics',
            'Description\t',
            'Chemistry\tDefault',
            '',
            '[Reads]',
            '151',
            '151',
            '',
            '[Settings]',
            'ReverseComplement\t0',
            '',
            '[Data]\n'
            'Sample_ID\tSample_Name\tSample_Plate\tSample_Well'
            '\tI7_Index_ID\tindex\tI5_Index_ID\tindex2\tSample_Project'
            '\tWell_Description',
            'sam1\tsam1\texample\tA1\tiTru7_101_01\tACGTTACC\tiTru5_01_A'
            '\tACCGACAA\texample_proj\t',
            'sam2\tsam2\texample\tA2\tiTru7_101_02\tCTGTGTTG\tiTru5_01_B'
            '\tAGTGGCAA\texample_proj\t',
            'blank1\tblank1\texample\tB1\tiTru7_101_03\tTGAGGTGT\t'
            'iTru5_01_C\tCACAGACT\texample_proj\t',
            'sam3\tsam3\texample\tB2\tiTru7_101_04\tGATCCATG\tiTru5_01_D'
            '\tCGACACTT\texample_proj\t')
        # The pre-formatted [Data] section passed in to _format_sample_sheet.
        data = (
            'Sample_ID\tSample_Name\tSample_Plate\tSample_Well\t'
            'I7_Index_ID\tindex\tI5_Index_ID\tindex2\tSample_Project\t'
            'Well_Description\n'
            'sam1\tsam1\texample\tA1\tiTru7_101_01\tACGTTACC\t'
            'iTru5_01_A\tACCGACAA\texample_proj\t\n'
            'sam2\tsam2\texample\tA2\tiTru7_101_02\tCTGTGTTG\t'
            'iTru5_01_B\tAGTGGCAA\texample_proj\t\n'
            'blank1\tblank1\texample\tB1\tiTru7_101_03\tTGAGGTGT\t'
            'iTru5_01_C\tCACAGACT\texample_proj\t\n'
            'sam3\tsam3\texample\tB2\tiTru7_101_04\tGATCCATG\t'
            'iTru5_01_D\tCGACACTT\texample_proj\t'
        )
        exp_sample_sheet = "\n".join(exp2)
        obs_sample_sheet = tester2._format_sample_sheet(data, sep='\t')
        self.assertEqual(exp_sample_sheet, obs_sample_sheet)

    def test_generate_sample_sheet_amplicon_single_lane(self):
        # Amplicon run, single lane
        tester = SequencingProcess(1)
        tester_date = datetime.strftime(tester.date, Process.get_date_format())
        # Note: cannot hard-code the date in the below known-good text
        # because date string representation is specific to time-zone in
        # which system running the tests is located!
        obs = tester.generate_sample_sheet()
        exp = ('# PI,Dude,test@foo.bar\n'
               '# Contact,Admin,Demo,Shared\n'
               '# Contact emails,admin@foo.bar,demo@microbio.me,'
               'shared@foo.bar\n'
               '[Header]\n'
               'IEMFileVersion,4\n'
               'Investigator Name,Dude\n'
               'Experiment Name,TestExperiment1\n'
               'Date,' + tester_date + '\n'
               'Workflow,GenerateFASTQ\n'
               'Application,FASTQ Only\n'
               'Assay,TruSeq HT\n'
               'Description,\n'
               'Chemistry,Amplicon\n\n'
               '[Reads]\n'
               '151\n'
               '151\n\n'
               '[Settings]\n'
               'ReverseComplement,0\n'
               'Adapter,AGATCGGAAGAGCACACGTCTGAACTCCAGTCA\n'
               'AdapterRead2,AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT\n\n'
               '[Data]\n'
               'Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,'
               'index,I5_Index_ID,index2,Sample_Project,Well_Description,,\n'
               'Test_sequencing_pool_1,,,,,NNNNNNNNNNNN,,,,3080,,,')
        self.assertEqual(obs, exp)

    def test_generate_sample_sheet_amplicon_multiple_lane(self):
        # Amplicon run, multiple lane
        user = User('test@foo.bar')
        tester = SequencingProcess.create(
            user, [PoolComposition(1), PoolComposition(2)], 'TestRun2',
            'TestExperiment2', Equipment(19), 151, 151, user,
            contacts=[User('shared@foo.bar')])
        tester_date = datetime.strftime(tester.date, Process.get_date_format())
        obs = tester.generate_sample_sheet()
        exp = ('# PI,Dude,test@foo.bar\n'
               '# Contact,Shared\n'
               '# Contact emails,shared@foo.bar\n'
               '[Header]\n'
               'IEMFileVersion,4\n'
               'Investigator Name,Dude\n'
               'Experiment Name,TestExperiment2\n'
               'Date,' + tester_date + '\n'
               'Workflow,GenerateFASTQ\n'
               'Application,FASTQ Only\n'
               'Assay,TruSeq HT\n'
               'Description,\n'
               'Chemistry,Amplicon\n\n'
               '[Reads]\n'
               '151\n'
               '151\n\n'
               '[Settings]\n'
               'ReverseComplement,0\n'
               'Adapter,AGATCGGAAGAGCACACGTCTGAACTCCAGTCA\n'
               'AdapterRead2,AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT\n\n'
               '[Data]\n'
               'Lane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,'
               'index,I5_Index_ID,index2,Sample_Project,Well_Description,,\n'
               '1,Test_Pool_from_Plate_1,,,,,NNNNNNNNNNNN,,,,3079,,,\n'
               '2,Test_sequencing_pool_1,,,,,NNNNNNNNNNNN,,,,3080,,,')
        self.assertEqual(obs, exp)

    def test_generate_sample_sheet_shotgun(self):
        # Shotgun run
        tester = SequencingProcess(2)
        tester_date = datetime.strftime(tester.date, Process.get_date_format())
        obs = tester.generate_sample_sheet().splitlines()
        exp = [
            '# PI,Dude,test@foo.bar',
            '# Contact,Demo,Shared',
            '# Contact emails,demo@microbio.me,shared@foo.bar',
            '[Header]',
            'IEMFileVersion,4',
            'Investigator Name,Dude',
            'Experiment Name,TestExperimentShotgun1',
            'Date,' + tester_date,
            'Workflow,GenerateFASTQ',
            'Application,FASTQ Only',
            'Assay,Metagenomics',
            'Description,',
            'Chemistry,Default',
            '',
            '[Reads]',
            '151',
            '151',
            '',
            '[Settings]',
            'ReverseComplement,0',
            '',
            '[Data]',
            'Lane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,'
            'index,I5_Index_ID,index2,Sample_Project,Well_Description',
            '1,1_SKB1_640202_Test_plate_1_A1,1_SKB1_640202_Test_plate_1_A1,'
            'Test_plate_1,A1,iTru7_101_01,ACGTTACC,iTru5_01_A,'
            'TTGTCGGT,LabDude_PIDude_1,1.SKB1.640202.Test.plate.1.A1']
        # The full sheet has one row per well; check the prefix and last row.
        self.assertEqual(obs[:len(exp)], exp)
        exp = ('1,vibrio_positive_control_Test_plate_4_G9,'
               'vibrio_positive_control_Test_plate_4_G9,'
               'Test_plate_4,N18,iTru7_401_08,CGTAGGTT,'
               'iTru5_120_F,CATGAGGA,Controls,'
               'vibrio.positive.control.Test.plate.4.G9')
        self.assertEqual(obs[-1], exp)

    def test_generate_sample_sheet_unrecognized_assay_type(self):
        # unrecognized assay type
        tester = SequencingProcess(3)
        with self.assertRaises(ValueError):
            tester.generate_sample_sheet()

    def test_generate_prep_information(self):
        # Sequencing run
        tester = SequencingProcess(1)
        controls_sheet_id = tester.get_controls_prep_sheet_id()
        obs = tester.generate_prep_information()
        # One prep per study plus one for the controls sheet.
        exp = {1: EXPERIMENTAL_SAMPLES_PREP_EXAMPLE,
               controls_sheet_id: CONTROL_SAMPLES_PREP_EXAMPLE}
        self.assertEqual(len(obs), len(exp))
        self.assertEqual(obs[1], exp[1])
        self.assertEqual(obs[controls_sheet_id], exp[controls_sheet_id])

    def test_generate_prep_information_error(self):
        # Shotgun run--prep not implemented
        exp_err = "Prep file generation is not implemented for " \
                  "Metagenomics assays."
        tester = SequencingProcess(2)
        with self.assertRaisesRegex(ValueError, exp_err):
            tester.generate_prep_information()
# The ordering of positions in this test case recapitulates that provided by
# the wet-lab in known-good examples for plate compression and shotgun library
# prep primer assignment, following an interleaved pattern. See the docstring
# for get_interleaved_quarters_position_generator for more information.
INTERLEAVED_POSITIONS = [
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=0,
input_plate_order_index=0,
input_row_index=0,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=0,
input_plate_order_index=0,
input_row_index=1,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=0,
input_plate_order_index=0,
input_row_index=2,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=0,
input_plate_order_index=0,
input_row_index=3,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=0,
input_plate_order_index=0,
input_row_index=4,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=0,
input_plate_order_index=0,
input_row_index=5,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=0,
input_plate_order_index=0,
input_row_index=6,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=0,
input_plate_order_index=0,
input_row_index=7,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=2,
input_plate_order_index=0,
input_row_index=0,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=2,
input_plate_order_index=0,
input_row_index=1,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=2,
input_plate_order_index=0,
input_row_index=2,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=2,
input_plate_order_index=0,
input_row_index=3,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=2,
input_plate_order_index=0,
input_row_index=4,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=2,
input_plate_order_index=0,
input_row_index=5,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=2,
input_plate_order_index=0,
input_row_index=6,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=2,
input_plate_order_index=0,
input_row_index=7,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=4,
input_plate_order_index=0,
input_row_index=0,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=4,
input_plate_order_index=0,
input_row_index=1,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=4,
input_plate_order_index=0,
input_row_index=2,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=4,
input_plate_order_index=0,
input_row_index=3,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=4,
input_plate_order_index=0,
input_row_index=4,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=4,
input_plate_order_index=0,
input_row_index=5,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=4,
input_plate_order_index=0,
input_row_index=6,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=4,
input_plate_order_index=0,
input_row_index=7,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=6,
input_plate_order_index=0,
input_row_index=0,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=6,
input_plate_order_index=0,
input_row_index=1,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=6,
input_plate_order_index=0,
input_row_index=2,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=6,
input_plate_order_index=0,
input_row_index=3,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=6,
input_plate_order_index=0,
input_row_index=4,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=6,
input_plate_order_index=0,
input_row_index=5,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=6,
input_plate_order_index=0,
input_row_index=6,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=6,
input_plate_order_index=0,
input_row_index=7,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=8,
input_plate_order_index=0,
input_row_index=0,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=8,
input_plate_order_index=0,
input_row_index=1,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=8,
input_plate_order_index=0,
input_row_index=2,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=8,
input_plate_order_index=0,
input_row_index=3,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=8,
input_plate_order_index=0,
input_row_index=4,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=8,
input_plate_order_index=0,
input_row_index=5,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=8,
input_plate_order_index=0,
input_row_index=6,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=8,
input_plate_order_index=0,
input_row_index=7,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=10,
input_plate_order_index=0,
input_row_index=0,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=10,
input_plate_order_index=0,
input_row_index=1,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=10,
input_plate_order_index=0,
input_row_index=2,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=10,
input_plate_order_index=0,
input_row_index=3,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=10,
input_plate_order_index=0,
input_row_index=4,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=10,
input_plate_order_index=0,
input_row_index=5,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=10,
input_plate_order_index=0,
input_row_index=6,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=10,
input_plate_order_index=0,
input_row_index=7,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=12,
input_plate_order_index=0,
input_row_index=0,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=12,
input_plate_order_index=0,
input_row_index=1,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=12,
input_plate_order_index=0,
input_row_index=2,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=12,
input_plate_order_index=0,
input_row_index=3,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=12,
input_plate_order_index=0,
input_row_index=4,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=12,
input_plate_order_index=0,
input_row_index=5,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=12,
input_plate_order_index=0,
input_row_index=6,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=12,
input_plate_order_index=0,
input_row_index=7,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=14,
input_plate_order_index=0,
input_row_index=0,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=14,
input_plate_order_index=0,
input_row_index=1,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=14,
input_plate_order_index=0,
input_row_index=2,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=14,
input_plate_order_index=0,
input_row_index=3,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=14,
input_plate_order_index=0,
input_row_index=4,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=14,
input_plate_order_index=0,
input_row_index=5,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=14,
input_plate_order_index=0,
input_row_index=6,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=14,
input_plate_order_index=0,
input_row_index=7,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=16,
input_plate_order_index=0,
input_row_index=0,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=16,
input_plate_order_index=0,
input_row_index=1,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=16,
input_plate_order_index=0,
input_row_index=2,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=16,
input_plate_order_index=0,
input_row_index=3,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=16,
input_plate_order_index=0,
input_row_index=4,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=16,
input_plate_order_index=0,
input_row_index=5,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=16,
input_plate_order_index=0,
input_row_index=6,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=16,
input_plate_order_index=0,
input_row_index=7,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=18,
input_plate_order_index=0,
input_row_index=0,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=18,
input_plate_order_index=0,
input_row_index=1,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=18,
input_plate_order_index=0,
input_row_index=2,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=18,
input_plate_order_index=0,
input_row_index=3,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=18,
input_plate_order_index=0,
input_row_index=4,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=18,
input_plate_order_index=0,
input_row_index=5,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=18,
input_plate_order_index=0,
input_row_index=6,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=18,
input_plate_order_index=0,
input_row_index=7,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=20,
input_plate_order_index=0,
input_row_index=0,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=20,
input_plate_order_index=0,
input_row_index=1,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=20,
input_plate_order_index=0,
input_row_index=2,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=20,
input_plate_order_index=0,
input_row_index=3,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=20,
input_plate_order_index=0,
input_row_index=4,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=20,
input_plate_order_index=0,
input_row_index=5,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=20,
input_plate_order_index=0,
input_row_index=6,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=20,
input_plate_order_index=0,
input_row_index=7,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=22,
input_plate_order_index=0,
input_row_index=0,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=22,
input_plate_order_index=0,
input_row_index=1,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=22,
input_plate_order_index=0,
input_row_index=2,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=22,
input_plate_order_index=0,
input_row_index=3,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=22,
input_plate_order_index=0,
input_row_index=4,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=22,
input_plate_order_index=0,
input_row_index=5,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=22,
input_plate_order_index=0,
input_row_index=6,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=22,
input_plate_order_index=0,
input_row_index=7,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=1,
input_plate_order_index=1,
input_row_index=0,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=1,
input_plate_order_index=1,
input_row_index=1,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=1,
input_plate_order_index=1,
input_row_index=2,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=1,
input_plate_order_index=1,
input_row_index=3,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=1,
input_plate_order_index=1,
input_row_index=4,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=1,
input_plate_order_index=1,
input_row_index=5,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=1,
input_plate_order_index=1,
input_row_index=6,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=1,
input_plate_order_index=1,
input_row_index=7,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=3,
input_plate_order_index=1,
input_row_index=0,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=3,
input_plate_order_index=1,
input_row_index=1,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=3,
input_plate_order_index=1,
input_row_index=2,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=3,
input_plate_order_index=1,
input_row_index=3,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=3,
input_plate_order_index=1,
input_row_index=4,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=3,
input_plate_order_index=1,
input_row_index=5,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=3,
input_plate_order_index=1,
input_row_index=6,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=3,
input_plate_order_index=1,
input_row_index=7,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=5,
input_plate_order_index=1,
input_row_index=0,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=5,
input_plate_order_index=1,
input_row_index=1,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=5,
input_plate_order_index=1,
input_row_index=2,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=5,
input_plate_order_index=1,
input_row_index=3,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=5,
input_plate_order_index=1,
input_row_index=4,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=5,
input_plate_order_index=1,
input_row_index=5,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=5,
input_plate_order_index=1,
input_row_index=6,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=5,
input_plate_order_index=1,
input_row_index=7,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=7,
input_plate_order_index=1,
input_row_index=0,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=7,
input_plate_order_index=1,
input_row_index=1,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=7,
input_plate_order_index=1,
input_row_index=2,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=7,
input_plate_order_index=1,
input_row_index=3,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=7,
input_plate_order_index=1,
input_row_index=4,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=7,
input_plate_order_index=1,
input_row_index=5,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=7,
input_plate_order_index=1,
input_row_index=6,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=7,
input_plate_order_index=1,
input_row_index=7,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=9,
input_plate_order_index=1,
input_row_index=0,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=9,
input_plate_order_index=1,
input_row_index=1,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=9,
input_plate_order_index=1,
input_row_index=2,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=9,
input_plate_order_index=1,
input_row_index=3,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=9,
input_plate_order_index=1,
input_row_index=4,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=9,
input_plate_order_index=1,
input_row_index=5,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=9,
input_plate_order_index=1,
input_row_index=6,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=9,
input_plate_order_index=1,
input_row_index=7,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=11,
input_plate_order_index=1,
input_row_index=0,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=11,
input_plate_order_index=1,
input_row_index=1,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=11,
input_plate_order_index=1,
input_row_index=2,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=11,
input_plate_order_index=1,
input_row_index=3,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=11,
input_plate_order_index=1,
input_row_index=4,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=11,
input_plate_order_index=1,
input_row_index=5,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=11,
input_plate_order_index=1,
input_row_index=6,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=11,
input_plate_order_index=1,
input_row_index=7,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=13,
input_plate_order_index=1,
input_row_index=0,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=13,
input_plate_order_index=1,
input_row_index=1,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=13,
input_plate_order_index=1,
input_row_index=2,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=13,
input_plate_order_index=1,
input_row_index=3,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=13,
input_plate_order_index=1,
input_row_index=4,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=13,
input_plate_order_index=1,
input_row_index=5,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=13,
input_plate_order_index=1,
input_row_index=6,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=13,
input_plate_order_index=1,
input_row_index=7,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=15,
input_plate_order_index=1,
input_row_index=0,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=15,
input_plate_order_index=1,
input_row_index=1,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=15,
input_plate_order_index=1,
input_row_index=2,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=15,
input_plate_order_index=1,
input_row_index=3,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=15,
input_plate_order_index=1,
input_row_index=4,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=15,
input_plate_order_index=1,
input_row_index=5,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=15,
input_plate_order_index=1,
input_row_index=6,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=15,
input_plate_order_index=1,
input_row_index=7,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=17,
input_plate_order_index=1,
input_row_index=0,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=17,
input_plate_order_index=1,
input_row_index=1,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=17,
input_plate_order_index=1,
input_row_index=2,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=17,
input_plate_order_index=1,
input_row_index=3,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=17,
input_plate_order_index=1,
input_row_index=4,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=17,
input_plate_order_index=1,
input_row_index=5,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=17,
input_plate_order_index=1,
input_row_index=6,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=17,
input_plate_order_index=1,
input_row_index=7,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=19,
input_plate_order_index=1,
input_row_index=0,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=19,
input_plate_order_index=1,
input_row_index=1,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=19,
input_plate_order_index=1,
input_row_index=2,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=19,
input_plate_order_index=1,
input_row_index=3,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=19,
input_plate_order_index=1,
input_row_index=4,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=19,
input_plate_order_index=1,
input_row_index=5,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=19,
input_plate_order_index=1,
input_row_index=6,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=19,
input_plate_order_index=1,
input_row_index=7,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=21,
input_plate_order_index=1,
input_row_index=0,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=21,
input_plate_order_index=1,
input_row_index=1,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=21,
input_plate_order_index=1,
input_row_index=2,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=21,
input_plate_order_index=1,
input_row_index=3,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=21,
input_plate_order_index=1,
input_row_index=4,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=21,
input_plate_order_index=1,
input_row_index=5,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=21,
input_plate_order_index=1,
input_row_index=6,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=21,
input_plate_order_index=1,
input_row_index=7,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=0,
output_col_index=23,
input_plate_order_index=1,
input_row_index=0,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=2,
output_col_index=23,
input_plate_order_index=1,
input_row_index=1,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=4,
output_col_index=23,
input_plate_order_index=1,
input_row_index=2,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=6,
output_col_index=23,
input_plate_order_index=1,
input_row_index=3,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=8,
output_col_index=23,
input_plate_order_index=1,
input_row_index=4,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=10,
output_col_index=23,
input_plate_order_index=1,
input_row_index=5,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=12,
output_col_index=23,
input_plate_order_index=1,
input_row_index=6,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=14,
output_col_index=23,
input_plate_order_index=1,
input_row_index=7,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=0,
input_plate_order_index=2,
input_row_index=0,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=0,
input_plate_order_index=2,
input_row_index=1,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=0,
input_plate_order_index=2,
input_row_index=2,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=0,
input_plate_order_index=2,
input_row_index=3,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=0,
input_plate_order_index=2,
input_row_index=4,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=0,
input_plate_order_index=2,
input_row_index=5,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=0,
input_plate_order_index=2,
input_row_index=6,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=0,
input_plate_order_index=2,
input_row_index=7,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=2,
input_plate_order_index=2,
input_row_index=0,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=2,
input_plate_order_index=2,
input_row_index=1,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=2,
input_plate_order_index=2,
input_row_index=2,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=2,
input_plate_order_index=2,
input_row_index=3,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=2,
input_plate_order_index=2,
input_row_index=4,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=2,
input_plate_order_index=2,
input_row_index=5,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=2,
input_plate_order_index=2,
input_row_index=6,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=2,
input_plate_order_index=2,
input_row_index=7,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=4,
input_plate_order_index=2,
input_row_index=0,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=4,
input_plate_order_index=2,
input_row_index=1,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=4,
input_plate_order_index=2,
input_row_index=2,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=4,
input_plate_order_index=2,
input_row_index=3,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=4,
input_plate_order_index=2,
input_row_index=4,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=4,
input_plate_order_index=2,
input_row_index=5,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=4,
input_plate_order_index=2,
input_row_index=6,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=4,
input_plate_order_index=2,
input_row_index=7,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=6,
input_plate_order_index=2,
input_row_index=0,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=6,
input_plate_order_index=2,
input_row_index=1,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=6,
input_plate_order_index=2,
input_row_index=2,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=6,
input_plate_order_index=2,
input_row_index=3,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=6,
input_plate_order_index=2,
input_row_index=4,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=6,
input_plate_order_index=2,
input_row_index=5,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=6,
input_plate_order_index=2,
input_row_index=6,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=6,
input_plate_order_index=2,
input_row_index=7,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=8,
input_plate_order_index=2,
input_row_index=0,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=8,
input_plate_order_index=2,
input_row_index=1,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=8,
input_plate_order_index=2,
input_row_index=2,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=8,
input_plate_order_index=2,
input_row_index=3,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=8,
input_plate_order_index=2,
input_row_index=4,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=8,
input_plate_order_index=2,
input_row_index=5,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=8,
input_plate_order_index=2,
input_row_index=6,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=8,
input_plate_order_index=2,
input_row_index=7,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=10,
input_plate_order_index=2,
input_row_index=0,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=10,
input_plate_order_index=2,
input_row_index=1,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=10,
input_plate_order_index=2,
input_row_index=2,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=10,
input_plate_order_index=2,
input_row_index=3,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=10,
input_plate_order_index=2,
input_row_index=4,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=10,
input_plate_order_index=2,
input_row_index=5,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=10,
input_plate_order_index=2,
input_row_index=6,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=10,
input_plate_order_index=2,
input_row_index=7,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=12,
input_plate_order_index=2,
input_row_index=0,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=12,
input_plate_order_index=2,
input_row_index=1,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=12,
input_plate_order_index=2,
input_row_index=2,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=12,
input_plate_order_index=2,
input_row_index=3,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=12,
input_plate_order_index=2,
input_row_index=4,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=12,
input_plate_order_index=2,
input_row_index=5,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=12,
input_plate_order_index=2,
input_row_index=6,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=12,
input_plate_order_index=2,
input_row_index=7,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=14,
input_plate_order_index=2,
input_row_index=0,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=14,
input_plate_order_index=2,
input_row_index=1,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=14,
input_plate_order_index=2,
input_row_index=2,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=14,
input_plate_order_index=2,
input_row_index=3,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=14,
input_plate_order_index=2,
input_row_index=4,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=14,
input_plate_order_index=2,
input_row_index=5,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=14,
input_plate_order_index=2,
input_row_index=6,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=14,
input_plate_order_index=2,
input_row_index=7,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=16,
input_plate_order_index=2,
input_row_index=0,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=16,
input_plate_order_index=2,
input_row_index=1,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=16,
input_plate_order_index=2,
input_row_index=2,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=16,
input_plate_order_index=2,
input_row_index=3,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=16,
input_plate_order_index=2,
input_row_index=4,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=16,
input_plate_order_index=2,
input_row_index=5,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=16,
input_plate_order_index=2,
input_row_index=6,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=16,
input_plate_order_index=2,
input_row_index=7,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=18,
input_plate_order_index=2,
input_row_index=0,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=18,
input_plate_order_index=2,
input_row_index=1,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=18,
input_plate_order_index=2,
input_row_index=2,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=18,
input_plate_order_index=2,
input_row_index=3,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=18,
input_plate_order_index=2,
input_row_index=4,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=18,
input_plate_order_index=2,
input_row_index=5,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=18,
input_plate_order_index=2,
input_row_index=6,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=18,
input_plate_order_index=2,
input_row_index=7,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=20,
input_plate_order_index=2,
input_row_index=0,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=20,
input_plate_order_index=2,
input_row_index=1,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=20,
input_plate_order_index=2,
input_row_index=2,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=20,
input_plate_order_index=2,
input_row_index=3,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=20,
input_plate_order_index=2,
input_row_index=4,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=20,
input_plate_order_index=2,
input_row_index=5,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=20,
input_plate_order_index=2,
input_row_index=6,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=20,
input_plate_order_index=2,
input_row_index=7,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=22,
input_plate_order_index=2,
input_row_index=0,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=22,
input_plate_order_index=2,
input_row_index=1,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=22,
input_plate_order_index=2,
input_row_index=2,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=22,
input_plate_order_index=2,
input_row_index=3,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=22,
input_plate_order_index=2,
input_row_index=4,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=22,
input_plate_order_index=2,
input_row_index=5,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=22,
input_plate_order_index=2,
input_row_index=6,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=22,
input_plate_order_index=2,
input_row_index=7,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=1,
input_plate_order_index=3,
input_row_index=0,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=1,
input_plate_order_index=3,
input_row_index=1,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=1,
input_plate_order_index=3,
input_row_index=2,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=1,
input_plate_order_index=3,
input_row_index=3,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=1,
input_plate_order_index=3,
input_row_index=4,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=1,
input_plate_order_index=3,
input_row_index=5,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=1,
input_plate_order_index=3,
input_row_index=6,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=1,
input_plate_order_index=3,
input_row_index=7,
input_col_index=0),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=3,
input_plate_order_index=3,
input_row_index=0,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=3,
input_plate_order_index=3,
input_row_index=1,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=3,
input_plate_order_index=3,
input_row_index=2,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=3,
input_plate_order_index=3,
input_row_index=3,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=3,
input_plate_order_index=3,
input_row_index=4,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=3,
input_plate_order_index=3,
input_row_index=5,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=3,
input_plate_order_index=3,
input_row_index=6,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=3,
input_plate_order_index=3,
input_row_index=7,
input_col_index=1),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=5,
input_plate_order_index=3,
input_row_index=0,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=5,
input_plate_order_index=3,
input_row_index=1,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=5,
input_plate_order_index=3,
input_row_index=2,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=5,
input_plate_order_index=3,
input_row_index=3,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=5,
input_plate_order_index=3,
input_row_index=4,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=5,
input_plate_order_index=3,
input_row_index=5,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=5,
input_plate_order_index=3,
input_row_index=6,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=5,
input_plate_order_index=3,
input_row_index=7,
input_col_index=2),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=7,
input_plate_order_index=3,
input_row_index=0,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=7,
input_plate_order_index=3,
input_row_index=1,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=7,
input_plate_order_index=3,
input_row_index=2,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=7,
input_plate_order_index=3,
input_row_index=3,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=7,
input_plate_order_index=3,
input_row_index=4,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=7,
input_plate_order_index=3,
input_row_index=5,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=7,
input_plate_order_index=3,
input_row_index=6,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=7,
input_plate_order_index=3,
input_row_index=7,
input_col_index=3),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=9,
input_plate_order_index=3,
input_row_index=0,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=9,
input_plate_order_index=3,
input_row_index=1,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=9,
input_plate_order_index=3,
input_row_index=2,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=9,
input_plate_order_index=3,
input_row_index=3,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=9,
input_plate_order_index=3,
input_row_index=4,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=9,
input_plate_order_index=3,
input_row_index=5,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=9,
input_plate_order_index=3,
input_row_index=6,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=9,
input_plate_order_index=3,
input_row_index=7,
input_col_index=4),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=11,
input_plate_order_index=3,
input_row_index=0,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=11,
input_plate_order_index=3,
input_row_index=1,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=11,
input_plate_order_index=3,
input_row_index=2,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=11,
input_plate_order_index=3,
input_row_index=3,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=11,
input_plate_order_index=3,
input_row_index=4,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=11,
input_plate_order_index=3,
input_row_index=5,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=11,
input_plate_order_index=3,
input_row_index=6,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=11,
input_plate_order_index=3,
input_row_index=7,
input_col_index=5),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=13,
input_plate_order_index=3,
input_row_index=0,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=13,
input_plate_order_index=3,
input_row_index=1,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=13,
input_plate_order_index=3,
input_row_index=2,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=13,
input_plate_order_index=3,
input_row_index=3,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=13,
input_plate_order_index=3,
input_row_index=4,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=13,
input_plate_order_index=3,
input_row_index=5,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=13,
input_plate_order_index=3,
input_row_index=6,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=13,
input_plate_order_index=3,
input_row_index=7,
input_col_index=6),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=15,
input_plate_order_index=3,
input_row_index=0,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=15,
input_plate_order_index=3,
input_row_index=1,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=15,
input_plate_order_index=3,
input_row_index=2,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=15,
input_plate_order_index=3,
input_row_index=3,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=15,
input_plate_order_index=3,
input_row_index=4,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=15,
input_plate_order_index=3,
input_row_index=5,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=15,
input_plate_order_index=3,
input_row_index=6,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=15,
input_plate_order_index=3,
input_row_index=7,
input_col_index=7),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=17,
input_plate_order_index=3,
input_row_index=0,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=17,
input_plate_order_index=3,
input_row_index=1,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=17,
input_plate_order_index=3,
input_row_index=2,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=17,
input_plate_order_index=3,
input_row_index=3,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=17,
input_plate_order_index=3,
input_row_index=4,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=17,
input_plate_order_index=3,
input_row_index=5,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=17,
input_plate_order_index=3,
input_row_index=6,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=17,
input_plate_order_index=3,
input_row_index=7,
input_col_index=8),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=19,
input_plate_order_index=3,
input_row_index=0,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=19,
input_plate_order_index=3,
input_row_index=1,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=19,
input_plate_order_index=3,
input_row_index=2,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=19,
input_plate_order_index=3,
input_row_index=3,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=19,
input_plate_order_index=3,
input_row_index=4,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=19,
input_plate_order_index=3,
input_row_index=5,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=19,
input_plate_order_index=3,
input_row_index=6,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=19,
input_plate_order_index=3,
input_row_index=7,
input_col_index=9),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=21,
input_plate_order_index=3,
input_row_index=0,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=21,
input_plate_order_index=3,
input_row_index=1,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=21,
input_plate_order_index=3,
input_row_index=2,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=21,
input_plate_order_index=3,
input_row_index=3,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=21,
input_plate_order_index=3,
input_row_index=4,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=21,
input_plate_order_index=3,
input_row_index=5,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=21,
input_plate_order_index=3,
input_row_index=6,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=21,
input_plate_order_index=3,
input_row_index=7,
input_col_index=10),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=1,
output_col_index=23,
input_plate_order_index=3,
input_row_index=0,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=3,
output_col_index=23,
input_plate_order_index=3,
input_row_index=1,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=5,
output_col_index=23,
input_plate_order_index=3,
input_row_index=2,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=7,
output_col_index=23,
input_plate_order_index=3,
input_row_index=3,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=9,
output_col_index=23,
input_plate_order_index=3,
input_row_index=4,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=11,
output_col_index=23,
input_plate_order_index=3,
input_row_index=5,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=13,
output_col_index=23,
input_plate_order_index=3,
input_row_index=6,
input_col_index=11),
GDNAPlateCompressionProcess.InterleavedPosition(output_row_index=15,
output_col_index=23,
input_plate_order_index=3,
input_row_index=7,
input_col_index=11)]
SHOTGUN_PRIMER_LAYOUT = [[['A1', 'iTru7_115_09', 'iTru5_121_H'],
['A2', 'iTru7_108_09', 'iTru5_08_H'],
['A3', 'iTru7_101_06', 'iTru5_05_A'],
['A4', 'iTru7_109_05', 'iTru5_16_A'],
['A5', 'iTru7_102_02', 'iTru5_01_B'],
['A6', 'iTru7_110_01', 'iTru5_24_A'],
['A7', 'iTru7_102_10', 'iTru5_09_B'],
['A8', 'iTru7_110_09', 'iTru5_20_B'],
['A9', 'iTru7_103_06', 'iTru5_05_C'],
['A10', 'iTru7_111_05', 'iTru5_16_C'],
['A11', 'iTru7_104_02', 'iTru5_01_D'],
['A12', 'iTru7_112_01', 'iTru5_24_C'],
['A13', 'iTru7_104_10', 'iTru5_09_D'],
['A14', 'iTru7_112_09', 'iTru5_20_D'],
['A15', 'iTru7_105_06', 'iTru5_05_E'],
['A16', 'iTru7_113_05', 'iTru5_16_E'],
['A17', 'iTru7_106_02', 'iTru5_01_F'],
['A18', 'iTru7_114_01', 'iTru5_24_E'],
['A19', 'iTru7_106_10', 'iTru5_09_F'],
['A20', 'iTru7_114_09', 'iTru5_20_F'],
['A21', 'iTru7_107_06', 'iTru5_05_G'],
['A22', 'iTru7_201_05', 'iTru5_16_G'],
['A23', 'iTru7_108_02', 'iTru5_01_H'],
['A24', 'iTru7_202_01', 'iTru5_24_G']],
[['B1', 'iTru7_202_08', 'iTru5_19_H'],
['B2', 'iTru7_210_07', 'iTru5_106_H'],
['B3', 'iTru7_203_04', 'iTru5_103_A'],
['B4', 'iTru7_301_03', 'iTru5_114_A'],
['B5', 'iTru7_203_12', 'iTru5_111_A'],
['B6', 'iTru7_301_11', 'iTru5_122_A'],
['B7', 'iTru7_204_08', 'iTru5_107_B'],
['B8', 'iTru7_302_07', 'iTru5_118_B'],
['B9', 'iTru7_205_04', 'iTru5_103_C'],
['B10', 'iTru7_303_03', 'iTru5_114_C'],
['B11', 'iTru7_205_12', 'iTru5_111_C'],
['B12', 'iTru7_303_11', 'iTru5_122_C'],
['B13', 'iTru7_206_08', 'iTru5_107_D'],
['B14', 'iTru7_304_07', 'iTru5_118_D'],
['B15', 'iTru7_207_04', 'iTru5_103_E'],
['B16', 'iTru7_305_03', 'iTru5_114_E'],
['B17', 'iTru7_207_12', 'iTru5_111_E'],
['B18', 'iTru7_305_11', 'iTru5_122_E'],
['B19', 'iTru7_208_08', 'iTru5_107_F'],
['B20', 'iTru7_401_07', 'iTru5_118_F'],
['B21', 'iTru7_209_04', 'iTru5_103_G'],
['B22', 'iTru7_402_03', 'iTru5_114_G'],
['B23', 'iTru7_209_12', 'iTru5_111_G'],
['B24', 'iTru7_402_11', 'iTru5_122_G']],
[['C1', 'iTru7_115_10', 'iTru5_122_H'],
['C2', 'iTru7_108_10', 'iTru5_09_H'],
['C3', 'iTru7_101_07', 'iTru5_06_A'],
['C4', 'iTru7_109_06', 'iTru5_17_A'],
['C5', 'iTru7_102_03', 'iTru5_02_B'],
['C6', 'iTru7_110_02', 'iTru5_13_B'],
['C7', 'iTru7_102_11', 'iTru5_10_B'],
['C8', 'iTru7_110_10', 'iTru5_21_B'],
['C9', 'iTru7_103_07', 'iTru5_06_C'],
['C10', 'iTru7_111_06', 'iTru5_17_C'],
['C11', 'iTru7_104_03', 'iTru5_02_D'],
['C12', 'iTru7_112_02', 'iTru5_13_D'],
['C13', 'iTru7_104_11', 'iTru5_10_D'],
['C14', 'iTru7_112_10', 'iTru5_21_D'],
['C15', 'iTru7_105_07', 'iTru5_06_E'],
['C16', 'iTru7_113_06', 'iTru5_17_E'],
['C17', 'iTru7_106_03', 'iTru5_02_F'],
['C18', 'iTru7_114_02', 'iTru5_13_F'],
['C19', 'iTru7_106_11', 'iTru5_10_F'],
['C20', 'iTru7_114_10', 'iTru5_21_F'],
['C21', 'iTru7_107_07', 'iTru5_06_G'],
['C22', 'iTru7_201_06', 'iTru5_17_G'],
['C23', 'iTru7_108_03', 'iTru5_02_H'],
['C24', 'iTru7_202_02', 'iTru5_13_H']],
[['D1', 'iTru7_202_09', 'iTru5_20_H'],
['D2', 'iTru7_210_08', 'iTru5_107_H'],
['D3', 'iTru7_203_05', 'iTru5_104_A'],
['D4', 'iTru7_301_04', 'iTru5_115_A'],
['D5', 'iTru7_204_01', 'iTru5_112_A'],
['D6', 'iTru7_301_12', 'iTru5_123_A'],
['D7', 'iTru7_204_09', 'iTru5_108_B'],
['D8', 'iTru7_302_08', 'iTru5_119_B'],
['D9', 'iTru7_205_05', 'iTru5_104_C'],
['D10', 'iTru7_303_04', 'iTru5_115_C'],
['D11', 'iTru7_206_01', 'iTru5_112_C'],
['D12', 'iTru7_303_12', 'iTru5_123_C'],
['D13', 'iTru7_206_09', 'iTru5_108_D'],
['D14', 'iTru7_304_08', 'iTru5_119_D'],
['D15', 'iTru7_207_05', 'iTru5_104_E'],
['D16', 'iTru7_305_04', 'iTru5_115_E'],
['D17', 'iTru7_208_01', 'iTru5_112_E'],
['D18', 'iTru7_305_12', 'iTru5_123_E'],
['D19', 'iTru7_208_09', 'iTru5_108_F'],
['D20', 'iTru7_401_08', 'iTru5_119_F'],
['D21', 'iTru7_209_05', 'iTru5_104_G'],
['D22', 'iTru7_402_04', 'iTru5_115_G'],
['D23', 'iTru7_210_01', 'iTru5_112_G'],
['D24', 'iTru7_402_12', 'iTru5_123_G']],
[['E1', 'iTru7_115_11', 'iTru5_123_H'],
['E2', 'iTru7_108_11', 'iTru5_10_H'],
['E3', 'iTru7_101_08', 'iTru5_07_A'],
['E4', 'iTru7_109_07', 'iTru5_18_A'],
['E5', 'iTru7_102_04', 'iTru5_03_B'],
['E6', 'iTru7_110_03', 'iTru5_14_B'],
['E7', 'iTru7_102_12', 'iTru5_11_B'],
['E8', 'iTru7_110_11', 'iTru5_22_B'],
['E9', 'iTru7_103_08', 'iTru5_07_C'],
['E10', 'iTru7_111_07', 'iTru5_18_C'],
['E11', 'iTru7_104_04', 'iTru5_03_D'],
['E12', 'iTru7_112_03', 'iTru5_14_D'],
['E13', 'iTru7_104_12', 'iTru5_11_D'],
['E14', 'iTru7_112_11', 'iTru5_22_D'],
['E15', 'iTru7_105_08', 'iTru5_07_E'],
['E16', 'iTru7_113_07', 'iTru5_18_E'],
['E17', 'iTru7_106_04', 'iTru5_03_F'],
['E18', 'iTru7_114_03', 'iTru5_14_F'],
['E19', 'iTru7_106_12', 'iTru5_11_F'],
['E20', 'iTru7_114_11', 'iTru5_22_F'],
['E21', 'iTru7_107_08', 'iTru5_07_G'],
['E22', 'iTru7_201_07', 'iTru5_18_G'],
['E23', 'iTru7_108_04', 'iTru5_03_H'],
['E24', 'iTru7_202_03', 'iTru5_14_H']],
[['F1', 'iTru7_202_10', 'iTru5_21_H'],
['F2', 'iTru7_210_09', 'iTru5_108_H'],
['F3', 'iTru7_203_06', 'iTru5_105_A'],
['F4', 'iTru7_301_05', 'iTru5_116_A'],
['F5', 'iTru7_204_02', 'iTru5_101_B'],
['F6', 'iTru7_302_01', 'iTru5_124_A'],
['F7', 'iTru7_204_10', 'iTru5_109_B'],
['F8', 'iTru7_302_09', 'iTru5_120_B'],
['F9', 'iTru7_205_06', 'iTru5_105_C'],
['F10', 'iTru7_303_05', 'iTru5_116_C'],
['F11', 'iTru7_206_02', 'iTru5_101_D'],
['F12', 'iTru7_304_01', 'iTru5_124_C'],
['F13', 'iTru7_206_10', 'iTru5_109_D'],
['F14', 'iTru7_304_09', 'iTru5_120_D'],
['F15', 'iTru7_207_06', 'iTru5_105_E'],
['F16', 'iTru7_305_05', 'iTru5_116_E'],
['F17', 'iTru7_208_02', 'iTru5_101_F'],
['F18', 'iTru7_401_01', 'iTru5_124_E'],
['F19', 'iTru7_208_10', 'iTru5_109_F'],
['F20', 'iTru7_401_09', 'iTru5_120_F'],
['F21', 'iTru7_209_06', 'iTru5_105_G'],
['F22', 'iTru7_402_05', 'iTru5_116_G'],
['F23', 'iTru7_210_02', 'iTru5_101_H'],
['F24', 'iTru7_115_01', 'iTru5_124_G']],
[['G1', 'iTru7_211_01', 'iTru5_124_H'],
['G2', 'iTru7_108_12', 'iTru5_11_H'],
['G3', 'iTru7_101_09', 'iTru5_08_A'],
['G4', 'iTru7_109_08', 'iTru5_19_A'],
['G5', 'iTru7_102_05', 'iTru5_04_B'],
['G6', 'iTru7_110_04', 'iTru5_15_B'],
['G7', 'iTru7_103_01', 'iTru5_12_B'],
['G8', 'iTru7_110_12', 'iTru5_23_B'],
['G9', 'iTru7_103_09', 'iTru5_08_C'],
['G10', 'iTru7_111_08', 'iTru5_19_C'],
['G11', 'iTru7_104_05', 'iTru5_04_D'],
['G12', 'iTru7_112_04', 'iTru5_15_D'],
['G13', 'iTru7_105_01', 'iTru5_12_D'],
['G14', 'iTru7_112_12', 'iTru5_23_D'],
['G15', 'iTru7_105_09', 'iTru5_08_E'],
['G16', 'iTru7_113_08', 'iTru5_19_E'],
['G17', 'iTru7_106_05', 'iTru5_04_F'],
['G18', 'iTru7_114_04', 'iTru5_15_F'],
['G19', 'iTru7_107_01', 'iTru5_12_F'],
['G20', 'iTru7_114_12', 'iTru5_23_F'],
['G21', 'iTru7_107_09', 'iTru5_08_G'],
['G22', 'iTru7_201_08', 'iTru5_19_G'],
['G23', 'iTru7_108_05', 'iTru5_04_H'],
['G24', 'iTru7_202_04', 'iTru5_15_H']],
[['H1', 'iTru7_202_11', 'iTru5_22_H'],
['H2', 'iTru7_210_10', 'iTru5_109_H'],
['H3', 'iTru7_203_07', 'iTru5_106_A'],
['H4', 'iTru7_301_06', 'iTru5_117_A'],
['H5', 'iTru7_204_03', 'iTru5_102_B'],
['H6', 'iTru7_302_02', 'iTru5_113_B'],
['H7', 'iTru7_204_11', 'iTru5_110_B'],
['H8', 'iTru7_302_10', 'iTru5_121_B'],
['H9', 'iTru7_205_07', 'iTru5_106_C'],
['H10', 'iTru7_303_06', 'iTru5_117_C'],
['H11', 'iTru7_206_03', 'iTru5_102_D'],
['H12', 'iTru7_304_02', 'iTru5_113_D'],
['H13', 'iTru7_206_11', 'iTru5_110_D'],
['H14', 'iTru7_304_10', 'iTru5_121_D'],
['H15', 'iTru7_207_07', 'iTru5_106_E'],
['H16', 'iTru7_305_06', 'iTru5_117_E'],
['H17', 'iTru7_208_03', 'iTru5_102_F'],
['H18', 'iTru7_401_02', 'iTru5_113_F'],
['H19', 'iTru7_208_11', 'iTru5_110_F'],
['H20', 'iTru7_401_10', 'iTru5_121_F'],
['H21', 'iTru7_209_07', 'iTru5_106_G'],
['H22', 'iTru7_402_06', 'iTru5_117_G'],
['H23', 'iTru7_210_03', 'iTru5_102_H'],
['H24', 'iTru7_115_02', 'iTru5_113_H']],
[['I1', 'iTru7_101_02', 'iTru5_01_A'],
['I2', 'iTru7_109_01', 'iTru5_12_H'],
['I3', 'iTru7_101_10', 'iTru5_09_A'],
['I4', 'iTru7_109_09', 'iTru5_20_A'],
['I5', 'iTru7_102_06', 'iTru5_05_B'],
['I6', 'iTru7_110_05', 'iTru5_16_B'],
['I7', 'iTru7_103_02', 'iTru5_01_C'],
['I8', 'iTru7_111_01', 'iTru5_24_B'],
['I9', 'iTru7_103_10', 'iTru5_09_C'],
['I10', 'iTru7_111_09', 'iTru5_20_C'],
['I11', 'iTru7_104_06', 'iTru5_05_D'],
['I12', 'iTru7_112_05', 'iTru5_16_D'],
['I13', 'iTru7_105_02', 'iTru5_01_E'],
['I14', 'iTru7_113_01', 'iTru5_24_D'],
['I15', 'iTru7_105_10', 'iTru5_09_E'],
['I16', 'iTru7_113_09', 'iTru5_20_E'],
['I17', 'iTru7_106_06', 'iTru5_05_F'],
['I18', 'iTru7_114_05', 'iTru5_16_F'],
['I19', 'iTru7_107_02', 'iTru5_01_G'],
['I20', 'iTru7_201_01', 'iTru5_24_F'],
['I21', 'iTru7_107_10', 'iTru5_09_G'],
['I22', 'iTru7_201_09', 'iTru5_20_G'],
['I23', 'iTru7_108_06', 'iTru5_05_H'],
['I24', 'iTru7_202_05', 'iTru5_16_H']],
[['J1', 'iTru7_202_12', 'iTru5_23_H'],
['J2', 'iTru7_210_11', 'iTru5_110_H'],
['J3', 'iTru7_203_08', 'iTru5_107_A'],
['J4', 'iTru7_301_07', 'iTru5_118_A'],
['J5', 'iTru7_204_04', 'iTru5_103_B'],
['J6', 'iTru7_302_03', 'iTru5_114_B'],
['J7', 'iTru7_204_12', 'iTru5_111_B'],
['J8', 'iTru7_302_11', 'iTru5_122_B'],
['J9', 'iTru7_205_08', 'iTru5_107_C'],
['J10', 'iTru7_303_07', 'iTru5_118_C'],
['J11', 'iTru7_206_04', 'iTru5_103_D'],
['J12', 'iTru7_304_03', 'iTru5_114_D'],
['J13', 'iTru7_206_12', 'iTru5_111_D'],
['J14', 'iTru7_304_11', 'iTru5_122_D'],
['J15', 'iTru7_207_08', 'iTru5_107_E'],
['J16', 'iTru7_305_07', 'iTru5_118_E'],
['J17', 'iTru7_208_04', 'iTru5_103_F'],
['J18', 'iTru7_401_03', 'iTru5_114_F'],
['J19', 'iTru7_208_12', 'iTru5_111_F'],
['J20', 'iTru7_401_11', 'iTru5_122_F'],
['J21', 'iTru7_209_08', 'iTru5_107_G'],
['J22', 'iTru7_402_07', 'iTru5_118_G'],
['J23', 'iTru7_210_04', 'iTru5_103_H'],
['J24', 'iTru7_115_03', 'iTru5_114_H']],
[['K1', 'iTru7_101_03', 'iTru5_02_A'],
['K2', 'iTru7_109_02', 'iTru5_13_A'],
['K3', 'iTru7_101_11', 'iTru5_10_A'],
['K4', 'iTru7_109_10', 'iTru5_21_A'],
['K5', 'iTru7_102_07', 'iTru5_06_B'],
['K6', 'iTru7_110_06', 'iTru5_17_B'],
['K7', 'iTru7_103_03', 'iTru5_02_C'],
['K8', 'iTru7_111_02', 'iTru5_13_C'],
['K9', 'iTru7_103_11', 'iTru5_10_C'],
['K10', 'iTru7_111_10', 'iTru5_21_C'],
['K11', 'iTru7_104_07', 'iTru5_06_D'],
['K12', 'iTru7_112_06', 'iTru5_17_D'],
['K13', 'iTru7_105_03', 'iTru5_02_E'],
['K14', 'iTru7_113_02', 'iTru5_13_E'],
['K15', 'iTru7_105_11', 'iTru5_10_E'],
['K16', 'iTru7_113_10', 'iTru5_21_E'],
['K17', 'iTru7_106_07', 'iTru5_06_F'],
['K18', 'iTru7_114_06', 'iTru5_17_F'],
['K19', 'iTru7_107_03', 'iTru5_02_G'],
['K20', 'iTru7_201_02', 'iTru5_13_G'],
['K21', 'iTru7_107_11', 'iTru5_10_G'],
['K22', 'iTru7_201_10', 'iTru5_21_G'],
['K23', 'iTru7_108_07', 'iTru5_06_H'],
['K24', 'iTru7_202_06', 'iTru5_17_H']],
[['L1', 'iTru7_203_01', 'iTru5_24_H'],
['L2', 'iTru7_210_12', 'iTru5_111_H'],
['L3', 'iTru7_203_09', 'iTru5_108_A'],
['L4', 'iTru7_301_08', 'iTru5_119_A'],
['L5', 'iTru7_204_05', 'iTru5_104_B'],
['L6', 'iTru7_302_04', 'iTru5_115_B'],
['L7', 'iTru7_205_01', 'iTru5_112_B'],
['L8', 'iTru7_302_12', 'iTru5_123_B'],
['L9', 'iTru7_205_09', 'iTru5_108_C'],
['L10', 'iTru7_303_08', 'iTru5_119_C'],
['L11', 'iTru7_206_05', 'iTru5_104_D'],
['L12', 'iTru7_304_04', 'iTru5_115_D'],
['L13', 'iTru7_207_01', 'iTru5_112_D'],
['L14', 'iTru7_304_12', 'iTru5_123_D'],
['L15', 'iTru7_207_09', 'iTru5_108_E'],
['L16', 'iTru7_305_08', 'iTru5_119_E'],
['L17', 'iTru7_208_05', 'iTru5_104_F'],
['L18', 'iTru7_401_04', 'iTru5_115_F'],
['L19', 'iTru7_209_01', 'iTru5_112_F'],
['L20', 'iTru7_401_12', 'iTru5_123_F'],
['L21', 'iTru7_209_09', 'iTru5_108_G'],
['L22', 'iTru7_402_08', 'iTru5_119_G'],
['L23', 'iTru7_210_05', 'iTru5_104_H'],
['L24', 'iTru7_115_04', 'iTru5_115_H']],
[['M1', 'iTru7_101_04', 'iTru5_03_A'],
['M2', 'iTru7_109_03', 'iTru5_14_A'],
['M3', 'iTru7_101_12', 'iTru5_11_A'],
['M4', 'iTru7_109_11', 'iTru5_22_A'],
['M5', 'iTru7_102_08', 'iTru5_07_B'],
['M6', 'iTru7_110_07', 'iTru5_18_B'],
['M7', 'iTru7_103_04', 'iTru5_03_C'],
['M8', 'iTru7_111_03', 'iTru5_14_C'],
['M9', 'iTru7_103_12', 'iTru5_11_C'],
['M10', 'iTru7_111_11', 'iTru5_22_C'],
['M11', 'iTru7_104_08', 'iTru5_07_D'],
['M12', 'iTru7_112_07', 'iTru5_18_D'],
['M13', 'iTru7_105_04', 'iTru5_03_E'],
['M14', 'iTru7_113_03', 'iTru5_14_E'],
['M15', 'iTru7_105_12', 'iTru5_11_E'],
['M16', 'iTru7_113_11', 'iTru5_22_E'],
['M17', 'iTru7_106_08', 'iTru5_07_F'],
['M18', 'iTru7_114_07', 'iTru5_18_F'],
['M19', 'iTru7_107_04', 'iTru5_03_G'],
['M20', 'iTru7_201_03', 'iTru5_14_G'],
['M21', 'iTru7_107_12', 'iTru5_11_G'],
['M22', 'iTru7_201_11', 'iTru5_22_G'],
['M23', 'iTru7_108_08', 'iTru5_07_H'],
['M24', 'iTru7_202_07', 'iTru5_18_H']],
[['N1', 'iTru7_203_02', 'iTru5_101_A'],
['N2', 'iTru7_301_01', 'iTru5_112_H'],
['N3', 'iTru7_203_10', 'iTru5_109_A'],
['N4', 'iTru7_301_09', 'iTru5_120_A'],
['N5', 'iTru7_204_06', 'iTru5_105_B'],
['N6', 'iTru7_302_05', 'iTru5_116_B'],
['N7', 'iTru7_205_02', 'iTru5_101_C'],
['N8', 'iTru7_303_01', 'iTru5_124_B'],
['N9', 'iTru7_205_10', 'iTru5_109_C'],
['N10', 'iTru7_303_09', 'iTru5_120_C'],
['N11', 'iTru7_206_06', 'iTru5_105_D'],
['N12', 'iTru7_304_05', 'iTru5_116_D'],
['N13', 'iTru7_207_02', 'iTru5_101_E'],
['N14', 'iTru7_305_01', 'iTru5_124_D'],
['N15', 'iTru7_207_10', 'iTru5_109_E'],
['N16', 'iTru7_305_09', 'iTru5_120_E'],
['N17', 'iTru7_208_06', 'iTru5_105_F'],
['N18', 'iTru7_401_05', 'iTru5_116_F'],
['N19', 'iTru7_209_02', 'iTru5_101_G'],
['N20', 'iTru7_402_01', 'iTru5_124_F'],
['N21', 'iTru7_209_10', 'iTru5_109_G'],
['N22', 'iTru7_402_09', 'iTru5_120_G'],
['N23', 'iTru7_210_06', 'iTru5_105_H'],
['N24', 'iTru7_115_05', 'iTru5_116_H']],
[['O1', 'iTru7_101_05', 'iTru5_04_A'],
['O2', 'iTru7_109_04', 'iTru5_15_A'],
['O3', 'iTru7_102_01', 'iTru5_12_A'],
['O4', 'iTru7_109_12', 'iTru5_23_A'],
['O5', 'iTru7_102_09', 'iTru5_08_B'],
['O6', 'iTru7_110_08', 'iTru5_19_B'],
['O7', 'iTru7_103_05', 'iTru5_04_C'],
['O8', 'iTru7_111_04', 'iTru5_15_C'],
['O9', 'iTru7_104_01', 'iTru5_12_C'],
['O10', 'iTru7_111_12', 'iTru5_23_C'],
['O11', 'iTru7_104_09', 'iTru5_08_D'],
['O12', 'iTru7_112_08', 'iTru5_19_D'],
['O13', 'iTru7_105_05', 'iTru5_04_E'],
['O14', 'iTru7_113_04', 'iTru5_15_E'],
['O15', 'iTru7_106_01', 'iTru5_12_E'],
['O16', 'iTru7_113_12', 'iTru5_23_E'],
['O17', 'iTru7_106_09', 'iTru5_08_F'],
['O18', 'iTru7_114_08', 'iTru5_19_F'],
['O19', 'iTru7_107_05', 'iTru5_04_G'],
['O20', 'iTru7_201_04', 'iTru5_15_G'],
['O21', 'iTru7_108_01', 'iTru5_12_G'],
['O22', 'iTru7_201_12', 'iTru5_23_G'],
[None, None, None], [None, None, None]],
[['P1', 'iTru7_203_03', 'iTru5_102_A'],
['P2', 'iTru7_301_02', 'iTru5_113_A'],
['P3', 'iTru7_203_11', 'iTru5_110_A'],
['P4', 'iTru7_301_10', 'iTru5_121_A'],
['P5', 'iTru7_204_07', 'iTru5_106_B'],
['P6', 'iTru7_302_06', 'iTru5_117_B'],
['P7', 'iTru7_205_03', 'iTru5_102_C'],
['P8', 'iTru7_303_02', 'iTru5_113_C'],
['P9', 'iTru7_205_11', 'iTru5_110_C'],
['P10', 'iTru7_303_10', 'iTru5_121_C'],
['P11', 'iTru7_206_07', 'iTru5_106_D'],
['P12', 'iTru7_304_06', 'iTru5_117_D'],
['P13', 'iTru7_207_03', 'iTru5_102_E'],
['P14', 'iTru7_305_02', 'iTru5_113_E'],
['P15', 'iTru7_207_11', 'iTru5_110_E'],
['P16', 'iTru7_305_10', 'iTru5_121_E'],
['P17', 'iTru7_208_07', 'iTru5_106_F'],
['P18', 'iTru7_401_06', 'iTru5_117_F'],
['P19', 'iTru7_209_03', 'iTru5_102_G'],
['P20', 'iTru7_402_02', 'iTru5_113_G'],
['P21', 'iTru7_209_11', 'iTru5_110_G'],
['P22', 'iTru7_402_10', 'iTru5_121_G'],
[None, None, None], [None, None, None]]]
# flake8: noqa
NORM_PROCESS_PICKLIST = 'Sample\tSource Plate Name\tSource Plate Type\tSource Well\tConcentration\tTransfer Volume\tDestination Plate Name\tDestination Well\n1.SKB1.640202.Test.plate.1.A1\tWater\t384PP_AQ_BP2_HT\tA1\t12.068\t3085.0\tNormalizedDNA\tA1\n1.SKB2.640194.Test.plate.1.B1\tWater\t384PP_AQ_BP2_HT\tC1\t12.068\t3085.0\tNormalizedDNA\tC1\n1.SKB3.640195.Test.plate.1.C1\tWater\t384PP_AQ_BP2_HT\tE1\t12.068\t3085.0\tNormalizedDNA\tE1\n1.SKB4.640189.Test.plate.1.D1\tWater\t384PP_AQ_BP2_HT\tG1\t12.068\t3085.0\tNormalizedDNA\tG1\n1.SKB5.640181.Test.plate.1.E1\tWater\t384PP_AQ_BP2_HT\tI1\t12.068\t3085.0\tNormalizedDNA\tI1\n1.SKB6.640176.Test.plate.1.F1\tWater\t384PP_AQ_BP2_HT\tK1\t12.068\t3085.0\tNormalizedDNA\tK1\nvibrio.positive.control.Test.plate.1.G1\tWater\t384PP_AQ_BP2_HT\tM1\t6.089\t2680.0\tNormalizedDNA\tM1\nblank.Test.plate.1.H1\tWater\t384PP_AQ_BP2_HT\tO1\t0.342\t0.0\tNormalizedDNA\tO1\n1.SKB1.640202.Test.plate.1.A2\tWater\t384PP_AQ_BP2_HT\tA3\t12.068\t3085.0\tNormalizedDNA\tA3\n1.SKB2.640194.Test.plate.1.B2\tWater\t384PP_AQ_BP2_HT\tC3\t12.068\t3085.0\tNormalizedDNA\tC3\n1.SKB3.640195.Test.plate.1.C2\tWater\t384PP_AQ_BP2_HT\tE3\t12.068\t3085.0\tNormalizedDNA\tE3\n1.SKB4.640189.Test.plate.1.D2\tWater\t384PP_AQ_BP2_HT\tG3\t12.068\t3085.0\tNormalizedDNA\tG3\n1.SKB5.640181.Test.plate.1.E2\tWater\t384PP_AQ_BP2_HT\tI3\t12.068\t3085.0\tNormalizedDNA\tI3\n1.SKB6.640176.Test.plate.1.F2\tWater\t384PP_AQ_BP2_HT\tK3\t12.068\t3085.0\tNormalizedDNA\tK3\nvibrio.positive.control.Test.plate.1.G2\tWater\t384PP_AQ_BP2_HT\tM3\t6.089\t2680.0\tNormalizedDNA\tM3\nblank.Test.plate.1.H2\tWater\t384PP_AQ_BP2_HT\tO3\t0.342\t0.0\tNormalizedDNA\tO3\n1.SKB1.640202.Test.plate.1.A3\tWater\t384PP_AQ_BP2_HT\tA5\t12.068\t3085.0\tNormalizedDNA\tA5\n1.SKB2.640194.Test.plate.1.B3\tWater\t384PP_AQ_BP2_HT\tC5\t12.068\t3085.0\tNormalizedDNA\tC5\n1.SKB3.640195.Test.plate.1.C3\tWater\t384PP_AQ_BP2_HT\tE5\t12.068\t3085.0\tNormalizedDNA\tE5\n1.SKB4.640189.Test.plate.1.D3\tWater\t384PP_AQ_BP2_HT\tG5\t12.
068\t3085.0\tNormalizedDNA\tG5\n1.SKB5.640181.Test.plate.1.E3\tWater\t384PP_AQ_BP2_HT\tI5\t12.068\t3085.0\tNormalizedDNA\tI5\n1.SKB6.640176.Test.plate.1.F3\tWater\t384PP_AQ_BP2_HT\tK5\t12.068\t3085.0\tNormalizedDNA\tK5\nvibrio.positive.control.Test.plate.1.G3\tWater\t384PP_AQ_BP2_HT\tM5\t6.089\t2680.0\tNormalizedDNA\tM5\nblank.Test.plate.1.H3\tWater\t384PP_AQ_BP2_HT\tO5\t0.342\t0.0\tNormalizedDNA\tO5\n1.SKB1.640202.Test.plate.1.A4\tWater\t384PP_AQ_BP2_HT\tA7\t12.068\t3085.0\tNormalizedDNA\tA7\n1.SKB2.640194.Test.plate.1.B4\tWater\t384PP_AQ_BP2_HT\tC7\t12.068\t3085.0\tNormalizedDNA\tC7\n1.SKB3.640195.Test.plate.1.C4\tWater\t384PP_AQ_BP2_HT\tE7\t12.068\t3085.0\tNormalizedDNA\tE7\n1.SKB4.640189.Test.plate.1.D4\tWater\t384PP_AQ_BP2_HT\tG7\t12.068\t3085.0\tNormalizedDNA\tG7\n1.SKB5.640181.Test.plate.1.E4\tWater\t384PP_AQ_BP2_HT\tI7\t12.068\t3085.0\tNormalizedDNA\tI7\n1.SKB6.640176.Test.plate.1.F4\tWater\t384PP_AQ_BP2_HT\tK7\t12.068\t3085.0\tNormalizedDNA\tK7\nvibrio.positive.control.Test.plate.1.G4\tWater\t384PP_AQ_BP2_HT\tM7\t6.089\t2680.0\tNormalizedDNA\tM7\nblank.Test.plate.1.H4\tWater\t384PP_AQ_BP2_HT\tO7\t0.342\t0.0\tNormalizedDNA\tO7\n1.SKB1.640202.Test.plate.1.A5\tWater\t384PP_AQ_BP2_HT\tA9\t12.068\t3085.0\tNormalizedDNA\tA9\n1.SKB2.640194.Test.plate.1.B5\tWater\t384PP_AQ_BP2_HT\tC9\t12.068\t3085.0\tNormalizedDNA\tC9\n1.SKB3.640195.Test.plate.1.C5\tWater\t384PP_AQ_BP2_HT\tE9\t12.068\t3085.0\tNormalizedDNA\tE9\n1.SKB4.640189.Test.plate.1.D5\tWater\t384PP_AQ_BP2_HT\tG9\t12.068\t3085.0\tNormalizedDNA\tG9\n1.SKB5.640181.Test.plate.1.E5\tWater\t384PP_AQ_BP2_HT\tI9\t12.068\t3085.0\tNormalizedDNA\tI9\n1.SKB6.640176.Test.plate.1.F5\tWater\t384PP_AQ_BP2_HT\tK9\t12.068\t3085.0\tNormalizedDNA\tK9\nvibrio.positive.control.Test.plate.1.G5\tWater\t384PP_AQ_BP2_HT\tM9\t6.089\t2680.0\tNormalizedDNA\tM9\nblank.Test.plate.1.H5\tWater\t384PP_AQ_BP2_HT\tO9\t0.342\t0.0\tNormalizedDNA\tO9\n1.SKB1.640202.Test.plate.1.A6\tWater\t384PP_AQ_BP2_HT\tA11\t12.068\t3085.0\tNormalizedDNA\tA11\n1
.SKB2.640194.Test.plate.1.B6\tWater\t384PP_AQ_BP2_HT\tC11\t12.068\t3085.0\tNormalizedDNA\tC11\n1.SKB3.640195.Test.plate.1.C6\tWater\t384PP_AQ_BP2_HT\tE11\t12.068\t3085.0\tNormalizedDNA\tE11\n1.SKB4.640189.Test.plate.1.D6\tWater\t384PP_AQ_BP2_HT\tG11\t12.068\t3085.0\tNormalizedDNA\tG11\n1.SKB5.640181.Test.plate.1.E6\tWater\t384PP_AQ_BP2_HT\tI11\t12.068\t3085.0\tNormalizedDNA\tI11\n1.SKB6.640176.Test.plate.1.F6\tWater\t384PP_AQ_BP2_HT\tK11\t12.068\t3085.0\tNormalizedDNA\tK11\nvibrio.positive.control.Test.plate.1.G6\tWater\t384PP_AQ_BP2_HT\tM11\t6.089\t2680.0\tNormalizedDNA\tM11\nblank.Test.plate.1.H6\tWater\t384PP_AQ_BP2_HT\tO11\t0.342\t0.0\tNormalizedDNA\tO11\n1.SKB1.640202.Test.plate.1.A7\tWater\t384PP_AQ_BP2_HT\tA13\t12.068\t3085.0\tNormalizedDNA\tA13\n1.SKB2.640194.Test.plate.1.B7\tWater\t384PP_AQ_BP2_HT\tC13\t12.068\t3085.0\tNormalizedDNA\tC13\n1.SKB3.640195.Test.plate.1.C7\tWater\t384PP_AQ_BP2_HT\tE13\t12.068\t3085.0\tNormalizedDNA\tE13\n1.SKB4.640189.Test.plate.1.D7\tWater\t384PP_AQ_BP2_HT\tG13\t12.068\t3085.0\tNormalizedDNA\tG13\n1.SKB5.640181.Test.plate.1.E7\tWater\t384PP_AQ_BP2_HT\tI13\t12.068\t3085.0\tNormalizedDNA\tI13\n1.SKB6.640176.Test.plate.1.F7\tWater\t384PP_AQ_BP2_HT\tK13\t12.068\t3085.0\tNormalizedDNA\tK13\nvibrio.positive.control.Test.plate.1.G7\tWater\t384PP_AQ_BP2_HT\tM13\t6.089\t2680.0\tNormalizedDNA\tM13\nblank.Test.plate.1.H7\tWater\t384PP_AQ_BP2_HT\tO13\t0.342\t0.0\tNormalizedDNA\tO13\n1.SKB1.640202.Test.plate.1.A8\tWater\t384PP_AQ_BP2_HT\tA15\t12.068\t3085.0\tNormalizedDNA\tA15\n1.SKB2.640194.Test.plate.1.B8\tWater\t384PP_AQ_BP2_HT\tC15\t12.068\t3085.0\tNormalizedDNA\tC15\n1.SKB3.640195.Test.plate.1.C8\tWater\t384PP_AQ_BP2_HT\tE15\t12.068\t3085.0\tNormalizedDNA\tE15\n1.SKB4.640189.Test.plate.1.D8\tWater\t384PP_AQ_BP2_HT\tG15\t12.068\t3085.0\tNormalizedDNA\tG15\n1.SKB5.640181.Test.plate.1.E8\tWater\t384PP_AQ_BP2_HT\tI15\t12.068\t3085.0\tNormalizedDNA\tI15\n1.SKB6.640176.Test.plate.1.F8\tWater\t384PP_AQ_BP2_HT\tK15\t12.068\t3085.0\tNormalizedD
NA\tK15\nvibrio.positive.control.Test.plate.1.G8\tWater\t384PP_AQ_BP2_HT\tM15\t6.089\t2680.0\tNormalizedDNA\tM15\nblank.Test.plate.1.H8\tWater\t384PP_AQ_BP2_HT\tO15\t0.342\t0.0\tNormalizedDNA\tO15\n1.SKB1.640202.Test.plate.1.A9\tWater\t384PP_AQ_BP2_HT\tA17\t12.068\t3085.0\tNormalizedDNA\tA17\n1.SKB2.640194.Test.plate.1.B9\tWater\t384PP_AQ_BP2_HT\tC17\t12.068\t3085.0\tNormalizedDNA\tC17\n1.SKB3.640195.Test.plate.1.C9\tWater\t384PP_AQ_BP2_HT\tE17\t12.068\t3085.0\tNormalizedDNA\tE17\n1.SKB4.640189.Test.plate.1.D9\tWater\t384PP_AQ_BP2_HT\tG17\t12.068\t3085.0\tNormalizedDNA\tG17\n1.SKB5.640181.Test.plate.1.E9\tWater\t384PP_AQ_BP2_HT\tI17\t12.068\t3085.0\tNormalizedDNA\tI17\n1.SKB6.640176.Test.plate.1.F9\tWater\t384PP_AQ_BP2_HT\tK17\t12.068\t3085.0\tNormalizedDNA\tK17\nvibrio.positive.control.Test.plate.1.G9\tWater\t384PP_AQ_BP2_HT\tM17\t6.089\t2680.0\tNormalizedDNA\tM17\nblank.Test.plate.1.H9\tWater\t384PP_AQ_BP2_HT\tO17\t0.342\t0.0\tNormalizedDNA\tO17\n1.SKB1.640202.Test.plate.1.A10\tWater\t384PP_AQ_BP2_HT\tA19\t12.068\t3085.0\tNormalizedDNA\tA19\n1.SKB2.640194.Test.plate.1.B10\tWater\t384PP_AQ_BP2_HT\tC19\t12.068\t3085.0\tNormalizedDNA\tC19\n1.SKB3.640195.Test.plate.1.C10\tWater\t384PP_AQ_BP2_HT\tE19\t12.068\t3085.0\tNormalizedDNA\tE19\n1.SKB4.640189.Test.plate.1.D10\tWater\t384PP_AQ_BP2_HT\tG19\t12.068\t3085.0\tNormalizedDNA\tG19\n1.SKB5.640181.Test.plate.1.E10\tWater\t384PP_AQ_BP2_HT\tI19\t12.068\t3085.0\tNormalizedDNA\tI19\n1.SKB6.640176.Test.plate.1.F10\tWater\t384PP_AQ_BP2_HT\tK19\t12.068\t3085.0\tNormalizedDNA\tK19\nvibrio.positive.control.Test.plate.1.G10\tWater\t384PP_AQ_BP2_HT\tM19\t6.089\t2680.0\tNormalizedDNA\tM19\nblank.Test.plate.1.H10\tWater\t384PP_AQ_BP2_HT\tO19\t0.342\t0.0\tNormalizedDNA\tO19\n1.SKB1.640202.Test.plate.1.A11\tWater\t384PP_AQ_BP2_HT\tA21\t12.068\t3085.0\tNormalizedDNA\tA21\n1.SKB2.640194.Test.plate.1.B11\tWater\t384PP_AQ_BP2_HT\tC21\t12.068\t3085.0\tNormalizedDNA\tC21\n1.SKB3.640195.Test.plate.1.C11\tWater\t384PP_AQ_BP2_HT\tE21\t12.068\t3
085.0\tNormalizedDNA\tE21\n1.SKB4.640189.Test.plate.1.D11\tWater\t384PP_AQ_BP2_HT\tG21\t12.068\t3085.0\tNormalizedDNA\tG21\n1.SKB5.640181.Test.plate.1.E11\tWater\t384PP_AQ_BP2_HT\tI21\t12.068\t3085.0\tNormalizedDNA\tI21\n1.SKB6.640176.Test.plate.1.F11\tWater\t384PP_AQ_BP2_HT\tK21\t12.068\t3085.0\tNormalizedDNA\tK21\nvibrio.positive.control.Test.plate.1.G11\tWater\t384PP_AQ_BP2_HT\tM21\t6.089\t2680.0\tNormalizedDNA\tM21\nblank.Test.plate.1.H11\tWater\t384PP_AQ_BP2_HT\tO21\t0.342\t0.0\tNormalizedDNA\tO21\n1.SKB1.640202.Test.plate.1.A12\tWater\t384PP_AQ_BP2_HT\tA23\t12.068\t3085.0\tNormalizedDNA\tA23\n1.SKB2.640194.Test.plate.1.B12\tWater\t384PP_AQ_BP2_HT\tC23\t12.068\t3085.0\tNormalizedDNA\tC23\n1.SKB3.640195.Test.plate.1.C12\tWater\t384PP_AQ_BP2_HT\tE23\t12.068\t3085.0\tNormalizedDNA\tE23\n1.SKB4.640189.Test.plate.1.D12\tWater\t384PP_AQ_BP2_HT\tG23\t12.068\t3085.0\tNormalizedDNA\tG23\n1.SKB5.640181.Test.plate.1.E12\tWater\t384PP_AQ_BP2_HT\tI23\t12.068\t3085.0\tNormalizedDNA\tI23\n1.SKB8.640193.Test.plate.1.F12\tWater\t384PP_AQ_BP2_HT\tK23\t12.068\t3085.0\tNormalizedDNA\tK23\nvibrio.positive.control.Test.plate.1.G12\tWater\t384PP_AQ_BP2_HT\tM23\t6.089\t2680.0\tNormalizedDNA\tM23\n1.SKB1.640202.Test.plate.2.A1\tWater\t384PP_AQ_BP2_HT\tA2\t12.068\t3085.0\tNormalizedDNA\tA2\n1.SKB2.640194.Test.plate.2.B1\tWater\t384PP_AQ_BP2_HT\tC2\t12.068\t3085.0\tNormalizedDNA\tC2\n1.SKB3.640195.Test.plate.2.C1\tWater\t384PP_AQ_BP2_HT\tE2\t12.068\t3085.0\tNormalizedDNA\tE2\n1.SKB4.640189.Test.plate.2.D1\tWater\t384PP_AQ_BP2_HT\tG2\t12.068\t3085.0\tNormalizedDNA\tG2\n1.SKB5.640181.Test.plate.2.E1\tWater\t384PP_AQ_BP2_HT\tI2\t12.068\t3085.0\tNormalizedDNA\tI2\n1.SKB6.640176.Test.plate.2.F1\tWater\t384PP_AQ_BP2_HT\tK2\t12.068\t3085.0\tNormalizedDNA\tK2\nvibrio.positive.control.Test.plate.2.G1\tWater\t384PP_AQ_BP2_HT\tM2\t6.089\t2680.0\tNormalizedDNA\tM2\nblank.Test.plate.2.H1\tWater\t384PP_AQ_BP2_HT\tO2\t0.342\t0.0\tNormalizedDNA\tO2\n1.SKB1.640202.Test.plate.2.A2\tWater\t384PP_AQ_BP2_HT\
tA4\t12.068\t3085.0\tNormalizedDNA\tA4\n1.SKB2.640194.Test.plate.2.B2\tWater\t384PP_AQ_BP2_HT\tC4\t12.068\t3085.0\tNormalizedDNA\tC4\n1.SKB3.640195.Test.plate.2.C2\tWater\t384PP_AQ_BP2_HT\tE4\t12.068\t3085.0\tNormalizedDNA\tE4\n1.SKB4.640189.Test.plate.2.D2\tWater\t384PP_AQ_BP2_HT\tG4\t12.068\t3085.0\tNormalizedDNA\tG4\n1.SKB5.640181.Test.plate.2.E2\tWater\t384PP_AQ_BP2_HT\tI4\t12.068\t3085.0\tNormalizedDNA\tI4\n1.SKB6.640176.Test.plate.2.F2\tWater\t384PP_AQ_BP2_HT\tK4\t12.068\t3085.0\tNormalizedDNA\tK4\nvibrio.positive.control.Test.plate.2.G2\tWater\t384PP_AQ_BP2_HT\tM4\t6.089\t2680.0\tNormalizedDNA\tM4\nblank.Test.plate.2.H2\tWater\t384PP_AQ_BP2_HT\tO4\t0.342\t0.0\tNormalizedDNA\tO4\n1.SKB1.640202.Test.plate.2.A3\tWater\t384PP_AQ_BP2_HT\tA6\t12.068\t3085.0\tNormalizedDNA\tA6\n1.SKB2.640194.Test.plate.2.B3\tWater\t384PP_AQ_BP2_HT\tC6\t12.068\t3085.0\tNormalizedDNA\tC6\n1.SKB3.640195.Test.plate.2.C3\tWater\t384PP_AQ_BP2_HT\tE6\t12.068\t3085.0\tNormalizedDNA\tE6\n1.SKB4.640189.Test.plate.2.D3\tWater\t384PP_AQ_BP2_HT\tG6\t12.068\t3085.0\tNormalizedDNA\tG6\n1.SKB5.640181.Test.plate.2.E3\tWater\t384PP_AQ_BP2_HT\tI6\t12.068\t3085.0\tNormalizedDNA\tI6\n1.SKB6.640176.Test.plate.2.F3\tWater\t384PP_AQ_BP2_HT\tK6\t12.068\t3085.0\tNormalizedDNA\tK6\nvibrio.positive.control.Test.plate.2.G3\tWater\t384PP_AQ_BP2_HT\tM6\t6.089\t2680.0\tNormalizedDNA\tM6\nblank.Test.plate.2.H3\tWater\t384PP_AQ_BP2_HT\tO6\t0.342\t0.0\tNormalizedDNA\tO6\n1.SKB1.640202.Test.plate.2.A4\tWater\t384PP_AQ_BP2_HT\tA8\t12.068\t3085.0\tNormalizedDNA\tA8\n1.SKB2.640194.Test.plate.2.B4\tWater\t384PP_AQ_BP2_HT\tC8\t12.068\t3085.0\tNormalizedDNA\tC8\n1.SKB3.640195.Test.plate.2.C4\tWater\t384PP_AQ_BP2_HT\tE8\t12.068\t3085.0\tNormalizedDNA\tE8\n1.SKB4.640189.Test.plate.2.D4\tWater\t384PP_AQ_BP2_HT\tG8\t12.068\t3085.0\tNormalizedDNA\tG8\n1.SKB5.640181.Test.plate.2.E4\tWater\t384PP_AQ_BP2_HT\tI8\t12.068\t3085.0\tNormalizedDNA\tI8\n1.SKB6.640176.Test.plate.2.F4\tWater\t384PP_AQ_BP2_HT\tK8\t12.068\t3085.0\tNormalizedD
NA\tK8\nvibrio.positive.control.Test.plate.2.G4\tWater\t384PP_AQ_BP2_HT\tM8\t6.089\t2680.0\tNormalizedDNA\tM8\nblank.Test.plate.2.H4\tWater\t384PP_AQ_BP2_HT\tO8\t0.342\t0.0\tNormalizedDNA\tO8\n1.SKB1.640202.Test.plate.2.A5\tWater\t384PP_AQ_BP2_HT\tA10\t12.068\t3085.0\tNormalizedDNA\tA10\n1.SKB2.640194.Test.plate.2.B5\tWater\t384PP_AQ_BP2_HT\tC10\t12.068\t3085.0\tNormalizedDNA\tC10\n1.SKB3.640195.Test.plate.2.C5\tWater\t384PP_AQ_BP2_HT\tE10\t12.068\t3085.0\tNormalizedDNA\tE10\n1.SKB4.640189.Test.plate.2.D5\tWater\t384PP_AQ_BP2_HT\tG10\t12.068\t3085.0\tNormalizedDNA\tG10\n1.SKB5.640181.Test.plate.2.E5\tWater\t384PP_AQ_BP2_HT\tI10\t12.068\t3085.0\tNormalizedDNA\tI10\n1.SKB6.640176.Test.plate.2.F5\tWater\t384PP_AQ_BP2_HT\tK10\t12.068\t3085.0\tNormalizedDNA\tK10\nvibrio.positive.control.Test.plate.2.G5\tWater\t384PP_AQ_BP2_HT\tM10\t6.089\t2680.0\tNormalizedDNA\tM10\nblank.Test.plate.2.H5\tWater\t384PP_AQ_BP2_HT\tO10\t0.342\t0.0\tNormalizedDNA\tO10\n1.SKB1.640202.Test.plate.2.A6\tWater\t384PP_AQ_BP2_HT\tA12\t12.068\t3085.0\tNormalizedDNA\tA12\n1.SKB2.640194.Test.plate.2.B6\tWater\t384PP_AQ_BP2_HT\tC12\t12.068\t3085.0\tNormalizedDNA\tC12\n1.SKB3.640195.Test.plate.2.C6\tWater\t384PP_AQ_BP2_HT\tE12\t12.068\t3085.0\tNormalizedDNA\tE12\n1.SKB4.640189.Test.plate.2.D6\tWater\t384PP_AQ_BP2_HT\tG12\t12.068\t3085.0\tNormalizedDNA\tG12\n1.SKB5.640181.Test.plate.2.E6\tWater\t384PP_AQ_BP2_HT\tI12\t12.068\t3085.0\tNormalizedDNA\tI12\n1.SKB6.640176.Test.plate.2.F6\tWater\t384PP_AQ_BP2_HT\tK12\t12.068\t3085.0\tNormalizedDNA\tK12\nvibrio.positive.control.Test.plate.2.G6\tWater\t384PP_AQ_BP2_HT\tM12\t6.089\t2680.0\tNormalizedDNA\tM12\nblank.Test.plate.2.H6\tWater\t384PP_AQ_BP2_HT\tO12\t0.342\t0.0\tNormalizedDNA\tO12\n1.SKB1.640202.Test.plate.2.A7\tWater\t384PP_AQ_BP2_HT\tA14\t12.068\t3085.0\tNormalizedDNA\tA14\n1.SKB2.640194.Test.plate.2.B7\tWater\t384PP_AQ_BP2_HT\tC14\t12.068\t3085.0\tNormalizedDNA\tC14\n1.SKB3.640195.Test.plate.2.C7\tWater\t384PP_AQ_BP2_HT\tE14\t12.068\t3085.0\tNormalize
dDNA\tE14\n1.SKB4.640189.Test.plate.2.D7\tWater\t384PP_AQ_BP2_HT\tG14\t12.068\t3085.0\tNormalizedDNA\tG14\n1.SKB5.640181.Test.plate.2.E7\tWater\t384PP_AQ_BP2_HT\tI14\t12.068\t3085.0\tNormalizedDNA\tI14\n1.SKB6.640176.Test.plate.2.F7\tWater\t384PP_AQ_BP2_HT\tK14\t12.068\t3085.0\tNormalizedDNA\tK14\nvibrio.positive.control.Test.plate.2.G7\tWater\t384PP_AQ_BP2_HT\tM14\t6.089\t2680.0\tNormalizedDNA\tM14\nblank.Test.plate.2.H7\tWater\t384PP_AQ_BP2_HT\tO14\t0.342\t0.0\tNormalizedDNA\tO14\n1.SKB1.640202.Test.plate.2.A8\tWater\t384PP_AQ_BP2_HT\tA16\t12.068\t3085.0\tNormalizedDNA\tA16\n1.SKB2.640194.Test.plate.2.B8\tWater\t384PP_AQ_BP2_HT\tC16\t12.068\t3085.0\tNormalizedDNA\tC16\n1.SKB3.640195.Test.plate.2.C8\tWater\t384PP_AQ_BP2_HT\tE16\t12.068\t3085.0\tNormalizedDNA\tE16\n1.SKB4.640189.Test.plate.2.D8\tWater\t384PP_AQ_BP2_HT\tG16\t12.068\t3085.0\tNormalizedDNA\tG16\n1.SKB5.640181.Test.plate.2.E8\tWater\t384PP_AQ_BP2_HT\tI16\t12.068\t3085.0\tNormalizedDNA\tI16\n1.SKB6.640176.Test.plate.2.F8\tWater\t384PP_AQ_BP2_HT\tK16\t12.068\t3085.0\tNormalizedDNA\tK16\nvibrio.positive.control.Test.plate.2.G8\tWater\t384PP_AQ_BP2_HT\tM16\t6.089\t2680.0\tNormalizedDNA\tM16\nblank.Test.plate.2.H8\tWater\t384PP_AQ_BP2_HT\tO16\t0.342\t0.0\tNormalizedDNA\tO16\n1.SKB1.640202.Test.plate.2.A9\tWater\t384PP_AQ_BP2_HT\tA18\t12.068\t3085.0\tNormalizedDNA\tA18\n1.SKB2.640194.Test.plate.2.B9\tWater\t384PP_AQ_BP2_HT\tC18\t12.068\t3085.0\tNormalizedDNA\tC18\n1.SKB3.640195.Test.plate.2.C9\tWater\t384PP_AQ_BP2_HT\tE18\t12.068\t3085.0\tNormalizedDNA\tE18\n1.SKB4.640189.Test.plate.2.D9\tWater\t384PP_AQ_BP2_HT\tG18\t12.068\t3085.0\tNormalizedDNA\tG18\n1.SKB5.640181.Test.plate.2.E9\tWater\t384PP_AQ_BP2_HT\tI18\t12.068\t3085.0\tNormalizedDNA\tI18\n1.SKB6.640176.Test.plate.2.F9\tWater\t384PP_AQ_BP2_HT\tK18\t12.068\t3085.0\tNormalizedDNA\tK18\nvibrio.positive.control.Test.plate.2.G9\tWater\t384PP_AQ_BP2_HT\tM18\t6.089\t2680.0\tNormalizedDNA\tM18\nblank.Test.plate.2.H9\tWater\t384PP_AQ_BP2_HT\tO18\t0.342\t0.0\tNo
rmalizedDNA\tO18\n1.SKB1.640202.Test.plate.2.A10\tWater\t384PP_AQ_BP2_HT\tA20\t12.068\t3085.0\tNormalizedDNA\tA20\n1.SKB2.640194.Test.plate.2.B10\tWater\t384PP_AQ_BP2_HT\tC20\t12.068\t3085.0\tNormalizedDNA\tC20\n1.SKB3.640195.Test.plate.2.C10\tWater\t384PP_AQ_BP2_HT\tE20\t12.068\t3085.0\tNormalizedDNA\tE20\n1.SKB4.640189.Test.plate.2.D10\tWater\t384PP_AQ_BP2_HT\tG20\t12.068\t3085.0\tNormalizedDNA\tG20\n1.SKB5.640181.Test.plate.2.E10\tWater\t384PP_AQ_BP2_HT\tI20\t12.068\t3085.0\tNormalizedDNA\tI20\n1.SKB6.640176.Test.plate.2.F10\tWater\t384PP_AQ_BP2_HT\tK20\t12.068\t3085.0\tNormalizedDNA\tK20\nvibrio.positive.control.Test.plate.2.G10\tWater\t384PP_AQ_BP2_HT\tM20\t6.089\t2680.0\tNormalizedDNA\tM20\nblank.Test.plate.2.H10\tWater\t384PP_AQ_BP2_HT\tO20\t0.342\t0.0\tNormalizedDNA\tO20\n1.SKB1.640202.Test.plate.2.A11\tWater\t384PP_AQ_BP2_HT\tA22\t12.068\t3085.0\tNormalizedDNA\tA22\n1.SKB2.640194.Test.plate.2.B11\tWater\t384PP_AQ_BP2_HT\tC22\t12.068\t3085.0\tNormalizedDNA\tC22\n1.SKB3.640195.Test.plate.2.C11\tWater\t384PP_AQ_BP2_HT\tE22\t12.068\t3085.0\tNormalizedDNA\tE22\n1.SKB4.640189.Test.plate.2.D11\tWater\t384PP_AQ_BP2_HT\tG22\t12.068\t3085.0\tNormalizedDNA\tG22\n1.SKB5.640181.Test.plate.2.E11\tWater\t384PP_AQ_BP2_HT\tI22\t12.068\t3085.0\tNormalizedDNA\tI22\n1.SKB6.640176.Test.plate.2.F11\tWater\t384PP_AQ_BP2_HT\tK22\t12.068\t3085.0\tNormalizedDNA\tK22\nvibrio.positive.control.Test.plate.2.G11\tWater\t384PP_AQ_BP2_HT\tM22\t6.089\t2680.0\tNormalizedDNA\tM22\nblank.Test.plate.2.H11\tWater\t384PP_AQ_BP2_HT\tO22\t0.342\t0.0\tNormalizedDNA\tO22\n1.SKB1.640202.Test.plate.2.A12\tWater\t384PP_AQ_BP2_HT\tA24\t12.068\t3085.0\tNormalizedDNA\tA24\n1.SKB2.640194.Test.plate.2.B12\tWater\t384PP_AQ_BP2_HT\tC24\t12.068\t3085.0\tNormalizedDNA\tC24\n1.SKB3.640195.Test.plate.2.C12\tWater\t384PP_AQ_BP2_HT\tE24\t12.068\t3085.0\tNormalizedDNA\tE24\n1.SKB4.640189.Test.plate.2.D12\tWater\t384PP_AQ_BP2_HT\tG24\t12.068\t3085.0\tNormalizedDNA\tG24\n1.SKB5.640181.Test.plate.2.E12\tWater\t384PP_AQ_
BP2_HT\tI24\t12.068\t3085.0\tNormalizedDNA\tI24\n1.SKD1.640179.Test.plate.2.F12\tWater\t384PP_AQ_BP2_HT\tK24\t12.068\t3085.0\tNormalizedDNA\tK24\nvibrio.positive.control.Test.plate.2.G12\tWater\t384PP_AQ_BP2_HT\tM24\t6.089\t2680.0\tNormalizedDNA\tM24\n1.SKB1.640202.Test.plate.3.A1\tWater\t384PP_AQ_BP2_HT\tB1\t12.068\t3085.0\tNormalizedDNA\tB1\n1.SKB2.640194.Test.plate.3.B1\tWater\t384PP_AQ_BP2_HT\tD1\t12.068\t3085.0\tNormalizedDNA\tD1\n1.SKB3.640195.Test.plate.3.C1\tWater\t384PP_AQ_BP2_HT\tF1\t12.068\t3085.0\tNormalizedDNA\tF1\n1.SKB4.640189.Test.plate.3.D1\tWater\t384PP_AQ_BP2_HT\tH1\t12.068\t3085.0\tNormalizedDNA\tH1\n1.SKB5.640181.Test.plate.3.E1\tWater\t384PP_AQ_BP2_HT\tJ1\t12.068\t3085.0\tNormalizedDNA\tJ1\n1.SKB6.640176.Test.plate.3.F1\tWater\t384PP_AQ_BP2_HT\tL1\t12.068\t3085.0\tNormalizedDNA\tL1\nvibrio.positive.control.Test.plate.3.G1\tWater\t384PP_AQ_BP2_HT\tN1\t6.089\t2680.0\tNormalizedDNA\tN1\nblank.Test.plate.3.H1\tWater\t384PP_AQ_BP2_HT\tP1\t0.342\t0.0\tNormalizedDNA\tP1\n1.SKB1.640202.Test.plate.3.A2\tWater\t384PP_AQ_BP2_HT\tB3\t12.068\t3085.0\tNormalizedDNA\tB3\n1.SKB2.640194.Test.plate.3.B2\tWater\t384PP_AQ_BP2_HT\tD3\t12.068\t3085.0\tNormalizedDNA\tD3\n1.SKB3.640195.Test.plate.3.C2\tWater\t384PP_AQ_BP2_HT\tF3\t12.068\t3085.0\tNormalizedDNA\tF3\n1.SKB4.640189.Test.plate.3.D2\tWater\t384PP_AQ_BP2_HT\tH3\t12.068\t3085.0\tNormalizedDNA\tH3\n1.SKB5.640181.Test.plate.3.E2\tWater\t384PP_AQ_BP2_HT\tJ3\t12.068\t3085.0\tNormalizedDNA\tJ3\n1.SKB6.640176.Test.plate.3.F2\tWater\t384PP_AQ_BP2_HT\tL3\t12.068\t3085.0\tNormalizedDNA\tL3\nvibrio.positive.control.Test.plate.3.G2\tWater\t384PP_AQ_BP2_HT\tN3\t6.089\t2680.0\tNormalizedDNA\tN3\nblank.Test.plate.3.H2\tWater\t384PP_AQ_BP2_HT\tP3\t0.342\t0.0\tNormalizedDNA\tP3\n1.SKB1.640202.Test.plate.3.A3\tWater\t384PP_AQ_BP2_HT\tB5\t12.068\t3085.0\tNormalizedDNA\tB5\n1.SKB2.640194.Test.plate.3.B3\tWater\t384PP_AQ_BP2_HT\tD5\t12.068\t3085.0\tNormalizedDNA\tD5\n1.SKB3.640195.Test.plate.3.C3\tWater\t384PP_AQ_BP2_HT\tF5\t12.
068\t3085.0\tNormalizedDNA\tF5\n1.SKB4.640189.Test.plate.3.D3\tWater\t384PP_AQ_BP2_HT\tH5\t12.068\t3085.0\tNormalizedDNA\tH5\n1.SKB5.640181.Test.plate.3.E3\tWater\t384PP_AQ_BP2_HT\tJ5\t12.068\t3085.0\tNormalizedDNA\tJ5\n1.SKB6.640176.Test.plate.3.F3\tWater\t384PP_AQ_BP2_HT\tL5\t12.068\t3085.0\tNormalizedDNA\tL5\nvibrio.positive.control.Test.plate.3.G3\tWater\t384PP_AQ_BP2_HT\tN5\t6.089\t2680.0\tNormalizedDNA\tN5\nblank.Test.plate.3.H3\tWater\t384PP_AQ_BP2_HT\tP5\t0.342\t0.0\tNormalizedDNA\tP5\n1.SKB1.640202.Test.plate.3.A4\tWater\t384PP_AQ_BP2_HT\tB7\t12.068\t3085.0\tNormalizedDNA\tB7\n1.SKB2.640194.Test.plate.3.B4\tWater\t384PP_AQ_BP2_HT\tD7\t12.068\t3085.0\tNormalizedDNA\tD7\n1.SKB3.640195.Test.plate.3.C4\tWater\t384PP_AQ_BP2_HT\tF7\t12.068\t3085.0\tNormalizedDNA\tF7\n1.SKB4.640189.Test.plate.3.D4\tWater\t384PP_AQ_BP2_HT\tH7\t12.068\t3085.0\tNormalizedDNA\tH7\n1.SKB5.640181.Test.plate.3.E4\tWater\t384PP_AQ_BP2_HT\tJ7\t12.068\t3085.0\tNormalizedDNA\tJ7\n1.SKB6.640176.Test.plate.3.F4\tWater\t384PP_AQ_BP2_HT\tL7\t12.068\t3085.0\tNormalizedDNA\tL7\nvibrio.positive.control.Test.plate.3.G4\tWater\t384PP_AQ_BP2_HT\tN7\t6.089\t2680.0\tNormalizedDNA\tN7\nblank.Test.plate.3.H4\tWater\t384PP_AQ_BP2_HT\tP7\t0.342\t0.0\tNormalizedDNA\tP7\n1.SKB1.640202.Test.plate.3.A5\tWater\t384PP_AQ_BP2_HT\tB9\t12.068\t3085.0\tNormalizedDNA\tB9\n1.SKB2.640194.Test.plate.3.B5\tWater\t384PP_AQ_BP2_HT\tD9\t12.068\t3085.0\tNormalizedDNA\tD9\n1.SKB3.640195.Test.plate.3.C5\tWater\t384PP_AQ_BP2_HT\tF9\t12.068\t3085.0\tNormalizedDNA\tF9\n1.SKB4.640189.Test.plate.3.D5\tWater\t384PP_AQ_BP2_HT\tH9\t12.068\t3085.0\tNormalizedDNA\tH9\n1.SKB5.640181.Test.plate.3.E5\tWater\t384PP_AQ_BP2_HT\tJ9\t12.068\t3085.0\tNormalizedDNA\tJ9\n1.SKB6.640176.Test.plate.3.F5\tWater\t384PP_AQ_BP2_HT\tL9\t12.068\t3085.0\tNormalizedDNA\tL9\nvibrio.positive.control.Test.plate.3.G5\tWater\t384PP_AQ_BP2_HT\tN9\t6.089\t2680.0\tNormalizedDNA\tN9\nblank.Test.plate.3.H5\tWater\t384PP_AQ_BP2_HT\tP9\t0.342\t0.0\tNormalizedDNA\tP9\n1.S
KB1.640202.Test.plate.3.A6\tWater\t384PP_AQ_BP2_HT\tB11\t12.068\t3085.0\tNormalizedDNA\tB11\n1.SKB2.640194.Test.plate.3.B6\tWater\t384PP_AQ_BP2_HT\tD11\t12.068\t3085.0\tNormalizedDNA\tD11\n1.SKB3.640195.Test.plate.3.C6\tWater\t384PP_AQ_BP2_HT\tF11\t12.068\t3085.0\tNormalizedDNA\tF11\n1.SKB4.640189.Test.plate.3.D6\tWater\t384PP_AQ_BP2_HT\tH11\t12.068\t3085.0\tNormalizedDNA\tH11\n1.SKB5.640181.Test.plate.3.E6\tWater\t384PP_AQ_BP2_HT\tJ11\t12.068\t3085.0\tNormalizedDNA\tJ11\n1.SKB6.640176.Test.plate.3.F6\tWater\t384PP_AQ_BP2_HT\tL11\t12.068\t3085.0\tNormalizedDNA\tL11\nvibrio.positive.control.Test.plate.3.G6\tWater\t384PP_AQ_BP2_HT\tN11\t6.089\t2680.0\tNormalizedDNA\tN11\nblank.Test.plate.3.H6\tWater\t384PP_AQ_BP2_HT\tP11\t0.342\t0.0\tNormalizedDNA\tP11\n1.SKB1.640202.Test.plate.3.A7\tWater\t384PP_AQ_BP2_HT\tB13\t12.068\t3085.0\tNormalizedDNA\tB13\n1.SKB2.640194.Test.plate.3.B7\tWater\t384PP_AQ_BP2_HT\tD13\t12.068\t3085.0\tNormalizedDNA\tD13\n1.SKB3.640195.Test.plate.3.C7\tWater\t384PP_AQ_BP2_HT\tF13\t12.068\t3085.0\tNormalizedDNA\tF13\n1.SKB4.640189.Test.plate.3.D7\tWater\t384PP_AQ_BP2_HT\tH13\t12.068\t3085.0\tNormalizedDNA\tH13\n1.SKB5.640181.Test.plate.3.E7\tWater\t384PP_AQ_BP2_HT\tJ13\t12.068\t3085.0\tNormalizedDNA\tJ13\n1.SKB6.640176.Test.plate.3.F7\tWater\t384PP_AQ_BP2_HT\tL13\t12.068\t3085.0\tNormalizedDNA\tL13\nvibrio.positive.control.Test.plate.3.G7\tWater\t384PP_AQ_BP2_HT\tN13\t6.089\t2680.0\tNormalizedDNA\tN13\nblank.Test.plate.3.H7\tWater\t384PP_AQ_BP2_HT\tP13\t0.342\t0.0\tNormalizedDNA\tP13\n1.SKB1.640202.Test.plate.3.A8\tWater\t384PP_AQ_BP2_HT\tB15\t12.068\t3085.0\tNormalizedDNA\tB15\n1.SKB2.640194.Test.plate.3.B8\tWater\t384PP_AQ_BP2_HT\tD15\t12.068\t3085.0\tNormalizedDNA\tD15\n1.SKB3.640195.Test.plate.3.C8\tWater\t384PP_AQ_BP2_HT\tF15\t12.068\t3085.0\tNormalizedDNA\tF15\n1.SKB4.640189.Test.plate.3.D8\tWater\t384PP_AQ_BP2_HT\tH15\t12.068\t3085.0\tNormalizedDNA\tH15\n1.SKB5.640181.Test.plate.3.E8\tWater\t384PP_AQ_BP2_HT\tJ15\t12.068\t3085.0\tNormalizedDNA
\tJ15\n1.SKB6.640176.Test.plate.3.F8\tWater\t384PP_AQ_BP2_HT\tL15\t12.068\t3085.0\tNormalizedDNA\tL15\nvibrio.positive.control.Test.plate.3.G8\tWater\t384PP_AQ_BP2_HT\tN15\t6.089\t2680.0\tNormalizedDNA\tN15\nblank.Test.plate.3.H8\tWater\t384PP_AQ_BP2_HT\tP15\t0.342\t0.0\tNormalizedDNA\tP15\n1.SKB1.640202.Test.plate.3.A9\tWater\t384PP_AQ_BP2_HT\tB17\t12.068\t3085.0\tNormalizedDNA\tB17\n1.SKB2.640194.Test.plate.3.B9\tWater\t384PP_AQ_BP2_HT\tD17\t12.068\t3085.0\tNormalizedDNA\tD17\n1.SKB3.640195.Test.plate.3.C9\tWater\t384PP_AQ_BP2_HT\tF17\t12.068\t3085.0\tNormalizedDNA\tF17\n1.SKB4.640189.Test.plate.3.D9\tWater\t384PP_AQ_BP2_HT\tH17\t12.068\t3085.0\tNormalizedDNA\tH17\n1.SKB5.640181.Test.plate.3.E9\tWater\t384PP_AQ_BP2_HT\tJ17\t12.068\t3085.0\tNormalizedDNA\tJ17\n1.SKB6.640176.Test.plate.3.F9\tWater\t384PP_AQ_BP2_HT\tL17\t12.068\t3085.0\tNormalizedDNA\tL17\nvibrio.positive.control.Test.plate.3.G9\tWater\t384PP_AQ_BP2_HT\tN17\t6.089\t2680.0\tNormalizedDNA\tN17\nblank.Test.plate.3.H9\tWater\t384PP_AQ_BP2_HT\tP17\t0.342\t0.0\tNormalizedDNA\tP17\n1.SKB1.640202.Test.plate.3.A10\tWater\t384PP_AQ_BP2_HT\tB19\t12.068\t3085.0\tNormalizedDNA\tB19\n1.SKB2.640194.Test.plate.3.B10\tWater\t384PP_AQ_BP2_HT\tD19\t12.068\t3085.0\tNormalizedDNA\tD19\n1.SKB3.640195.Test.plate.3.C10\tWater\t384PP_AQ_BP2_HT\tF19\t12.068\t3085.0\tNormalizedDNA\tF19\n1.SKB4.640189.Test.plate.3.D10\tWater\t384PP_AQ_BP2_HT\tH19\t12.068\t3085.0\tNormalizedDNA\tH19\n1.SKB5.640181.Test.plate.3.E10\tWater\t384PP_AQ_BP2_HT\tJ19\t12.068\t3085.0\tNormalizedDNA\tJ19\n1.SKB6.640176.Test.plate.3.F10\tWater\t384PP_AQ_BP2_HT\tL19\t12.068\t3085.0\tNormalizedDNA\tL19\nvibrio.positive.control.Test.plate.3.G10\tWater\t384PP_AQ_BP2_HT\tN19\t6.089\t2680.0\tNormalizedDNA\tN19\nblank.Test.plate.3.H10\tWater\t384PP_AQ_BP2_HT\tP19\t0.342\t0.0\tNormalizedDNA\tP19\n1.SKB1.640202.Test.plate.3.A11\tWater\t384PP_AQ_BP2_HT\tB21\t12.068\t3085.0\tNormalizedDNA\tB21\n1.SKB2.640194.Test.plate.3.B11\tWater\t384PP_AQ_BP2_HT\tD21\t12.068\t3085
.0\tNormalizedDNA\tD21\n1.SKB3.640195.Test.plate.3.C11\tWater\t384PP_AQ_BP2_HT\tF21\t12.068\t3085.0\tNormalizedDNA\tF21\n1.SKB4.640189.Test.plate.3.D11\tWater\t384PP_AQ_BP2_HT\tH21\t12.068\t3085.0\tNormalizedDNA\tH21\n1.SKB5.640181.Test.plate.3.E11\tWater\t384PP_AQ_BP2_HT\tJ21\t12.068\t3085.0\tNormalizedDNA\tJ21\n1.SKB6.640176.Test.plate.3.F11\tWater\t384PP_AQ_BP2_HT\tL21\t12.068\t3085.0\tNormalizedDNA\tL21\nvibrio.positive.control.Test.plate.3.G11\tWater\t384PP_AQ_BP2_HT\tN21\t6.089\t2680.0\tNormalizedDNA\tN21\nblank.Test.plate.3.H11\tWater\t384PP_AQ_BP2_HT\tP21\t0.342\t0.0\tNormalizedDNA\tP21\n1.SKB1.640202.Test.plate.3.A12\tWater\t384PP_AQ_BP2_HT\tB23\t12.068\t3085.0\tNormalizedDNA\tB23\n1.SKB2.640194.Test.plate.3.B12\tWater\t384PP_AQ_BP2_HT\tD23\t12.068\t3085.0\tNormalizedDNA\tD23\n1.SKB3.640195.Test.plate.3.C12\tWater\t384PP_AQ_BP2_HT\tF23\t12.068\t3085.0\tNormalizedDNA\tF23\n1.SKB4.640189.Test.plate.3.D12\tWater\t384PP_AQ_BP2_HT\tH23\t12.068\t3085.0\tNormalizedDNA\tH23\n1.SKB5.640181.Test.plate.3.E12\tWater\t384PP_AQ_BP2_HT\tJ23\t12.068\t3085.0\tNormalizedDNA\tJ23\n1.SKD5.640186.Test.plate.3.F12\tWater\t384PP_AQ_BP2_HT\tL23\t12.068\t3085.0\tNormalizedDNA\tL23\nvibrio.positive.control.Test.plate.3.G12\tWater\t384PP_AQ_BP2_HT\tN23\t6.089\t2680.0\tNormalizedDNA\tN23\n1.SKB1.640202.Test.plate.4.A1\tWater\t384PP_AQ_BP2_HT\tB2\t12.068\t3085.0\tNormalizedDNA\tB2\n1.SKB2.640194.Test.plate.4.B1\tWater\t384PP_AQ_BP2_HT\tD2\t12.068\t3085.0\tNormalizedDNA\tD2\n1.SKB3.640195.Test.plate.4.C1\tWater\t384PP_AQ_BP2_HT\tF2\t12.068\t3085.0\tNormalizedDNA\tF2\n1.SKB4.640189.Test.plate.4.D1\tWater\t384PP_AQ_BP2_HT\tH2\t12.068\t3085.0\tNormalizedDNA\tH2\n1.SKB5.640181.Test.plate.4.E1\tWater\t384PP_AQ_BP2_HT\tJ2\t12.068\t3085.0\tNormalizedDNA\tJ2\n1.SKB6.640176.Test.plate.4.F1\tWater\t384PP_AQ_BP2_HT\tL2\t12.068\t3085.0\tNormalizedDNA\tL2\nvibrio.positive.control.Test.plate.4.G1\tWater\t384PP_AQ_BP2_HT\tN2\t6.089\t2680.0\tNormalizedDNA\tN2\nblank.Test.plate.4.H1\tWater\t384PP_AQ_BP2
_HT\tP2\t0.342\t0.0\tNormalizedDNA\tP2\n1.SKB1.640202.Test.plate.4.A2\tWater\t384PP_AQ_BP2_HT\tB4\t12.068\t3085.0\tNormalizedDNA\tB4\n1.SKB2.640194.Test.plate.4.B2\tWater\t384PP_AQ_BP2_HT\tD4\t12.068\t3085.0\tNormalizedDNA\tD4\n1.SKB3.640195.Test.plate.4.C2\tWater\t384PP_AQ_BP2_HT\tF4\t12.068\t3085.0\tNormalizedDNA\tF4\n1.SKB4.640189.Test.plate.4.D2\tWater\t384PP_AQ_BP2_HT\tH4\t12.068\t3085.0\tNormalizedDNA\tH4\n1.SKB5.640181.Test.plate.4.E2\tWater\t384PP_AQ_BP2_HT\tJ4\t12.068\t3085.0\tNormalizedDNA\tJ4\n1.SKB6.640176.Test.plate.4.F2\tWater\t384PP_AQ_BP2_HT\tL4\t12.068\t3085.0\tNormalizedDNA\tL4\nvibrio.positive.control.Test.plate.4.G2\tWater\t384PP_AQ_BP2_HT\tN4\t6.089\t2680.0\tNormalizedDNA\tN4\nblank.Test.plate.4.H2\tWater\t384PP_AQ_BP2_HT\tP4\t0.342\t0.0\tNormalizedDNA\tP4\n1.SKB1.640202.Test.plate.4.A3\tWater\t384PP_AQ_BP2_HT\tB6\t12.068\t3085.0\tNormalizedDNA\tB6\n1.SKB2.640194.Test.plate.4.B3\tWater\t384PP_AQ_BP2_HT\tD6\t12.068\t3085.0\tNormalizedDNA\tD6\n1.SKB3.640195.Test.plate.4.C3\tWater\t384PP_AQ_BP2_HT\tF6\t12.068\t3085.0\tNormalizedDNA\tF6\n1.SKB4.640189.Test.plate.4.D3\tWater\t384PP_AQ_BP2_HT\tH6\t12.068\t3085.0\tNormalizedDNA\tH6\n1.SKB5.640181.Test.plate.4.E3\tWater\t384PP_AQ_BP2_HT\tJ6\t12.068\t3085.0\tNormalizedDNA\tJ6\n1.SKB6.640176.Test.plate.4.F3\tWater\t384PP_AQ_BP2_HT\tL6\t12.068\t3085.0\tNormalizedDNA\tL6\nvibrio.positive.control.Test.plate.4.G3\tWater\t384PP_AQ_BP2_HT\tN6\t6.089\t2680.0\tNormalizedDNA\tN6\nblank.Test.plate.4.H3\tWater\t384PP_AQ_BP2_HT\tP6\t0.342\t0.0\tNormalizedDNA\tP6\n1.SKB1.640202.Test.plate.4.A4\tWater\t384PP_AQ_BP2_HT\tB8\t12.068\t3085.0\tNormalizedDNA\tB8\n1.SKB2.640194.Test.plate.4.B4\tWater\t384PP_AQ_BP2_HT\tD8\t12.068\t3085.0\tNormalizedDNA\tD8\n1.SKB3.640195.Test.plate.4.C4\tWater\t384PP_AQ_BP2_HT\tF8\t12.068\t3085.0\tNormalizedDNA\tF8\n1.SKB4.640189.Test.plate.4.D4\tWater\t384PP_AQ_BP2_HT\tH8\t12.068\t3085.0\tNormalizedDNA\tH8\n1.SKB5.640181.Test.plate.4.E4\tWater\t384PP_AQ_BP2_HT\tJ8\t12.068\t3085.0\tNormalizedD
NA\tJ8\n1.SKB6.640176.Test.plate.4.F4\tWater\t384PP_AQ_BP2_HT\tL8\t12.068\t3085.0\tNormalizedDNA\tL8\nvibrio.positive.control.Test.plate.4.G4\tWater\t384PP_AQ_BP2_HT\tN8\t6.089\t2680.0\tNormalizedDNA\tN8\nblank.Test.plate.4.H4\tWater\t384PP_AQ_BP2_HT\tP8\t0.342\t0.0\tNormalizedDNA\tP8\n1.SKB1.640202.Test.plate.4.A5\tWater\t384PP_AQ_BP2_HT\tB10\t12.068\t3085.0\tNormalizedDNA\tB10\n1.SKB2.640194.Test.plate.4.B5\tWater\t384PP_AQ_BP2_HT\tD10\t12.068\t3085.0\tNormalizedDNA\tD10\n1.SKB3.640195.Test.plate.4.C5\tWater\t384PP_AQ_BP2_HT\tF10\t12.068\t3085.0\tNormalizedDNA\tF10\n1.SKB4.640189.Test.plate.4.D5\tWater\t384PP_AQ_BP2_HT\tH10\t12.068\t3085.0\tNormalizedDNA\tH10\n1.SKB5.640181.Test.plate.4.E5\tWater\t384PP_AQ_BP2_HT\tJ10\t12.068\t3085.0\tNormalizedDNA\tJ10\n1.SKB6.640176.Test.plate.4.F5\tWater\t384PP_AQ_BP2_HT\tL10\t12.068\t3085.0\tNormalizedDNA\tL10\nvibrio.positive.control.Test.plate.4.G5\tWater\t384PP_AQ_BP2_HT\tN10\t6.089\t2680.0\tNormalizedDNA\tN10\nblank.Test.plate.4.H5\tWater\t384PP_AQ_BP2_HT\tP10\t0.342\t0.0\tNormalizedDNA\tP10\n1.SKB1.640202.Test.plate.4.A6\tWater\t384PP_AQ_BP2_HT\tB12\t12.068\t3085.0\tNormalizedDNA\tB12\n1.SKB2.640194.Test.plate.4.B6\tWater\t384PP_AQ_BP2_HT\tD12\t12.068\t3085.0\tNormalizedDNA\tD12\n1.SKB3.640195.Test.plate.4.C6\tWater\t384PP_AQ_BP2_HT\tF12\t12.068\t3085.0\tNormalizedDNA\tF12\n1.SKB4.640189.Test.plate.4.D6\tWater\t384PP_AQ_BP2_HT\tH12\t12.068\t3085.0\tNormalizedDNA\tH12\n1.SKB5.640181.Test.plate.4.E6\tWater\t384PP_AQ_BP2_HT\tJ12\t12.068\t3085.0\tNormalizedDNA\tJ12\n1.SKB6.640176.Test.plate.4.F6\tWater\t384PP_AQ_BP2_HT\tL12\t12.068\t3085.0\tNormalizedDNA\tL12\nvibrio.positive.control.Test.plate.4.G6\tWater\t384PP_AQ_BP2_HT\tN12\t6.089\t2680.0\tNormalizedDNA\tN12\nblank.Test.plate.4.H6\tWater\t384PP_AQ_BP2_HT\tP12\t0.342\t0.0\tNormalizedDNA\tP12\n1.SKB1.640202.Test.plate.4.A7\tWater\t384PP_AQ_BP2_HT\tB14\t12.068\t3085.0\tNormalizedDNA\tB14\n1.SKB2.640194.Test.plate.4.B7\tWater\t384PP_AQ_BP2_HT\tD14\t12.068\t3085.0\tNormalizedD
NA\tD14\n1.SKB3.640195.Test.plate.4.C7\tWater\t384PP_AQ_BP2_HT\tF14\t12.068\t3085.0\tNormalizedDNA\tF14\n1.SKB4.640189.Test.plate.4.D7\tWater\t384PP_AQ_BP2_HT\tH14\t12.068\t3085.0\tNormalizedDNA\tH14\n1.SKB5.640181.Test.plate.4.E7\tWater\t384PP_AQ_BP2_HT\tJ14\t12.068\t3085.0\tNormalizedDNA\tJ14\n1.SKB6.640176.Test.plate.4.F7\tWater\t384PP_AQ_BP2_HT\tL14\t12.068\t3085.0\tNormalizedDNA\tL14\nvibrio.positive.control.Test.plate.4.G7\tWater\t384PP_AQ_BP2_HT\tN14\t6.089\t2680.0\tNormalizedDNA\tN14\nblank.Test.plate.4.H7\tWater\t384PP_AQ_BP2_HT\tP14\t0.342\t0.0\tNormalizedDNA\tP14\n1.SKB1.640202.Test.plate.4.A8\tWater\t384PP_AQ_BP2_HT\tB16\t12.068\t3085.0\tNormalizedDNA\tB16\n1.SKB2.640194.Test.plate.4.B8\tWater\t384PP_AQ_BP2_HT\tD16\t12.068\t3085.0\tNormalizedDNA\tD16\n1.SKB3.640195.Test.plate.4.C8\tWater\t384PP_AQ_BP2_HT\tF16\t12.068\t3085.0\tNormalizedDNA\tF16\n1.SKB4.640189.Test.plate.4.D8\tWater\t384PP_AQ_BP2_HT\tH16\t12.068\t3085.0\tNormalizedDNA\tH16\n1.SKB5.640181.Test.plate.4.E8\tWater\t384PP_AQ_BP2_HT\tJ16\t12.068\t3085.0\tNormalizedDNA\tJ16\n1.SKB6.640176.Test.plate.4.F8\tWater\t384PP_AQ_BP2_HT\tL16\t12.068\t3085.0\tNormalizedDNA\tL16\nvibrio.positive.control.Test.plate.4.G8\tWater\t384PP_AQ_BP2_HT\tN16\t6.089\t2680.0\tNormalizedDNA\tN16\nblank.Test.plate.4.H8\tWater\t384PP_AQ_BP2_HT\tP16\t0.342\t0.0\tNormalizedDNA\tP16\n1.SKB1.640202.Test.plate.4.A9\tWater\t384PP_AQ_BP2_HT\tB18\t12.068\t3085.0\tNormalizedDNA\tB18\n1.SKB2.640194.Test.plate.4.B9\tWater\t384PP_AQ_BP2_HT\tD18\t12.068\t3085.0\tNormalizedDNA\tD18\n1.SKB3.640195.Test.plate.4.C9\tWater\t384PP_AQ_BP2_HT\tF18\t12.068\t3085.0\tNormalizedDNA\tF18\n1.SKB4.640189.Test.plate.4.D9\tWater\t384PP_AQ_BP2_HT\tH18\t12.068\t3085.0\tNormalizedDNA\tH18\n1.SKB5.640181.Test.plate.4.E9\tWater\t384PP_AQ_BP2_HT\tJ18\t12.068\t3085.0\tNormalizedDNA\tJ18\n1.SKB6.640176.Test.plate.4.F9\tWater\t384PP_AQ_BP2_HT\tL18\t12.068\t3085.0\tNormalizedDNA\tL18\nvibrio.positive.control.Test.plate.4.G9\tWater\t384PP_AQ_BP2_HT\tN18\t6.089\t
2680.0\tNormalizedDNA\tN18\nblank.Test.plate.4.H9\tWater\t384PP_AQ_BP2_HT\tP18\t0.342\t0.0\tNormalizedDNA\tP18\n1.SKB1.640202.Test.plate.4.A10\tWater\t384PP_AQ_BP2_HT\tB20\t12.068\t3085.0\tNormalizedDNA\tB20\n1.SKB2.640194.Test.plate.4.B10\tWater\t384PP_AQ_BP2_HT\tD20\t12.068\t3085.0\tNormalizedDNA\tD20\n1.SKB3.640195.Test.plate.4.C10\tWater\t384PP_AQ_BP2_HT\tF20\t12.068\t3085.0\tNormalizedDNA\tF20\n1.SKB4.640189.Test.plate.4.D10\tWater\t384PP_AQ_BP2_HT\tH20\t12.068\t3085.0\tNormalizedDNA\tH20\n1.SKB5.640181.Test.plate.4.E10\tWater\t384PP_AQ_BP2_HT\tJ20\t12.068\t3085.0\tNormalizedDNA\tJ20\n1.SKB6.640176.Test.plate.4.F10\tWater\t384PP_AQ_BP2_HT\tL20\t12.068\t3085.0\tNormalizedDNA\tL20\nvibrio.positive.control.Test.plate.4.G10\tWater\t384PP_AQ_BP2_HT\tN20\t6.089\t2680.0\tNormalizedDNA\tN20\nblank.Test.plate.4.H10\tWater\t384PP_AQ_BP2_HT\tP20\t0.342\t0.0\tNormalizedDNA\tP20\n1.SKB1.640202.Test.plate.4.A11\tWater\t384PP_AQ_BP2_HT\tB22\t12.068\t3085.0\tNormalizedDNA\tB22\n1.SKB2.640194.Test.plate.4.B11\tWater\t384PP_AQ_BP2_HT\tD22\t12.068\t3085.0\tNormalizedDNA\tD22\n1.SKB3.640195.Test.plate.4.C11\tWater\t384PP_AQ_BP2_HT\tF22\t12.068\t3085.0\tNormalizedDNA\tF22\n1.SKB4.640189.Test.plate.4.D11\tWater\t384PP_AQ_BP2_HT\tH22\t12.068\t3085.0\tNormalizedDNA\tH22\n1.SKB5.640181.Test.plate.4.E11\tWater\t384PP_AQ_BP2_HT\tJ22\t12.068\t3085.0\tNormalizedDNA\tJ22\n1.SKB6.640176.Test.plate.4.F11\tWater\t384PP_AQ_BP2_HT\tL22\t12.068\t3085.0\tNormalizedDNA\tL22\nvibrio.positive.control.Test.plate.4.G11\tWater\t384PP_AQ_BP2_HT\tN22\t6.089\t2680.0\tNormalizedDNA\tN22\nblank.Test.plate.4.H11\tWater\t384PP_AQ_BP2_HT\tP22\t0.342\t0.0\tNormalizedDNA\tP22\n1.SKB1.640202.Test.plate.4.A12\tWater\t384PP_AQ_BP2_HT\tB24\t12.068\t3085.0\tNormalizedDNA\tB24\n1.SKB2.640194.Test.plate.4.B12\tWater\t384PP_AQ_BP2_HT\tD24\t12.068\t3085.0\tNormalizedDNA\tD24\n1.SKB3.640195.Test.plate.4.C12\tWater\t384PP_AQ_BP2_HT\tF24\t12.068\t3085.0\tNormalizedDNA\tF24\n1.SKB4.640189.Test.plate.4.D12\tWater\t384PP_AQ_BP2
_HT\tH24\t12.068\t3085.0\tNormalizedDNA\tH24\n1.SKB5.640181.Test.plate.4.E12\tWater\t384PP_AQ_BP2_HT\tJ24\t12.068\t3085.0\tNormalizedDNA\tJ24\n1.SKM6.640187.Test.plate.4.F12\tWater\t384PP_AQ_BP2_HT\tL24\t12.068\t3085.0\tNormalizedDNA\tL24\nvibrio.positive.control.Test.plate.4.G12\tWater\t384PP_AQ_BP2_HT\tN24\t6.089\t2680.0\tNormalizedDNA\tN24\n1.SKB1.640202.Test.plate.1.A1\tSample\t384PP_AQ_BP2_HT\tA1\t12.068\t415.0\tNormalizedDNA\tA1\n1.SKB2.640194.Test.plate.1.B1\tSample\t384PP_AQ_BP2_HT\tC1\t12.068\t415.0\tNormalizedDNA\tC1\n1.SKB3.640195.Test.plate.1.C1\tSample\t384PP_AQ_BP2_HT\tE1\t12.068\t415.0\tNormalizedDNA\tE1\n1.SKB4.640189.Test.plate.1.D1\tSample\t384PP_AQ_BP2_HT\tG1\t12.068\t415.0\tNormalizedDNA\tG1\n1.SKB5.640181.Test.plate.1.E1\tSample\t384PP_AQ_BP2_HT\tI1\t12.068\t415.0\tNormalizedDNA\tI1\n1.SKB6.640176.Test.plate.1.F1\tSample\t384PP_AQ_BP2_HT\tK1\t12.068\t415.0\tNormalizedDNA\tK1\nvibrio.positive.control.Test.plate.1.G1\tSample\t384PP_AQ_BP2_HT\tM1\t6.089\t820.0\tNormalizedDNA\tM1\nblank.Test.plate.1.H1\tSample\t384PP_AQ_BP2_HT\tO1\t0.342\t3500.0\tNormalizedDNA\tO1\n1.SKB1.640202.Test.plate.1.A2\tSample\t384PP_AQ_BP2_HT\tA3\t12.068\t415.0\tNormalizedDNA\tA3\n1.SKB2.640194.Test.plate.1.B2\tSample\t384PP_AQ_BP2_HT\tC3\t12.068\t415.0\tNormalizedDNA\tC3\n1.SKB3.640195.Test.plate.1.C2\tSample\t384PP_AQ_BP2_HT\tE3\t12.068\t415.0\tNormalizedDNA\tE3\n1.SKB4.640189.Test.plate.1.D2\tSample\t384PP_AQ_BP2_HT\tG3\t12.068\t415.0\tNormalizedDNA\tG3\n1.SKB5.640181.Test.plate.1.E2\tSample\t384PP_AQ_BP2_HT\tI3\t12.068\t415.0\tNormalizedDNA\tI3\n1.SKB6.640176.Test.plate.1.F2\tSample\t384PP_AQ_BP2_HT\tK3\t12.068\t415.0\tNormalizedDNA\tK3\nvibrio.positive.control.Test.plate.1.G2\tSample\t384PP_AQ_BP2_HT\tM3\t6.089\t820.0\tNormalizedDNA\tM3\nblank.Test.plate.1.H2\tSample\t384PP_AQ_BP2_HT\tO3\t0.342\t3500.0\tNormalizedDNA\tO3\n1.SKB1.640202.Test.plate.1.A3\tSample\t384PP_AQ_BP2_HT\tA5\t12.068\t415.0\tNormalizedDNA\tA5\n1.SKB2.640194.Test.plate.1.B3\tSample\t384PP_AQ_BP2_HT
\tC5\t12.068\t415.0\tNormalizedDNA\tC5\n1.SKB3.640195.Test.plate.1.C3\tSample\t384PP_AQ_BP2_HT\tE5\t12.068\t415.0\tNormalizedDNA\tE5\n1.SKB4.640189.Test.plate.1.D3\tSample\t384PP_AQ_BP2_HT\tG5\t12.068\t415.0\tNormalizedDNA\tG5\n1.SKB5.640181.Test.plate.1.E3\tSample\t384PP_AQ_BP2_HT\tI5\t12.068\t415.0\tNormalizedDNA\tI5\n1.SKB6.640176.Test.plate.1.F3\tSample\t384PP_AQ_BP2_HT\tK5\t12.068\t415.0\tNormalizedDNA\tK5\nvibrio.positive.control.Test.plate.1.G3\tSample\t384PP_AQ_BP2_HT\tM5\t6.089\t820.0\tNormalizedDNA\tM5\nblank.Test.plate.1.H3\tSample\t384PP_AQ_BP2_HT\tO5\t0.342\t3500.0\tNormalizedDNA\tO5\n1.SKB1.640202.Test.plate.1.A4\tSample\t384PP_AQ_BP2_HT\tA7\t12.068\t415.0\tNormalizedDNA\tA7\n1.SKB2.640194.Test.plate.1.B4\tSample\t384PP_AQ_BP2_HT\tC7\t12.068\t415.0\tNormalizedDNA\tC7\n1.SKB3.640195.Test.plate.1.C4\tSample\t384PP_AQ_BP2_HT\tE7\t12.068\t415.0\tNormalizedDNA\tE7\n1.SKB4.640189.Test.plate.1.D4\tSample\t384PP_AQ_BP2_HT\tG7\t12.068\t415.0\tNormalizedDNA\tG7\n1.SKB5.640181.Test.plate.1.E4\tSample\t384PP_AQ_BP2_HT\tI7\t12.068\t415.0\tNormalizedDNA\tI7\n1.SKB6.640176.Test.plate.1.F4\tSample\t384PP_AQ_BP2_HT\tK7\t12.068\t415.0\tNormalizedDNA\tK7\nvibrio.positive.control.Test.plate.1.G4\tSample\t384PP_AQ_BP2_HT\tM7\t6.089\t820.0\tNormalizedDNA\tM7\nblank.Test.plate.1.H4\tSample\t384PP_AQ_BP2_HT\tO7\t0.342\t3500.0\tNormalizedDNA\tO7\n1.SKB1.640202.Test.plate.1.A5\tSample\t384PP_AQ_BP2_HT\tA9\t12.068\t415.0\tNormalizedDNA\tA9\n1.SKB2.640194.Test.plate.1.B5\tSample\t384PP_AQ_BP2_HT\tC9\t12.068\t415.0\tNormalizedDNA\tC9\n1.SKB3.640195.Test.plate.1.C5\tSample\t384PP_AQ_BP2_HT\tE9\t12.068\t415.0\tNormalizedDNA\tE9\n1.SKB4.640189.Test.plate.1.D5\tSample\t384PP_AQ_BP2_HT\tG9\t12.068\t415.0\tNormalizedDNA\tG9\n1.SKB5.640181.Test.plate.1.E5\tSample\t384PP_AQ_BP2_HT\tI9\t12.068\t415.0\tNormalizedDNA\tI9\n1.SKB6.640176.Test.plate.1.F5\tSample\t384PP_AQ_BP2_HT\tK9\t12.068\t415.0\tNormalizedDNA\tK9\nvibrio.positive.control.Test.plate.1.G5\tSample\t384PP_AQ_BP2_HT\tM9\t6.089\t8
20.0\tNormalizedDNA\tM9\nblank.Test.plate.1.H5\tSample\t384PP_AQ_BP2_HT\tO9\t0.342\t3500.0\tNormalizedDNA\tO9\n1.SKB1.640202.Test.plate.1.A6\tSample\t384PP_AQ_BP2_HT\tA11\t12.068\t415.0\tNormalizedDNA\tA11\n1.SKB2.640194.Test.plate.1.B6\tSample\t384PP_AQ_BP2_HT\tC11\t12.068\t415.0\tNormalizedDNA\tC11\n1.SKB3.640195.Test.plate.1.C6\tSample\t384PP_AQ_BP2_HT\tE11\t12.068\t415.0\tNormalizedDNA\tE11\n1.SKB4.640189.Test.plate.1.D6\tSample\t384PP_AQ_BP2_HT\tG11\t12.068\t415.0\tNormalizedDNA\tG11\n1.SKB5.640181.Test.plate.1.E6\tSample\t384PP_AQ_BP2_HT\tI11\t12.068\t415.0\tNormalizedDNA\tI11\n1.SKB6.640176.Test.plate.1.F6\tSample\t384PP_AQ_BP2_HT\tK11\t12.068\t415.0\tNormalizedDNA\tK11\nvibrio.positive.control.Test.plate.1.G6\tSample\t384PP_AQ_BP2_HT\tM11\t6.089\t820.0\tNormalizedDNA\tM11\nblank.Test.plate.1.H6\tSample\t384PP_AQ_BP2_HT\tO11\t0.342\t3500.0\tNormalizedDNA\tO11\n1.SKB1.640202.Test.plate.1.A7\tSample\t384PP_AQ_BP2_HT\tA13\t12.068\t415.0\tNormalizedDNA\tA13\n1.SKB2.640194.Test.plate.1.B7\tSample\t384PP_AQ_BP2_HT\tC13\t12.068\t415.0\tNormalizedDNA\tC13\n1.SKB3.640195.Test.plate.1.C7\tSample\t384PP_AQ_BP2_HT\tE13\t12.068\t415.0\tNormalizedDNA\tE13\n1.SKB4.640189.Test.plate.1.D7\tSample\t384PP_AQ_BP2_HT\tG13\t12.068\t415.0\tNormalizedDNA\tG13\n1.SKB5.640181.Test.plate.1.E7\tSample\t384PP_AQ_BP2_HT\tI13\t12.068\t415.0\tNormalizedDNA\tI13\n1.SKB6.640176.Test.plate.1.F7\tSample\t384PP_AQ_BP2_HT\tK13\t12.068\t415.0\tNormalizedDNA\tK13\nvibrio.positive.control.Test.plate.1.G7\tSample\t384PP_AQ_BP2_HT\tM13\t6.089\t820.0\tNormalizedDNA\tM13\nblank.Test.plate.1.H7\tSample\t384PP_AQ_BP2_HT\tO13\t0.342\t3500.0\tNormalizedDNA\tO13\n1.SKB1.640202.Test.plate.1.A8\tSample\t384PP_AQ_BP2_HT\tA15\t12.068\t415.0\tNormalizedDNA\tA15\n1.SKB2.640194.Test.plate.1.B8\tSample\t384PP_AQ_BP2_HT\tC15\t12.068\t415.0\tNormalizedDNA\tC15\n1.SKB3.640195.Test.plate.1.C8\tSample\t384PP_AQ_BP2_HT\tE15\t12.068\t415.0\tNormalizedDNA\tE15\n1.SKB4.640189.Test.plate.1.D8\tSample\t384PP_AQ_BP2_HT\tG15\t12
.068\t415.0\tNormalizedDNA\tG15\n1.SKB5.640181.Test.plate.1.E8\tSample\t384PP_AQ_BP2_HT\tI15\t12.068\t415.0\tNormalizedDNA\tI15\n1.SKB6.640176.Test.plate.1.F8\tSample\t384PP_AQ_BP2_HT\tK15\t12.068\t415.0\tNormalizedDNA\tK15\nvibrio.positive.control.Test.plate.1.G8\tSample\t384PP_AQ_BP2_HT\tM15\t6.089\t820.0\tNormalizedDNA\tM15\nblank.Test.plate.1.H8\tSample\t384PP_AQ_BP2_HT\tO15\t0.342\t3500.0\tNormalizedDNA\tO15\n1.SKB1.640202.Test.plate.1.A9\tSample\t384PP_AQ_BP2_HT\tA17\t12.068\t415.0\tNormalizedDNA\tA17\n1.SKB2.640194.Test.plate.1.B9\tSample\t384PP_AQ_BP2_HT\tC17\t12.068\t415.0\tNormalizedDNA\tC17\n1.SKB3.640195.Test.plate.1.C9\tSample\t384PP_AQ_BP2_HT\tE17\t12.068\t415.0\tNormalizedDNA\tE17\n1.SKB4.640189.Test.plate.1.D9\tSample\t384PP_AQ_BP2_HT\tG17\t12.068\t415.0\tNormalizedDNA\tG17\n1.SKB5.640181.Test.plate.1.E9\tSample\t384PP_AQ_BP2_HT\tI17\t12.068\t415.0\tNormalizedDNA\tI17\n1.SKB6.640176.Test.plate.1.F9\tSample\t384PP_AQ_BP2_HT\tK17\t12.068\t415.0\tNormalizedDNA\tK17\nvibrio.positive.control.Test.plate.1.G9\tSample\t384PP_AQ_BP2_HT\tM17\t6.089\t820.0\tNormalizedDNA\tM17\nblank.Test.plate.1.H9\tSample\t384PP_AQ_BP2_HT\tO17\t0.342\t3500.0\tNormalizedDNA\tO17\n1.SKB1.640202.Test.plate.1.A10\tSample\t384PP_AQ_BP2_HT\tA19\t12.068\t415.0\tNormalizedDNA\tA19\n1.SKB2.640194.Test.plate.1.B10\tSample\t384PP_AQ_BP2_HT\tC19\t12.068\t415.0\tNormalizedDNA\tC19\n1.SKB3.640195.Test.plate.1.C10\tSample\t384PP_AQ_BP2_HT\tE19\t12.068\t415.0\tNormalizedDNA\tE19\n1.SKB4.640189.Test.plate.1.D10\tSample\t384PP_AQ_BP2_HT\tG19\t12.068\t415.0\tNormalizedDNA\tG19\n1.SKB5.640181.Test.plate.1.E10\tSample\t384PP_AQ_BP2_HT\tI19\t12.068\t415.0\tNormalizedDNA\tI19\n1.SKB6.640176.Test.plate.1.F10\tSample\t384PP_AQ_BP2_HT\tK19\t12.068\t415.0\tNormalizedDNA\tK19\nvibrio.positive.control.Test.plate.1.G10\tSample\t384PP_AQ_BP2_HT\tM19\t6.089\t820.0\tNormalizedDNA\tM19\nblank.Test.plate.1.H10\tSample\t384PP_AQ_BP2_HT\tO19\t0.342\t3500.0\tNormalizedDNA\tO19\n1.SKB1.640202.Test.plate.1.A11\tSamp
le\t384PP_AQ_BP2_HT\tA21\t12.068\t415.0\tNormalizedDNA\tA21\n1.SKB2.640194.Test.plate.1.B11\tSample\t384PP_AQ_BP2_HT\tC21\t12.068\t415.0\tNormalizedDNA\tC21\n1.SKB3.640195.Test.plate.1.C11\tSample\t384PP_AQ_BP2_HT\tE21\t12.068\t415.0\tNormalizedDNA\tE21\n1.SKB4.640189.Test.plate.1.D11\tSample\t384PP_AQ_BP2_HT\tG21\t12.068\t415.0\tNormalizedDNA\tG21\n1.SKB5.640181.Test.plate.1.E11\tSample\t384PP_AQ_BP2_HT\tI21\t12.068\t415.0\tNormalizedDNA\tI21\n1.SKB6.640176.Test.plate.1.F11\tSample\t384PP_AQ_BP2_HT\tK21\t12.068\t415.0\tNormalizedDNA\tK21\nvibrio.positive.control.Test.plate.1.G11\tSample\t384PP_AQ_BP2_HT\tM21\t6.089\t820.0\tNormalizedDNA\tM21\nblank.Test.plate.1.H11\tSample\t384PP_AQ_BP2_HT\tO21\t0.342\t3500.0\tNormalizedDNA\tO21\n1.SKB1.640202.Test.plate.1.A12\tSample\t384PP_AQ_BP2_HT\tA23\t12.068\t415.0\tNormalizedDNA\tA23\n1.SKB2.640194.Test.plate.1.B12\tSample\t384PP_AQ_BP2_HT\tC23\t12.068\t415.0\tNormalizedDNA\tC23\n1.SKB3.640195.Test.plate.1.C12\tSample\t384PP_AQ_BP2_HT\tE23\t12.068\t415.0\tNormalizedDNA\tE23\n1.SKB4.640189.Test.plate.1.D12\tSample\t384PP_AQ_BP2_HT\tG23\t12.068\t415.0\tNormalizedDNA\tG23\n1.SKB5.640181.Test.plate.1.E12\tSample\t384PP_AQ_BP2_HT\tI23\t12.068\t415.0\tNormalizedDNA\tI23\n1.SKB8.640193.Test.plate.1.F12\tSample\t384PP_AQ_BP2_HT\tK23\t12.068\t415.0\tNormalizedDNA\tK23\nvibrio.positive.control.Test.plate.1.G12\tSample\t384PP_AQ_BP2_HT\tM23\t6.089\t820.0\tNormalizedDNA\tM23\n1.SKB1.640202.Test.plate.2.A1\tSample\t384PP_AQ_BP2_HT\tA2\t12.068\t415.0\tNormalizedDNA\tA2\n1.SKB2.640194.Test.plate.2.B1\tSample\t384PP_AQ_BP2_HT\tC2\t12.068\t415.0\tNormalizedDNA\tC2\n1.SKB3.640195.Test.plate.2.C1\tSample\t384PP_AQ_BP2_HT\tE2\t12.068\t415.0\tNormalizedDNA\tE2\n1.SKB4.640189.Test.plate.2.D1\tSample\t384PP_AQ_BP2_HT\tG2\t12.068\t415.0\tNormalizedDNA\tG2\n1.SKB5.640181.Test.plate.2.E1\tSample\t384PP_AQ_BP2_HT\tI2\t12.068\t415.0\tNormalizedDNA\tI2\n1.SKB6.640176.Test.plate.2.F1\tSample\t384PP_AQ_BP2_HT\tK2\t12.068\t415.0\tNormalizedDNA\tK2\nvibrio.
positive.control.Test.plate.2.G1\tSample\t384PP_AQ_BP2_HT\tM2\t6.089\t820.0\tNormalizedDNA\tM2\nblank.Test.plate.2.H1\tSample\t384PP_AQ_BP2_HT\tO2\t0.342\t3500.0\tNormalizedDNA\tO2\n1.SKB1.640202.Test.plate.2.A2\tSample\t384PP_AQ_BP2_HT\tA4\t12.068\t415.0\tNormalizedDNA\tA4\n1.SKB2.640194.Test.plate.2.B2\tSample\t384PP_AQ_BP2_HT\tC4\t12.068\t415.0\tNormalizedDNA\tC4\n1.SKB3.640195.Test.plate.2.C2\tSample\t384PP_AQ_BP2_HT\tE4\t12.068\t415.0\tNormalizedDNA\tE4\n1.SKB4.640189.Test.plate.2.D2\tSample\t384PP_AQ_BP2_HT\tG4\t12.068\t415.0\tNormalizedDNA\tG4\n1.SKB5.640181.Test.plate.2.E2\tSample\t384PP_AQ_BP2_HT\tI4\t12.068\t415.0\tNormalizedDNA\tI4\n1.SKB6.640176.Test.plate.2.F2\tSample\t384PP_AQ_BP2_HT\tK4\t12.068\t415.0\tNormalizedDNA\tK4\nvibrio.positive.control.Test.plate.2.G2\tSample\t384PP_AQ_BP2_HT\tM4\t6.089\t820.0\tNormalizedDNA\tM4\nblank.Test.plate.2.H2\tSample\t384PP_AQ_BP2_HT\tO4\t0.342\t3500.0\tNormalizedDNA\tO4\n1.SKB1.640202.Test.plate.2.A3\tSample\t384PP_AQ_BP2_HT\tA6\t12.068\t415.0\tNormalizedDNA\tA6\n1.SKB2.640194.Test.plate.2.B3\tSample\t384PP_AQ_BP2_HT\tC6\t12.068\t415.0\tNormalizedDNA\tC6\n1.SKB3.640195.Test.plate.2.C3\tSample\t384PP_AQ_BP2_HT\tE6\t12.068\t415.0\tNormalizedDNA\tE6\n1.SKB4.640189.Test.plate.2.D3\tSample\t384PP_AQ_BP2_HT\tG6\t12.068\t415.0\tNormalizedDNA\tG6\n1.SKB5.640181.Test.plate.2.E3\tSample\t384PP_AQ_BP2_HT\tI6\t12.068\t415.0\tNormalizedDNA\tI6\n1.SKB6.640176.Test.plate.2.F3\tSample\t384PP_AQ_BP2_HT\tK6\t12.068\t415.0\tNormalizedDNA\tK6\nvibrio.positive.control.Test.plate.2.G3\tSample\t384PP_AQ_BP2_HT\tM6\t6.089\t820.0\tNormalizedDNA\tM6\nblank.Test.plate.2.H3\tSample\t384PP_AQ_BP2_HT\tO6\t0.342\t3500.0\tNormalizedDNA\tO6\n1.SKB1.640202.Test.plate.2.A4\tSample\t384PP_AQ_BP2_HT\tA8\t12.068\t415.0\tNormalizedDNA\tA8\n1.SKB2.640194.Test.plate.2.B4\tSample\t384PP_AQ_BP2_HT\tC8\t12.068\t415.0\tNormalizedDNA\tC8\n1.SKB3.640195.Test.plate.2.C4\tSample\t384PP_AQ_BP2_HT\tE8\t12.068\t415.0\tNormalizedDNA\tE8\n1.SKB4.640189.Test.plate.2.D4\
tSample\t384PP_AQ_BP2_HT\tG8\t12.068\t415.0\tNormalizedDNA\tG8\n1.SKB5.640181.Test.plate.2.E4\tSample\t384PP_AQ_BP2_HT\tI8\t12.068\t415.0\tNormalizedDNA\tI8\n1.SKB6.640176.Test.plate.2.F4\tSample\t384PP_AQ_BP2_HT\tK8\t12.068\t415.0\tNormalizedDNA\tK8\nvibrio.positive.control.Test.plate.2.G4\tSample\t384PP_AQ_BP2_HT\tM8\t6.089\t820.0\tNormalizedDNA\tM8\nblank.Test.plate.2.H4\tSample\t384PP_AQ_BP2_HT\tO8\t0.342\t3500.0\tNormalizedDNA\tO8\n1.SKB1.640202.Test.plate.2.A5\tSample\t384PP_AQ_BP2_HT\tA10\t12.068\t415.0\tNormalizedDNA\tA10\n1.SKB2.640194.Test.plate.2.B5\tSample\t384PP_AQ_BP2_HT\tC10\t12.068\t415.0\tNormalizedDNA\tC10\n1.SKB3.640195.Test.plate.2.C5\tSample\t384PP_AQ_BP2_HT\tE10\t12.068\t415.0\tNormalizedDNA\tE10\n1.SKB4.640189.Test.plate.2.D5\tSample\t384PP_AQ_BP2_HT\tG10\t12.068\t415.0\tNormalizedDNA\tG10\n1.SKB5.640181.Test.plate.2.E5\tSample\t384PP_AQ_BP2_HT\tI10\t12.068\t415.0\tNormalizedDNA\tI10\n1.SKB6.640176.Test.plate.2.F5\tSample\t384PP_AQ_BP2_HT\tK10\t12.068\t415.0\tNormalizedDNA\tK10\nvibrio.positive.control.Test.plate.2.G5\tSample\t384PP_AQ_BP2_HT\tM10\t6.089\t820.0\tNormalizedDNA\tM10\nblank.Test.plate.2.H5\tSample\t384PP_AQ_BP2_HT\tO10\t0.342\t3500.0\tNormalizedDNA\tO10\n1.SKB1.640202.Test.plate.2.A6\tSample\t384PP_AQ_BP2_HT\tA12\t12.068\t415.0\tNormalizedDNA\tA12\n1.SKB2.640194.Test.plate.2.B6\tSample\t384PP_AQ_BP2_HT\tC12\t12.068\t415.0\tNormalizedDNA\tC12\n1.SKB3.640195.Test.plate.2.C6\tSample\t384PP_AQ_BP2_HT\tE12\t12.068\t415.0\tNormalizedDNA\tE12\n1.SKB4.640189.Test.plate.2.D6\tSample\t384PP_AQ_BP2_HT\tG12\t12.068\t415.0\tNormalizedDNA\tG12\n1.SKB5.640181.Test.plate.2.E6\tSample\t384PP_AQ_BP2_HT\tI12\t12.068\t415.0\tNormalizedDNA\tI12\n1.SKB6.640176.Test.plate.2.F6\tSample\t384PP_AQ_BP2_HT\tK12\t12.068\t415.0\tNormalizedDNA\tK12\nvibrio.positive.control.Test.plate.2.G6\tSample\t384PP_AQ_BP2_HT\tM12\t6.089\t820.0\tNormalizedDNA\tM12\nblank.Test.plate.2.H6\tSample\t384PP_AQ_BP2_HT\tO12\t0.342\t3500.0\tNormalizedDNA\tO12\n1.SKB1.640202.Test.pl
ate.2.A7\tSample\t384PP_AQ_BP2_HT\tA14\t12.068\t415.0\tNormalizedDNA\tA14\n1.SKB2.640194.Test.plate.2.B7\tSample\t384PP_AQ_BP2_HT\tC14\t12.068\t415.0\tNormalizedDNA\tC14\n1.SKB3.640195.Test.plate.2.C7\tSample\t384PP_AQ_BP2_HT\tE14\t12.068\t415.0\tNormalizedDNA\tE14\n1.SKB4.640189.Test.plate.2.D7\tSample\t384PP_AQ_BP2_HT\tG14\t12.068\t415.0\tNormalizedDNA\tG14\n1.SKB5.640181.Test.plate.2.E7\tSample\t384PP_AQ_BP2_HT\tI14\t12.068\t415.0\tNormalizedDNA\tI14\n1.SKB6.640176.Test.plate.2.F7\tSample\t384PP_AQ_BP2_HT\tK14\t12.068\t415.0\tNormalizedDNA\tK14\nvibrio.positive.control.Test.plate.2.G7\tSample\t384PP_AQ_BP2_HT\tM14\t6.089\t820.0\tNormalizedDNA\tM14\nblank.Test.plate.2.H7\tSample\t384PP_AQ_BP2_HT\tO14\t0.342\t3500.0\tNormalizedDNA\tO14\n1.SKB1.640202.Test.plate.2.A8\tSample\t384PP_AQ_BP2_HT\tA16\t12.068\t415.0\tNormalizedDNA\tA16\n1.SKB2.640194.Test.plate.2.B8\tSample\t384PP_AQ_BP2_HT\tC16\t12.068\t415.0\tNormalizedDNA\tC16\n1.SKB3.640195.Test.plate.2.C8\tSample\t384PP_AQ_BP2_HT\tE16\t12.068\t415.0\tNormalizedDNA\tE16\n1.SKB4.640189.Test.plate.2.D8\tSample\t384PP_AQ_BP2_HT\tG16\t12.068\t415.0\tNormalizedDNA\tG16\n1.SKB5.640181.Test.plate.2.E8\tSample\t384PP_AQ_BP2_HT\tI16\t12.068\t415.0\tNormalizedDNA\tI16\n1.SKB6.640176.Test.plate.2.F8\tSample\t384PP_AQ_BP2_HT\tK16\t12.068\t415.0\tNormalizedDNA\tK16\nvibrio.positive.control.Test.plate.2.G8\tSample\t384PP_AQ_BP2_HT\tM16\t6.089\t820.0\tNormalizedDNA\tM16\nblank.Test.plate.2.H8\tSample\t384PP_AQ_BP2_HT\tO16\t0.342\t3500.0\tNormalizedDNA\tO16\n1.SKB1.640202.Test.plate.2.A9\tSample\t384PP_AQ_BP2_HT\tA18\t12.068\t415.0\tNormalizedDNA\tA18\n1.SKB2.640194.Test.plate.2.B9\tSample\t384PP_AQ_BP2_HT\tC18\t12.068\t415.0\tNormalizedDNA\tC18\n1.SKB3.640195.Test.plate.2.C9\tSample\t384PP_AQ_BP2_HT\tE18\t12.068\t415.0\tNormalizedDNA\tE18\n1.SKB4.640189.Test.plate.2.D9\tSample\t384PP_AQ_BP2_HT\tG18\t12.068\t415.0\tNormalizedDNA\tG18\n1.SKB5.640181.Test.plate.2.E9\tSample\t384PP_AQ_BP2_HT\tI18\t12.068\t415.0\tNormalizedDNA\tI18\n1.S
KB6.640176.Test.plate.2.F9\tSample\t384PP_AQ_BP2_HT\tK18\t12.068\t415.0\tNormalizedDNA\tK18\nvibrio.positive.control.Test.plate.2.G9\tSample\t384PP_AQ_BP2_HT\tM18\t6.089\t820.0\tNormalizedDNA\tM18\nblank.Test.plate.2.H9\tSample\t384PP_AQ_BP2_HT\tO18\t0.342\t3500.0\tNormalizedDNA\tO18\n1.SKB1.640202.Test.plate.2.A10\tSample\t384PP_AQ_BP2_HT\tA20\t12.068\t415.0\tNormalizedDNA\tA20\n1.SKB2.640194.Test.plate.2.B10\tSample\t384PP_AQ_BP2_HT\tC20\t12.068\t415.0\tNormalizedDNA\tC20\n1.SKB3.640195.Test.plate.2.C10\tSample\t384PP_AQ_BP2_HT\tE20\t12.068\t415.0\tNormalizedDNA\tE20\n1.SKB4.640189.Test.plate.2.D10\tSample\t384PP_AQ_BP2_HT\tG20\t12.068\t415.0\tNormalizedDNA\tG20\n1.SKB5.640181.Test.plate.2.E10\tSample\t384PP_AQ_BP2_HT\tI20\t12.068\t415.0\tNormalizedDNA\tI20\n1.SKB6.640176.Test.plate.2.F10\tSample\t384PP_AQ_BP2_HT\tK20\t12.068\t415.0\tNormalizedDNA\tK20\nvibrio.positive.control.Test.plate.2.G10\tSample\t384PP_AQ_BP2_HT\tM20\t6.089\t820.0\tNormalizedDNA\tM20\nblank.Test.plate.2.H10\tSample\t384PP_AQ_BP2_HT\tO20\t0.342\t3500.0\tNormalizedDNA\tO20\n1.SKB1.640202.Test.plate.2.A11\tSample\t384PP_AQ_BP2_HT\tA22\t12.068\t415.0\tNormalizedDNA\tA22\n1.SKB2.640194.Test.plate.2.B11\tSample\t384PP_AQ_BP2_HT\tC22\t12.068\t415.0\tNormalizedDNA\tC22\n1.SKB3.640195.Test.plate.2.C11\tSample\t384PP_AQ_BP2_HT\tE22\t12.068\t415.0\tNormalizedDNA\tE22\n1.SKB4.640189.Test.plate.2.D11\tSample\t384PP_AQ_BP2_HT\tG22\t12.068\t415.0\tNormalizedDNA\tG22\n1.SKB5.640181.Test.plate.2.E11\tSample\t384PP_AQ_BP2_HT\tI22\t12.068\t415.0\tNormalizedDNA\tI22\n1.SKB6.640176.Test.plate.2.F11\tSample\t384PP_AQ_BP2_HT\tK22\t12.068\t415.0\tNormalizedDNA\tK22\nvibrio.positive.control.Test.plate.2.G11\tSample\t384PP_AQ_BP2_HT\tM22\t6.089\t820.0\tNormalizedDNA\tM22\nblank.Test.plate.2.H11\tSample\t384PP_AQ_BP2_HT\tO22\t0.342\t3500.0\tNormalizedDNA\tO22\n1.SKB1.640202.Test.plate.2.A12\tSample\t384PP_AQ_BP2_HT\tA24\t12.068\t415.0\tNormalizedDNA\tA24\n1.SKB2.640194.Test.plate.2.B12\tSample\t384PP_AQ_BP2_HT\tC24\t1
2.068\t415.0\tNormalizedDNA\tC24\n1.SKB3.640195.Test.plate.2.C12\tSample\t384PP_AQ_BP2_HT\tE24\t12.068\t415.0\tNormalizedDNA\tE24\n1.SKB4.640189.Test.plate.2.D12\tSample\t384PP_AQ_BP2_HT\tG24\t12.068\t415.0\tNormalizedDNA\tG24\n1.SKB5.640181.Test.plate.2.E12\tSample\t384PP_AQ_BP2_HT\tI24\t12.068\t415.0\tNormalizedDNA\tI24\n1.SKD1.640179.Test.plate.2.F12\tSample\t384PP_AQ_BP2_HT\tK24\t12.068\t415.0\tNormalizedDNA\tK24\nvibrio.positive.control.Test.plate.2.G12\tSample\t384PP_AQ_BP2_HT\tM24\t6.089\t820.0\tNormalizedDNA\tM24\n1.SKB1.640202.Test.plate.3.A1\tSample\t384PP_AQ_BP2_HT\tB1\t12.068\t415.0\tNormalizedDNA\tB1\n1.SKB2.640194.Test.plate.3.B1\tSample\t384PP_AQ_BP2_HT\tD1\t12.068\t415.0\tNormalizedDNA\tD1\n1.SKB3.640195.Test.plate.3.C1\tSample\t384PP_AQ_BP2_HT\tF1\t12.068\t415.0\tNormalizedDNA\tF1\n1.SKB4.640189.Test.plate.3.D1\tSample\t384PP_AQ_BP2_HT\tH1\t12.068\t415.0\tNormalizedDNA\tH1\n1.SKB5.640181.Test.plate.3.E1\tSample\t384PP_AQ_BP2_HT\tJ1\t12.068\t415.0\tNormalizedDNA\tJ1\n1.SKB6.640176.Test.plate.3.F1\tSample\t384PP_AQ_BP2_HT\tL1\t12.068\t415.0\tNormalizedDNA\tL1\nvibrio.positive.control.Test.plate.3.G1\tSample\t384PP_AQ_BP2_HT\tN1\t6.089\t820.0\tNormalizedDNA\tN1\nblank.Test.plate.3.H1\tSample\t384PP_AQ_BP2_HT\tP1\t0.342\t3500.0\tNormalizedDNA\tP1\n1.SKB1.640202.Test.plate.3.A2\tSample\t384PP_AQ_BP2_HT\tB3\t12.068\t415.0\tNormalizedDNA\tB3\n1.SKB2.640194.Test.plate.3.B2\tSample\t384PP_AQ_BP2_HT\tD3\t12.068\t415.0\tNormalizedDNA\tD3\n1.SKB3.640195.Test.plate.3.C2\tSample\t384PP_AQ_BP2_HT\tF3\t12.068\t415.0\tNormalizedDNA\tF3\n1.SKB4.640189.Test.plate.3.D2\tSample\t384PP_AQ_BP2_HT\tH3\t12.068\t415.0\tNormalizedDNA\tH3\n1.SKB5.640181.Test.plate.3.E2\tSample\t384PP_AQ_BP2_HT\tJ3\t12.068\t415.0\tNormalizedDNA\tJ3\n1.SKB6.640176.Test.plate.3.F2\tSample\t384PP_AQ_BP2_HT\tL3\t12.068\t415.0\tNormalizedDNA\tL3\nvibrio.positive.control.Test.plate.3.G2\tSample\t384PP_AQ_BP2_HT\tN3\t6.089\t820.0\tNormalizedDNA\tN3\nblank.Test.plate.3.H2\tSample\t384PP_AQ_BP2_HT\tP3\t
0.342\t3500.0\tNormalizedDNA\tP3\n1.SKB1.640202.Test.plate.3.A3\tSample\t384PP_AQ_BP2_HT\tB5\t12.068\t415.0\tNormalizedDNA\tB5\n1.SKB2.640194.Test.plate.3.B3\tSample\t384PP_AQ_BP2_HT\tD5\t12.068\t415.0\tNormalizedDNA\tD5\n1.SKB3.640195.Test.plate.3.C3\tSample\t384PP_AQ_BP2_HT\tF5\t12.068\t415.0\tNormalizedDNA\tF5\n1.SKB4.640189.Test.plate.3.D3\tSample\t384PP_AQ_BP2_HT\tH5\t12.068\t415.0\tNormalizedDNA\tH5\n1.SKB5.640181.Test.plate.3.E3\tSample\t384PP_AQ_BP2_HT\tJ5\t12.068\t415.0\tNormalizedDNA\tJ5\n1.SKB6.640176.Test.plate.3.F3\tSample\t384PP_AQ_BP2_HT\tL5\t12.068\t415.0\tNormalizedDNA\tL5\nvibrio.positive.control.Test.plate.3.G3\tSample\t384PP_AQ_BP2_HT\tN5\t6.089\t820.0\tNormalizedDNA\tN5\nblank.Test.plate.3.H3\tSample\t384PP_AQ_BP2_HT\tP5\t0.342\t3500.0\tNormalizedDNA\tP5\n1.SKB1.640202.Test.plate.3.A4\tSample\t384PP_AQ_BP2_HT\tB7\t12.068\t415.0\tNormalizedDNA\tB7\n1.SKB2.640194.Test.plate.3.B4\tSample\t384PP_AQ_BP2_HT\tD7\t12.068\t415.0\tNormalizedDNA\tD7\n1.SKB3.640195.Test.plate.3.C4\tSample\t384PP_AQ_BP2_HT\tF7\t12.068\t415.0\tNormalizedDNA\tF7\n1.SKB4.640189.Test.plate.3.D4\tSample\t384PP_AQ_BP2_HT\tH7\t12.068\t415.0\tNormalizedDNA\tH7\n1.SKB5.640181.Test.plate.3.E4\tSample\t384PP_AQ_BP2_HT\tJ7\t12.068\t415.0\tNormalizedDNA\tJ7\n1.SKB6.640176.Test.plate.3.F4\tSample\t384PP_AQ_BP2_HT\tL7\t12.068\t415.0\tNormalizedDNA\tL7\nvibrio.positive.control.Test.plate.3.G4\tSample\t384PP_AQ_BP2_HT\tN7\t6.089\t820.0\tNormalizedDNA\tN7\nblank.Test.plate.3.H4\tSample\t384PP_AQ_BP2_HT\tP7\t0.342\t3500.0\tNormalizedDNA\tP7\n1.SKB1.640202.Test.plate.3.A5\tSample\t384PP_AQ_BP2_HT\tB9\t12.068\t415.0\tNormalizedDNA\tB9\n1.SKB2.640194.Test.plate.3.B5\tSample\t384PP_AQ_BP2_HT\tD9\t12.068\t415.0\tNormalizedDNA\tD9\n1.SKB3.640195.Test.plate.3.C5\tSample\t384PP_AQ_BP2_HT\tF9\t12.068\t415.0\tNormalizedDNA\tF9\n1.SKB4.640189.Test.plate.3.D5\tSample\t384PP_AQ_BP2_HT\tH9\t12.068\t415.0\tNormalizedDNA\tH9\n1.SKB5.640181.Test.plate.3.E5\tSample\t384PP_AQ_BP2_HT\tJ9\t12.068\t415.0\tNormalize
dDNA\tJ9\n1.SKB6.640176.Test.plate.3.F5\tSample\t384PP_AQ_BP2_HT\tL9\t12.068\t415.0\tNormalizedDNA\tL9\nvibrio.positive.control.Test.plate.3.G5\tSample\t384PP_AQ_BP2_HT\tN9\t6.089\t820.0\tNormalizedDNA\tN9\nblank.Test.plate.3.H5\tSample\t384PP_AQ_BP2_HT\tP9\t0.342\t3500.0\tNormalizedDNA\tP9\n1.SKB1.640202.Test.plate.3.A6\tSample\t384PP_AQ_BP2_HT\tB11\t12.068\t415.0\tNormalizedDNA\tB11\n1.SKB2.640194.Test.plate.3.B6\tSample\t384PP_AQ_BP2_HT\tD11\t12.068\t415.0\tNormalizedDNA\tD11\n1.SKB3.640195.Test.plate.3.C6\tSample\t384PP_AQ_BP2_HT\tF11\t12.068\t415.0\tNormalizedDNA\tF11\n1.SKB4.640189.Test.plate.3.D6\tSample\t384PP_AQ_BP2_HT\tH11\t12.068\t415.0\tNormalizedDNA\tH11\n1.SKB5.640181.Test.plate.3.E6\tSample\t384PP_AQ_BP2_HT\tJ11\t12.068\t415.0\tNormalizedDNA\tJ11\n1.SKB6.640176.Test.plate.3.F6\tSample\t384PP_AQ_BP2_HT\tL11\t12.068\t415.0\tNormalizedDNA\tL11\nvibrio.positive.control.Test.plate.3.G6\tSample\t384PP_AQ_BP2_HT\tN11\t6.089\t820.0\tNormalizedDNA\tN11\nblank.Test.plate.3.H6\tSample\t384PP_AQ_BP2_HT\tP11\t0.342\t3500.0\tNormalizedDNA\tP11\n1.SKB1.640202.Test.plate.3.A7\tSample\t384PP_AQ_BP2_HT\tB13\t12.068\t415.0\tNormalizedDNA\tB13\n1.SKB2.640194.Test.plate.3.B7\tSample\t384PP_AQ_BP2_HT\tD13\t12.068\t415.0\tNormalizedDNA\tD13\n1.SKB3.640195.Test.plate.3.C7\tSample\t384PP_AQ_BP2_HT\tF13\t12.068\t415.0\tNormalizedDNA\tF13\n1.SKB4.640189.Test.plate.3.D7\tSample\t384PP_AQ_BP2_HT\tH13\t12.068\t415.0\tNormalizedDNA\tH13\n1.SKB5.640181.Test.plate.3.E7\tSample\t384PP_AQ_BP2_HT\tJ13\t12.068\t415.0\tNormalizedDNA\tJ13\n1.SKB6.640176.Test.plate.3.F7\tSample\t384PP_AQ_BP2_HT\tL13\t12.068\t415.0\tNormalizedDNA\tL13\nvibrio.positive.control.Test.plate.3.G7\tSample\t384PP_AQ_BP2_HT\tN13\t6.089\t820.0\tNormalizedDNA\tN13\nblank.Test.plate.3.H7\tSample\t384PP_AQ_BP2_HT\tP13\t0.342\t3500.0\tNormalizedDNA\tP13\n1.SKB1.640202.Test.plate.3.A8\tSample\t384PP_AQ_BP2_HT\tB15\t12.068\t415.0\tNormalizedDNA\tB15\n1.SKB2.640194.Test.plate.3.B8\tSample\t384PP_AQ_BP2_HT\tD15\t12.068\t415.
0\tNormalizedDNA\tD15\n1.SKB3.640195.Test.plate.3.C8\tSample\t384PP_AQ_BP2_HT\tF15\t12.068\t415.0\tNormalizedDNA\tF15\n1.SKB4.640189.Test.plate.3.D8\tSample\t384PP_AQ_BP2_HT\tH15\t12.068\t415.0\tNormalizedDNA\tH15\n1.SKB5.640181.Test.plate.3.E8\tSample\t384PP_AQ_BP2_HT\tJ15\t12.068\t415.0\tNormalizedDNA\tJ15\n1.SKB6.640176.Test.plate.3.F8\tSample\t384PP_AQ_BP2_HT\tL15\t12.068\t415.0\tNormalizedDNA\tL15\nvibrio.positive.control.Test.plate.3.G8\tSample\t384PP_AQ_BP2_HT\tN15\t6.089\t820.0\tNormalizedDNA\tN15\nblank.Test.plate.3.H8\tSample\t384PP_AQ_BP2_HT\tP15\t0.342\t3500.0\tNormalizedDNA\tP15\n1.SKB1.640202.Test.plate.3.A9\tSample\t384PP_AQ_BP2_HT\tB17\t12.068\t415.0\tNormalizedDNA\tB17\n1.SKB2.640194.Test.plate.3.B9\tSample\t384PP_AQ_BP2_HT\tD17\t12.068\t415.0\tNormalizedDNA\tD17\n1.SKB3.640195.Test.plate.3.C9\tSample\t384PP_AQ_BP2_HT\tF17\t12.068\t415.0\tNormalizedDNA\tF17\n1.SKB4.640189.Test.plate.3.D9\tSample\t384PP_AQ_BP2_HT\tH17\t12.068\t415.0\tNormalizedDNA\tH17\n1.SKB5.640181.Test.plate.3.E9\tSample\t384PP_AQ_BP2_HT\tJ17\t12.068\t415.0\tNormalizedDNA\tJ17\n1.SKB6.640176.Test.plate.3.F9\tSample\t384PP_AQ_BP2_HT\tL17\t12.068\t415.0\tNormalizedDNA\tL17\nvibrio.positive.control.Test.plate.3.G9\tSample\t384PP_AQ_BP2_HT\tN17\t6.089\t820.0\tNormalizedDNA\tN17\nblank.Test.plate.3.H9\tSample\t384PP_AQ_BP2_HT\tP17\t0.342\t3500.0\tNormalizedDNA\tP17\n1.SKB1.640202.Test.plate.3.A10\tSample\t384PP_AQ_BP2_HT\tB19\t12.068\t415.0\tNormalizedDNA\tB19\n1.SKB2.640194.Test.plate.3.B10\tSample\t384PP_AQ_BP2_HT\tD19\t12.068\t415.0\tNormalizedDNA\tD19\n1.SKB3.640195.Test.plate.3.C10\tSample\t384PP_AQ_BP2_HT\tF19\t12.068\t415.0\tNormalizedDNA\tF19\n1.SKB4.640189.Test.plate.3.D10\tSample\t384PP_AQ_BP2_HT\tH19\t12.068\t415.0\tNormalizedDNA\tH19\n1.SKB5.640181.Test.plate.3.E10\tSample\t384PP_AQ_BP2_HT\tJ19\t12.068\t415.0\tNormalizedDNA\tJ19\n1.SKB6.640176.Test.plate.3.F10\tSample\t384PP_AQ_BP2_HT\tL19\t12.068\t415.0\tNormalizedDNA\tL19\nvibrio.positive.control.Test.plate.3.G10\tSample\
t384PP_AQ_BP2_HT\tN19\t6.089\t820.0\tNormalizedDNA\tN19\nblank.Test.plate.3.H10\tSample\t384PP_AQ_BP2_HT\tP19\t0.342\t3500.0\tNormalizedDNA\tP19\n1.SKB1.640202.Test.plate.3.A11\tSample\t384PP_AQ_BP2_HT\tB21\t12.068\t415.0\tNormalizedDNA\tB21\n1.SKB2.640194.Test.plate.3.B11\tSample\t384PP_AQ_BP2_HT\tD21\t12.068\t415.0\tNormalizedDNA\tD21\n1.SKB3.640195.Test.plate.3.C11\tSample\t384PP_AQ_BP2_HT\tF21\t12.068\t415.0\tNormalizedDNA\tF21\n1.SKB4.640189.Test.plate.3.D11\tSample\t384PP_AQ_BP2_HT\tH21\t12.068\t415.0\tNormalizedDNA\tH21\n1.SKB5.640181.Test.plate.3.E11\tSample\t384PP_AQ_BP2_HT\tJ21\t12.068\t415.0\tNormalizedDNA\tJ21\n1.SKB6.640176.Test.plate.3.F11\tSample\t384PP_AQ_BP2_HT\tL21\t12.068\t415.0\tNormalizedDNA\tL21\nvibrio.positive.control.Test.plate.3.G11\tSample\t384PP_AQ_BP2_HT\tN21\t6.089\t820.0\tNormalizedDNA\tN21\nblank.Test.plate.3.H11\tSample\t384PP_AQ_BP2_HT\tP21\t0.342\t3500.0\tNormalizedDNA\tP21\n1.SKB1.640202.Test.plate.3.A12\tSample\t384PP_AQ_BP2_HT\tB23\t12.068\t415.0\tNormalizedDNA\tB23\n1.SKB2.640194.Test.plate.3.B12\tSample\t384PP_AQ_BP2_HT\tD23\t12.068\t415.0\tNormalizedDNA\tD23\n1.SKB3.640195.Test.plate.3.C12\tSample\t384PP_AQ_BP2_HT\tF23\t12.068\t415.0\tNormalizedDNA\tF23\n1.SKB4.640189.Test.plate.3.D12\tSample\t384PP_AQ_BP2_HT\tH23\t12.068\t415.0\tNormalizedDNA\tH23\n1.SKB5.640181.Test.plate.3.E12\tSample\t384PP_AQ_BP2_HT\tJ23\t12.068\t415.0\tNormalizedDNA\tJ23\n1.SKD5.640186.Test.plate.3.F12\tSample\t384PP_AQ_BP2_HT\tL23\t12.068\t415.0\tNormalizedDNA\tL23\nvibrio.positive.control.Test.plate.3.G12\tSample\t384PP_AQ_BP2_HT\tN23\t6.089\t820.0\tNormalizedDNA\tN23\n1.SKB1.640202.Test.plate.4.A1\tSample\t384PP_AQ_BP2_HT\tB2\t12.068\t415.0\tNormalizedDNA\tB2\n1.SKB2.640194.Test.plate.4.B1\tSample\t384PP_AQ_BP2_HT\tD2\t12.068\t415.0\tNormalizedDNA\tD2\n1.SKB3.640195.Test.plate.4.C1\tSample\t384PP_AQ_BP2_HT\tF2\t12.068\t415.0\tNormalizedDNA\tF2\n1.SKB4.640189.Test.plate.4.D1\tSample\t384PP_AQ_BP2_HT\tH2\t12.068\t415.0\tNormalizedDNA\tH2\n1.SKB5.640181
.Test.plate.4.E1\tSample\t384PP_AQ_BP2_HT\tJ2\t12.068\t415.0\tNormalizedDNA\tJ2\n1.SKB6.640176.Test.plate.4.F1\tSample\t384PP_AQ_BP2_HT\tL2\t12.068\t415.0\tNormalizedDNA\tL2\nvibrio.positive.control.Test.plate.4.G1\tSample\t384PP_AQ_BP2_HT\tN2\t6.089\t820.0\tNormalizedDNA\tN2\nblank.Test.plate.4.H1\tSample\t384PP_AQ_BP2_HT\tP2\t0.342\t3500.0\tNormalizedDNA\tP2\n1.SKB1.640202.Test.plate.4.A2\tSample\t384PP_AQ_BP2_HT\tB4\t12.068\t415.0\tNormalizedDNA\tB4\n1.SKB2.640194.Test.plate.4.B2\tSample\t384PP_AQ_BP2_HT\tD4\t12.068\t415.0\tNormalizedDNA\tD4\n1.SKB3.640195.Test.plate.4.C2\tSample\t384PP_AQ_BP2_HT\tF4\t12.068\t415.0\tNormalizedDNA\tF4\n1.SKB4.640189.Test.plate.4.D2\tSample\t384PP_AQ_BP2_HT\tH4\t12.068\t415.0\tNormalizedDNA\tH4\n1.SKB5.640181.Test.plate.4.E2\tSample\t384PP_AQ_BP2_HT\tJ4\t12.068\t415.0\tNormalizedDNA\tJ4\n1.SKB6.640176.Test.plate.4.F2\tSample\t384PP_AQ_BP2_HT\tL4\t12.068\t415.0\tNormalizedDNA\tL4\nvibrio.positive.control.Test.plate.4.G2\tSample\t384PP_AQ_BP2_HT\tN4\t6.089\t820.0\tNormalizedDNA\tN4\nblank.Test.plate.4.H2\tSample\t384PP_AQ_BP2_HT\tP4\t0.342\t3500.0\tNormalizedDNA\tP4\n1.SKB1.640202.Test.plate.4.A3\tSample\t384PP_AQ_BP2_HT\tB6\t12.068\t415.0\tNormalizedDNA\tB6\n1.SKB2.640194.Test.plate.4.B3\tSample\t384PP_AQ_BP2_HT\tD6\t12.068\t415.0\tNormalizedDNA\tD6\n1.SKB3.640195.Test.plate.4.C3\tSample\t384PP_AQ_BP2_HT\tF6\t12.068\t415.0\tNormalizedDNA\tF6\n1.SKB4.640189.Test.plate.4.D3\tSample\t384PP_AQ_BP2_HT\tH6\t12.068\t415.0\tNormalizedDNA\tH6\n1.SKB5.640181.Test.plate.4.E3\tSample\t384PP_AQ_BP2_HT\tJ6\t12.068\t415.0\tNormalizedDNA\tJ6\n1.SKB6.640176.Test.plate.4.F3\tSample\t384PP_AQ_BP2_HT\tL6\t12.068\t415.0\tNormalizedDNA\tL6\nvibrio.positive.control.Test.plate.4.G3\tSample\t384PP_AQ_BP2_HT\tN6\t6.089\t820.0\tNormalizedDNA\tN6\nblank.Test.plate.4.H3\tSample\t384PP_AQ_BP2_HT\tP6\t0.342\t3500.0\tNormalizedDNA\tP6\n1.SKB1.640202.Test.plate.4.A4\tSample\t384PP_AQ_BP2_HT\tB8\t12.068\t415.0\tNormalizedDNA\tB8\n1.SKB2.640194.Test.plate.4.B4\tSampl
e\t384PP_AQ_BP2_HT\tD8\t12.068\t415.0\tNormalizedDNA\tD8\n1.SKB3.640195.Test.plate.4.C4\tSample\t384PP_AQ_BP2_HT\tF8\t12.068\t415.0\tNormalizedDNA\tF8\n1.SKB4.640189.Test.plate.4.D4\tSample\t384PP_AQ_BP2_HT\tH8\t12.068\t415.0\tNormalizedDNA\tH8\n1.SKB5.640181.Test.plate.4.E4\tSample\t384PP_AQ_BP2_HT\tJ8\t12.068\t415.0\tNormalizedDNA\tJ8\n1.SKB6.640176.Test.plate.4.F4\tSample\t384PP_AQ_BP2_HT\tL8\t12.068\t415.0\tNormalizedDNA\tL8\nvibrio.positive.control.Test.plate.4.G4\tSample\t384PP_AQ_BP2_HT\tN8\t6.089\t820.0\tNormalizedDNA\tN8\nblank.Test.plate.4.H4\tSample\t384PP_AQ_BP2_HT\tP8\t0.342\t3500.0\tNormalizedDNA\tP8\n1.SKB1.640202.Test.plate.4.A5\tSample\t384PP_AQ_BP2_HT\tB10\t12.068\t415.0\tNormalizedDNA\tB10\n1.SKB2.640194.Test.plate.4.B5\tSample\t384PP_AQ_BP2_HT\tD10\t12.068\t415.0\tNormalizedDNA\tD10\n1.SKB3.640195.Test.plate.4.C5\tSample\t384PP_AQ_BP2_HT\tF10\t12.068\t415.0\tNormalizedDNA\tF10\n1.SKB4.640189.Test.plate.4.D5\tSample\t384PP_AQ_BP2_HT\tH10\t12.068\t415.0\tNormalizedDNA\tH10\n1.SKB5.640181.Test.plate.4.E5\tSample\t384PP_AQ_BP2_HT\tJ10\t12.068\t415.0\tNormalizedDNA\tJ10\n1.SKB6.640176.Test.plate.4.F5\tSample\t384PP_AQ_BP2_HT\tL10\t12.068\t415.0\tNormalizedDNA\tL10\nvibrio.positive.control.Test.plate.4.G5\tSample\t384PP_AQ_BP2_HT\tN10\t6.089\t820.0\tNormalizedDNA\tN10\nblank.Test.plate.4.H5\tSample\t384PP_AQ_BP2_HT\tP10\t0.342\t3500.0\tNormalizedDNA\tP10\n1.SKB1.640202.Test.plate.4.A6\tSample\t384PP_AQ_BP2_HT\tB12\t12.068\t415.0\tNormalizedDNA\tB12\n1.SKB2.640194.Test.plate.4.B6\tSample\t384PP_AQ_BP2_HT\tD12\t12.068\t415.0\tNormalizedDNA\tD12\n1.SKB3.640195.Test.plate.4.C6\tSample\t384PP_AQ_BP2_HT\tF12\t12.068\t415.0\tNormalizedDNA\tF12\n1.SKB4.640189.Test.plate.4.D6\tSample\t384PP_AQ_BP2_HT\tH12\t12.068\t415.0\tNormalizedDNA\tH12\n1.SKB5.640181.Test.plate.4.E6\tSample\t384PP_AQ_BP2_HT\tJ12\t12.068\t415.0\tNormalizedDNA\tJ12\n1.SKB6.640176.Test.plate.4.F6\tSample\t384PP_AQ_BP2_HT\tL12\t12.068\t415.0\tNormalizedDNA\tL12\nvibrio.positive.control.Test.pla
te.4.G6\tSample\t384PP_AQ_BP2_HT\tN12\t6.089\t820.0\tNormalizedDNA\tN12\nblank.Test.plate.4.H6\tSample\t384PP_AQ_BP2_HT\tP12\t0.342\t3500.0\tNormalizedDNA\tP12\n1.SKB1.640202.Test.plate.4.A7\tSample\t384PP_AQ_BP2_HT\tB14\t12.068\t415.0\tNormalizedDNA\tB14\n1.SKB2.640194.Test.plate.4.B7\tSample\t384PP_AQ_BP2_HT\tD14\t12.068\t415.0\tNormalizedDNA\tD14\n1.SKB3.640195.Test.plate.4.C7\tSample\t384PP_AQ_BP2_HT\tF14\t12.068\t415.0\tNormalizedDNA\tF14\n1.SKB4.640189.Test.plate.4.D7\tSample\t384PP_AQ_BP2_HT\tH14\t12.068\t415.0\tNormalizedDNA\tH14\n1.SKB5.640181.Test.plate.4.E7\tSample\t384PP_AQ_BP2_HT\tJ14\t12.068\t415.0\tNormalizedDNA\tJ14\n1.SKB6.640176.Test.plate.4.F7\tSample\t384PP_AQ_BP2_HT\tL14\t12.068\t415.0\tNormalizedDNA\tL14\nvibrio.positive.control.Test.plate.4.G7\tSample\t384PP_AQ_BP2_HT\tN14\t6.089\t820.0\tNormalizedDNA\tN14\nblank.Test.plate.4.H7\tSample\t384PP_AQ_BP2_HT\tP14\t0.342\t3500.0\tNormalizedDNA\tP14\n1.SKB1.640202.Test.plate.4.A8\tSample\t384PP_AQ_BP2_HT\tB16\t12.068\t415.0\tNormalizedDNA\tB16\n1.SKB2.640194.Test.plate.4.B8\tSample\t384PP_AQ_BP2_HT\tD16\t12.068\t415.0\tNormalizedDNA\tD16\n1.SKB3.640195.Test.plate.4.C8\tSample\t384PP_AQ_BP2_HT\tF16\t12.068\t415.0\tNormalizedDNA\tF16\n1.SKB4.640189.Test.plate.4.D8\tSample\t384PP_AQ_BP2_HT\tH16\t12.068\t415.0\tNormalizedDNA\tH16\n1.SKB5.640181.Test.plate.4.E8\tSample\t384PP_AQ_BP2_HT\tJ16\t12.068\t415.0\tNormalizedDNA\tJ16\n1.SKB6.640176.Test.plate.4.F8\tSample\t384PP_AQ_BP2_HT\tL16\t12.068\t415.0\tNormalizedDNA\tL16\nvibrio.positive.control.Test.plate.4.G8\tSample\t384PP_AQ_BP2_HT\tN16\t6.089\t820.0\tNormalizedDNA\tN16\nblank.Test.plate.4.H8\tSample\t384PP_AQ_BP2_HT\tP16\t0.342\t3500.0\tNormalizedDNA\tP16\n1.SKB1.640202.Test.plate.4.A9\tSample\t384PP_AQ_BP2_HT\tB18\t12.068\t415.0\tNormalizedDNA\tB18\n1.SKB2.640194.Test.plate.4.B9\tSample\t384PP_AQ_BP2_HT\tD18\t12.068\t415.0\tNormalizedDNA\tD18\n1.SKB3.640195.Test.plate.4.C9\tSample\t384PP_AQ_BP2_HT\tF18\t12.068\t415.0\tNormalizedDNA\tF18\n1.SKB4.640189
.Test.plate.4.D9\tSample\t384PP_AQ_BP2_HT\tH18\t12.068\t415.0\tNormalizedDNA\tH18\n1.SKB5.640181.Test.plate.4.E9\tSample\t384PP_AQ_BP2_HT\tJ18\t12.068\t415.0\tNormalizedDNA\tJ18\n1.SKB6.640176.Test.plate.4.F9\tSample\t384PP_AQ_BP2_HT\tL18\t12.068\t415.0\tNormalizedDNA\tL18\nvibrio.positive.control.Test.plate.4.G9\tSample\t384PP_AQ_BP2_HT\tN18\t6.089\t820.0\tNormalizedDNA\tN18\nblank.Test.plate.4.H9\tSample\t384PP_AQ_BP2_HT\tP18\t0.342\t3500.0\tNormalizedDNA\tP18\n1.SKB1.640202.Test.plate.4.A10\tSample\t384PP_AQ_BP2_HT\tB20\t12.068\t415.0\tNormalizedDNA\tB20\n1.SKB2.640194.Test.plate.4.B10\tSample\t384PP_AQ_BP2_HT\tD20\t12.068\t415.0\tNormalizedDNA\tD20\n1.SKB3.640195.Test.plate.4.C10\tSample\t384PP_AQ_BP2_HT\tF20\t12.068\t415.0\tNormalizedDNA\tF20\n1.SKB4.640189.Test.plate.4.D10\tSample\t384PP_AQ_BP2_HT\tH20\t12.068\t415.0\tNormalizedDNA\tH20\n1.SKB5.640181.Test.plate.4.E10\tSample\t384PP_AQ_BP2_HT\tJ20\t12.068\t415.0\tNormalizedDNA\tJ20\n1.SKB6.640176.Test.plate.4.F10\tSample\t384PP_AQ_BP2_HT\tL20\t12.068\t415.0\tNormalizedDNA\tL20\nvibrio.positive.control.Test.plate.4.G10\tSample\t384PP_AQ_BP2_HT\tN20\t6.089\t820.0\tNormalizedDNA\tN20\nblank.Test.plate.4.H10\tSample\t384PP_AQ_BP2_HT\tP20\t0.342\t3500.0\tNormalizedDNA\tP20\n1.SKB1.640202.Test.plate.4.A11\tSample\t384PP_AQ_BP2_HT\tB22\t12.068\t415.0\tNormalizedDNA\tB22\n1.SKB2.640194.Test.plate.4.B11\tSample\t384PP_AQ_BP2_HT\tD22\t12.068\t415.0\tNormalizedDNA\tD22\n1.SKB3.640195.Test.plate.4.C11\tSample\t384PP_AQ_BP2_HT\tF22\t12.068\t415.0\tNormalizedDNA\tF22\n1.SKB4.640189.Test.plate.4.D11\tSample\t384PP_AQ_BP2_HT\tH22\t12.068\t415.0\tNormalizedDNA\tH22\n1.SKB5.640181.Test.plate.4.E11\tSample\t384PP_AQ_BP2_HT\tJ22\t12.068\t415.0\tNormalizedDNA\tJ22\n1.SKB6.640176.Test.plate.4.F11\tSample\t384PP_AQ_BP2_HT\tL22\t12.068\t415.0\tNormalizedDNA\tL22\nvibrio.positive.control.Test.plate.4.G11\tSample\t384PP_AQ_BP2_HT\tN22\t6.089\t820.0\tNormalizedDNA\tN22\nblank.Test.plate.4.H11\tSample\t384PP_AQ_BP2_HT\tP22\t0.342\t3500.0
\tNormalizedDNA\tP22\n1.SKB1.640202.Test.plate.4.A12\tSample\t384PP_AQ_BP2_HT\tB24\t12.068\t415.0\tNormalizedDNA\tB24\n1.SKB2.640194.Test.plate.4.B12\tSample\t384PP_AQ_BP2_HT\tD24\t12.068\t415.0\tNormalizedDNA\tD24\n1.SKB3.640195.Test.plate.4.C12\tSample\t384PP_AQ_BP2_HT\tF24\t12.068\t415.0\tNormalizedDNA\tF24\n1.SKB4.640189.Test.plate.4.D12\tSample\t384PP_AQ_BP2_HT\tH24\t12.068\t415.0\tNormalizedDNA\tH24\n1.SKB5.640181.Test.plate.4.E12\tSample\t384PP_AQ_BP2_HT\tJ24\t12.068\t415.0\tNormalizedDNA\tJ24\n1.SKM6.640187.Test.plate.4.F12\tSample\t384PP_AQ_BP2_HT\tL24\t12.068\t415.0\tNormalizedDNA\tL24\nvibrio.positive.control.Test.plate.4.G12\tSample\t384PP_AQ_BP2_HT\tN24\t6.089\t820.0\tNormalizedDNA\tN24'
EXPERIMENTAL_SAMPLES_PREP_EXAMPLE = """sample_name\tBARCODE\tPRIMER\tPrimer_Plate\tWell_ID\tPlating\tExtractionKit_lot\tExtraction_robot\tTM1000_8_tool\tPrimer_date\tMasterMix_lot\tWater_Lot\tProcessing_robot\tTM300_8_tool\tTM50_8_tool\tSample_Plate\tProject_name\tOrig_name\tWell_description\tEXPERIMENT_DESIGN_DESCRIPTION\tLIBRARY_CONSTRUCTION_PROTOCOL\tLINKER\tPLATFORM\tRUN_CENTER\tRUN_DATE\tRUN_PREFIX\tpcr_primers\tsequencing_meth\ttarget_gene\ttarget_subfragment\tcenter_name\tcenter_project_name\tINSTRUMENT_MODEL\tRUNID\n1.SKB1.640202.Test.plate.1.A1\tAGCCTTCGTCGC\tGTGTGYCAGCMGCCGCGGTAA\t1\tA1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB1.640202\tTest plate 1_1.SKB1.640202.Test.plate.1.A1_A1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.1.A2\tTCCATACCGGAA\tGTGTGYCAGCMGCCGCGGTAA\t1\tA2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB1.640202\tTest plate 1_1.SKB1.640202.Test.plate.1.A2_A2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.1.A3\tAGCCCTGCTACA\tGTGTGYCAGCMGCCGCGGTAA\t1\tA3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB1.640202\tTest plate 1_1.SKB1.640202.Test.plate.1.A3_A3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; 
REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.1.A4\tCCTAACGGTCCA\tGTGTGYCAGCMGCCGCGGTAA\t1\tA4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB1.640202\tTest plate 1_1.SKB1.640202.Test.plate.1.A4_A4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.1.A5\tCGCGCCTTAAAC\tGTGTGYCAGCMGCCGCGGTAA\t1\tA5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB1.640202\tTest plate 1_1.SKB1.640202.Test.plate.1.A5_A5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.1.A6\tTATGGTACCCAG\tGTGTGYCAGCMGCCGCGGTAA\t1\tA6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB1.640202\tTest plate 1_1.SKB1.640202.Test.plate.1.A6_A6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.1.A7\tTACAATATCTGT\tGTGTGYCAGCMGCCGCGGTAA\t1\tA7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB1.640202\tTest plate 1_1.SKB1.640202.Test.plate.1.A7_A7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r 
amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.1.A8\tAATTTAGGTAGG\tGTGTGYCAGCMGCCGCGGTAA\t1\tA8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB1.640202\tTest plate 1_1.SKB1.640202.Test.plate.1.A8_A8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.1.A9\tGACTCAACCAGT\tGTGTGYCAGCMGCCGCGGTAA\t1\tA9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB1.640202\tTest plate 1_1.SKB1.640202.Test.plate.1.A9_A9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.1.A10\tGCCTCTACGTCG\tGTGTGYCAGCMGCCGCGGTAA\t1\tA10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB1.640202\tTest plate 1_1.SKB1.640202.Test.plate.1.A10_A10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.1.A11\tACTACTGAGGAT\tGTGTGYCAGCMGCCGCGGTAA\t1\tA11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB1.640202\tTest plate 
1_1.SKB1.640202.Test.plate.1.A11_A11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.1.A12\tAATTCACCTCCT\tGTGTGYCAGCMGCCGCGGTAA\t1\tA12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB1.640202\tTest plate 1_1.SKB1.640202.Test.plate.1.A12_A12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.1.B1\tCGTATAAATGCG\tGTGTGYCAGCMGCCGCGGTAA\t1\tB1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB2.640194\tTest plate 1_1.SKB2.640194.Test.plate.1.B1_B1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.1.B2\tATGCTGCAACAC\tGTGTGYCAGCMGCCGCGGTAA\t1\tB2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB2.640194\tTest plate 1_1.SKB2.640194.Test.plate.1.B2_B2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.1.B3\tACTCGCTCGCTG\tGTGTGYCAGCMGCCGCGGTAA\t1\tB3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB2.640194\tTest plate 1_1.SKB2.640194.Test.plate.1.B3_B3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.1.B4\tTTCCTTAGTAGT\tGTGTGYCAGCMGCCGCGGTAA\t1\tB4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB2.640194\tTest plate 1_1.SKB2.640194.Test.plate.1.B4_B4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.1.B5\tCGTCCGTATGAA\tGTGTGYCAGCMGCCGCGGTAA\t1\tB5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB2.640194\tTest plate 1_1.SKB2.640194.Test.plate.1.B5_B5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.1.B6\tACGTGAGGAACG\tGTGTGYCAGCMGCCGCGGTAA\t1\tB6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB2.640194\tTest plate 1_1.SKB2.640194.Test.plate.1.B6_B6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.1.B7\tGGTTGCCCTGTA\tGTGTGYCAGCMGCCGCGGTAA\t1\tB7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB2.640194\tTest plate 1_1.SKB2.640194.Test.plate.1.B7_B7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.1.B8\tCATATAGCCCGA\tGTGTGYCAGCMGCCGCGGTAA\t1\tB8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB2.640194\tTest plate 1_1.SKB2.640194.Test.plate.1.B8_B8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.1.B9\tGCCTATGAGATC\tGTGTGYCAGCMGCCGCGGTAA\t1\tB9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB2.640194\tTest plate 1_1.SKB2.640194.Test.plate.1.B9_B9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.1.B10\tCAAGTGAAGGGA\tGTGTGYCAGCMGCCGCGGTAA\t1\tB10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB2.640194\tTest plate 1_1.SKB2.640194.Test.plate.1.B10_B10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.1.B11\tCACGTTTATTCC\tGTGTGYCAGCMGCCGCGGTAA\t1\tB11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB2.640194\tTest plate 1_1.SKB2.640194.Test.plate.1.B11_B11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.1.B12\tTAATCGGTGCCA\tGTGTGYCAGCMGCCGCGGTAA\t1\tB12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB2.640194\tTest plate 1_1.SKB2.640194.Test.plate.1.B12_B12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.1.C1\tTGACTAATGGCC\tGTGTGYCAGCMGCCGCGGTAA\t1\tC1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB3.640195\tTest plate 1_1.SKB3.640195.Test.plate.1.C1_C1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.1.C2\tCGGGACACCCGA\tGTGTGYCAGCMGCCGCGGTAA\t1\tC2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB3.640195\tTest plate 1_1.SKB3.640195.Test.plate.1.C2_C2\tAnalysis of the 
Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.1.C3\tCTGTCTATACTA\tGTGTGYCAGCMGCCGCGGTAA\t1\tC3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB3.640195\tTest plate 1_1.SKB3.640195.Test.plate.1.C3_C3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.1.C4\tTATGCCAGAGAT\tGTGTGYCAGCMGCCGCGGTAA\t1\tC4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB3.640195\tTest plate 1_1.SKB3.640195.Test.plate.1.C4_C4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.1.C5\tCGTTTGGAATGA\tGTGTGYCAGCMGCCGCGGTAA\t1\tC5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB3.640195\tTest plate 1_1.SKB3.640195.Test.plate.1.C5_C5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.1.C6\tAAGAACTCATGA\tGTGTGYCAGCMGCCGCGGTAA\t1\tC6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis 
Soils\t1.SKB3.640195\tTest plate 1_1.SKB3.640195.Test.plate.1.C6_C6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.1.C7\tTGATATCGTCTT\tGTGTGYCAGCMGCCGCGGTAA\t1\tC7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB3.640195\tTest plate 1_1.SKB3.640195.Test.plate.1.C7_C7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.1.C8\tCGGTGACCTACT\tGTGTGYCAGCMGCCGCGGTAA\t1\tC8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB3.640195\tTest plate 1_1.SKB3.640195.Test.plate.1.C8_C8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.1.C9\tAATGCGCGTATA\tGTGTGYCAGCMGCCGCGGTAA\t1\tC9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB3.640195\tTest plate 1_1.SKB3.640195.Test.plate.1.C9_C9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.1.C10\tCTTGATTCTTGA\tGTGTGYCAGCMGCCGCGGTAA\t1\tC10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB3.640195\tTest plate 1_1.SKB3.640195.Test.plate.1.C10_C10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.1.C11\tGAAATCTTGAAG\tGTGTGYCAGCMGCCGCGGTAA\t1\tC11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB3.640195\tTest plate 1_1.SKB3.640195.Test.plate.1.C11_C11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.1.C12\tGAGATACAGTTC\tGTGTGYCAGCMGCCGCGGTAA\t1\tC12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB3.640195\tTest plate 1_1.SKB3.640195.Test.plate.1.C12_C12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.1.D1\tGTGGAGTCTCAT\tGTGTGYCAGCMGCCGCGGTAA\t1\tD1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB4.640189\tTest plate 1_1.SKB4.640189.Test.plate.1.D1_D1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.1.D2\tACCTTACACCTT\tGTGTGYCAGCMGCCGCGGTAA\t1\tD2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB4.640189\tTest plate 1_1.SKB4.640189.Test.plate.1.D2_D2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.1.D3\tTAATCTCGCCGG\tGTGTGYCAGCMGCCGCGGTAA\t1\tD3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB4.640189\tTest plate 1_1.SKB4.640189.Test.plate.1.D3_D3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.1.D4\tATCTAGTGGCAA\tGTGTGYCAGCMGCCGCGGTAA\t1\tD4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB4.640189\tTest plate 1_1.SKB4.640189.Test.plate.1.D4_D4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.1.D5\tACGCTTAACGAC\tGTGTGYCAGCMGCCGCGGTAA\t1\tD5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB4.640189\tTest plate 1_1.SKB4.640189.Test.plate.1.D5_D5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.1.D6\tTACGGATTATGG\tGTGTGYCAGCMGCCGCGGTAA\t1\tD6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB4.640189\tTest plate 1_1.SKB4.640189.Test.plate.1.D6_D6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.1.D7\tATACATGCAAGA\tGTGTGYCAGCMGCCGCGGTAA\t1\tD7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB4.640189\tTest plate 1_1.SKB4.640189.Test.plate.1.D7_D7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.1.D8\tCTTAGTGCAGAA\tGTGTGYCAGCMGCCGCGGTAA\t1\tD8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB4.640189\tTest plate 1_1.SKB4.640189.Test.plate.1.D8_D8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.1.D9\tAATCTTGCGCCG\tGTGTGYCAGCMGCCGCGGTAA\t1\tD9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB4.640189\tTest plate 1_1.SKB4.640189.Test.plate.1.D9_D9\tAnalysis of the Cannabis Plant 
Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.1.D10\tAGGATCAGGGAA\tGTGTGYCAGCMGCCGCGGTAA\t1\tD10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB4.640189\tTest plate 1_1.SKB4.640189.Test.plate.1.D10_D10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.1.D11\tAATAACTAGGGT\tGTGTGYCAGCMGCCGCGGTAA\t1\tD11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB4.640189\tTest plate 1_1.SKB4.640189.Test.plate.1.D11_D11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.1.D12\tTATTGCAGCAGC\tGTGTGYCAGCMGCCGCGGTAA\t1\tD12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB4.640189\tTest plate 1_1.SKB4.640189.Test.plate.1.D12_D12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.1.E1\tTGATGTGCTAAG\tGTGTGYCAGCMGCCGCGGTAA\t1\tE1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis 
Soils\t1.SKB5.640181\tTest plate 1_1.SKB5.640181.Test.plate.1.E1_E1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.1.E2\tGTAGTAGACCAT\tGTGTGYCAGCMGCCGCGGTAA\t1\tE2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB5.640181\tTest plate 1_1.SKB5.640181.Test.plate.1.E2_E2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.1.E3\tAGTAAAGATCGT\tGTGTGYCAGCMGCCGCGGTAA\t1\tE3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB5.640181\tTest plate 1_1.SKB5.640181.Test.plate.1.E3_E3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.1.E4\tCTCGCCCTCGCC\tGTGTGYCAGCMGCCGCGGTAA\t1\tE4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB5.640181\tTest plate 1_1.SKB5.640181.Test.plate.1.E4_E4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.1.E5\tTCTCTTTCGACA\tGTGTGYCAGCMGCCGCGGTAA\t1\tE5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB5.640181\tTest plate 1_1.SKB5.640181.Test.plate.1.E5_E5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.1.E6\tACATACTGAGCA\tGTGTGYCAGCMGCCGCGGTAA\t1\tE6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB5.640181\tTest plate 1_1.SKB5.640181.Test.plate.1.E6_E6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.1.E7\tGTTGATACGATG\tGTGTGYCAGCMGCCGCGGTAA\t1\tE7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB5.640181\tTest plate 1_1.SKB5.640181.Test.plate.1.E7_E7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.1.E8\tGTCAACGCTGTC\tGTGTGYCAGCMGCCGCGGTAA\t1\tE8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB5.640181\tTest plate 1_1.SKB5.640181.Test.plate.1.E8_E8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.1.E9\tTGAGACCCTACA\tGTGTGYCAGCMGCCGCGGTAA\t1\tE9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB5.640181\tTest plate 1_1.SKB5.640181.Test.plate.1.E9_E9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.1.E10\tACTTGGTGTAAG\tGTGTGYCAGCMGCCGCGGTAA\t1\tE10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB5.640181\tTest plate 1_1.SKB5.640181.Test.plate.1.E10_E10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.1.E11\tATTACGTATCAT\tGTGTGYCAGCMGCCGCGGTAA\t1\tE11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB5.640181\tTest plate 1_1.SKB5.640181.Test.plate.1.E11_E11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.1.E12\tCACGCAGTCTAC\tGTGTGYCAGCMGCCGCGGTAA\t1\tE12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB5.640181\tTest plate 1_1.SKB5.640181.Test.plate.1.E12_E12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.1.F1\tTGTGCACGCCAT\tGTGTGYCAGCMGCCGCGGTAA\t1\tF1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB6.640176\tTest plate 1_1.SKB6.640176.Test.plate.1.F1_F1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.1.F2\tCCGGACAAGAAG\tGTGTGYCAGCMGCCGCGGTAA\t1\tF2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB6.640176\tTest plate 1_1.SKB6.640176.Test.plate.1.F2_F2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.1.F3\tTTGCTGGACGCT\tGTGTGYCAGCMGCCGCGGTAA\t1\tF3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB6.640176\tTest plate 1_1.SKB6.640176.Test.plate.1.F3_F3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.1.F4\tTACTAACGCGGT\tGTGTGYCAGCMGCCGCGGTAA\t1\tF4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB6.640176\tTest plate 1_1.SKB6.640176.Test.plate.1.F4_F4\tAnalysis of the Cannabis Plant 
Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.1.F5\tGCGATCACACCT\tGTGTGYCAGCMGCCGCGGTAA\t1\tF5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB6.640176\tTest plate 1_1.SKB6.640176.Test.plate.1.F5_F5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.1.F6\tCAAACGCACTAA\tGTGTGYCAGCMGCCGCGGTAA\t1\tF6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB6.640176\tTest plate 1_1.SKB6.640176.Test.plate.1.F6_F6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.1.F7\tGAAGAGGGTTGA\tGTGTGYCAGCMGCCGCGGTAA\t1\tF7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB6.640176\tTest plate 1_1.SKB6.640176.Test.plate.1.F7_F7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.1.F8\tTGAGTGGTCTGT\tGTGTGYCAGCMGCCGCGGTAA\t1\tF8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB6.640176\tTest 
plate 1_1.SKB6.640176.Test.plate.1.F8_F8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.1.F9\tTTACACAAAGGC\tGTGTGYCAGCMGCCGCGGTAA\t1\tF9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB6.640176\tTest plate 1_1.SKB6.640176.Test.plate.1.F9_F9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.1.F10\tACGACGCATTTG\tGTGTGYCAGCMGCCGCGGTAA\t1\tF10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB6.640176\tTest plate 1_1.SKB6.640176.Test.plate.1.F10_F10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.1.F11\tTATCCAAGCGCA\tGTGTGYCAGCMGCCGCGGTAA\t1\tF11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB6.640176\tTest plate 1_1.SKB6.640176.Test.plate.1.F11_F11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB8.640193\tAGAGCCAAGAGC\tGTGTGYCAGCMGCCGCGGTAA\t1\tF12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\tCannabis Soils\t1.SKB8.640193\tTest plate 1_1.SKB8.640193_F12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.2.A1\tCTACAGGGTCTC\tGTGTGYCAGCMGCCGCGGTAA\t2\tA1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB1.640202\tTest plate 2_1.SKB1.640202.Test.plate.2.A1_A1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.2.A2\tCTTGGAGGCTTA\tGTGTGYCAGCMGCCGCGGTAA\t2\tA2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB1.640202\tTest plate 2_1.SKB1.640202.Test.plate.2.A2_A2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.2.A3\tTATCATATTACG\tGTGTGYCAGCMGCCGCGGTAA\t2\tA3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB1.640202\tTest plate 2_1.SKB1.640202.Test.plate.2.A3_A3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.2.A4\tCTATATTATCCG\tGTGTGYCAGCMGCCGCGGTAA\t2\tA4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB1.640202\tTest plate 2_1.SKB1.640202.Test.plate.2.A4_A4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.2.A5\tACCGAACAATCC\tGTGTGYCAGCMGCCGCGGTAA\t2\tA5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB1.640202\tTest plate 2_1.SKB1.640202.Test.plate.2.A5_A5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.2.A6\tACGGTACCCTAC\tGTGTGYCAGCMGCCGCGGTAA\t2\tA6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB1.640202\tTest plate 2_1.SKB1.640202.Test.plate.2.A6_A6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.2.A7\tTGAGTCATTGAG\tGTGTGYCAGCMGCCGCGGTAA\t2\tA7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB1.640202\tTest plate 2_1.SKB1.640202.Test.plate.2.A7_A7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.2.A8\tACCTACTTGTCT\tGTGTGYCAGCMGCCGCGGTAA\t2\tA8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB1.640202\tTest plate 2_1.SKB1.640202.Test.plate.2.A8_A8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.2.A9\tACTGTGACGTCC\tGTGTGYCAGCMGCCGCGGTAA\t2\tA9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB1.640202\tTest plate 2_1.SKB1.640202.Test.plate.2.A9_A9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.2.A10\tCTCTGAGGTAAC\tGTGTGYCAGCMGCCGCGGTAA\t2\tA10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB1.640202\tTest plate 2_1.SKB1.640202.Test.plate.2.A10_A10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.2.A11\tCATGTCTTCCAT\tGTGTGYCAGCMGCCGCGGTAA\t2\tA11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB1.640202\tTest plate 2_1.SKB1.640202.Test.plate.2.A11_A11\tAnalysis of the 
Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.2.A12\tAACAGTAAACAA\tGTGTGYCAGCMGCCGCGGTAA\t2\tA12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB1.640202\tTest plate 2_1.SKB1.640202.Test.plate.2.A12_A12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.2.B1\tGTTCATTAAACT\tGTGTGYCAGCMGCCGCGGTAA\t2\tB1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB2.640194\tTest plate 2_1.SKB2.640194.Test.plate.2.B1_B1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.2.B2\tGTGCCGGCCGAC\tGTGTGYCAGCMGCCGCGGTAA\t2\tB2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB2.640194\tTest plate 2_1.SKB2.640194.Test.plate.2.B2_B2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.2.B3\tCCTTGACCGATG\tGTGTGYCAGCMGCCGCGGTAA\t2\tB3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis 
Soils\t1.SKB2.640194\tTest plate 2_1.SKB2.640194.Test.plate.2.B3_B3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.2.B4\tCAAACTGCGTTG\tGTGTGYCAGCMGCCGCGGTAA\t2\tB4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB2.640194\tTest plate 2_1.SKB2.640194.Test.plate.2.B4_B4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.2.B5\tTCGAGAGTTTGC\tGTGTGYCAGCMGCCGCGGTAA\t2\tB5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB2.640194\tTest plate 2_1.SKB2.640194.Test.plate.2.B5_B5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.2.B6\tCGACACGGAGAA\tGTGTGYCAGCMGCCGCGGTAA\t2\tB6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB2.640194\tTest plate 2_1.SKB2.640194.Test.plate.2.B6_B6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.2.B7\tTCCACAGGGTTC\tGTGTGYCAGCMGCCGCGGTAA\t2\tB7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB2.640194\tTest plate 2_1.SKB2.640194.Test.plate.2.B7_B7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.2.B8\tGGAGAACGACAC\tGTGTGYCAGCMGCCGCGGTAA\t2\tB8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB2.640194\tTest plate 2_1.SKB2.640194.Test.plate.2.B8_B8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.2.B9\tCCTACCATTGTT\tGTGTGYCAGCMGCCGCGGTAA\t2\tB9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB2.640194\tTest plate 2_1.SKB2.640194.Test.plate.2.B9_B9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.2.B10\tTCCGGCGGGCAA\tGTGTGYCAGCMGCCGCGGTAA\t2\tB10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB2.640194\tTest plate 2_1.SKB2.640194.Test.plate.2.B10_B10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.2.B11\tTAATCCATAATC\tGTGTGYCAGCMGCCGCGGTAA\t2\tB11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB2.640194\tTest plate 2_1.SKB2.640194.Test.plate.2.B11_B11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.2.B12\tCCTCCGTCATGG\tGTGTGYCAGCMGCCGCGGTAA\t2\tB12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB2.640194\tTest plate 2_1.SKB2.640194.Test.plate.2.B12_B12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.2.C1\tTTCGATGCCGCA\tGTGTGYCAGCMGCCGCGGTAA\t2\tC1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB3.640195\tTest plate 2_1.SKB3.640195.Test.plate.2.C1_C1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.2.C2\tAGAGGGTGATCG\tGTGTGYCAGCMGCCGCGGTAA\t2\tC2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB3.640195\tTest plate 2_1.SKB3.640195.Test.plate.2.C2_C2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.2.C3\tAGCTCTAGAAAC\tGTGTGYCAGCMGCCGCGGTAA\t2\tC3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB3.640195\tTest plate 2_1.SKB3.640195.Test.plate.2.C3_C3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.2.C4\tCTGACACGAATA\tGTGTGYCAGCMGCCGCGGTAA\t2\tC4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB3.640195\tTest plate 2_1.SKB3.640195.Test.plate.2.C4_C4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.2.C5\tGCTGCCCACCTA\tGTGTGYCAGCMGCCGCGGTAA\t2\tC5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB3.640195\tTest plate 2_1.SKB3.640195.Test.plate.2.C5_C5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.2.C6\tGCGTTTGCTAGC\tGTGTGYCAGCMGCCGCGGTAA\t2\tC6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB3.640195\tTest plate 2_1.SKB3.640195.Test.plate.2.C6_C6\tAnalysis of the Cannabis Plant 
Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.2.C7\tAGATCGTGCCTA\tGTGTGYCAGCMGCCGCGGTAA\t2\tC7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB3.640195\tTest plate 2_1.SKB3.640195.Test.plate.2.C7_C7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.2.C8\tAATTAATATGTA\tGTGTGYCAGCMGCCGCGGTAA\t2\tC8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB3.640195\tTest plate 2_1.SKB3.640195.Test.plate.2.C8_C8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.2.C9\tCATTTCGCACTT\tGTGTGYCAGCMGCCGCGGTAA\t2\tC9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB3.640195\tTest plate 2_1.SKB3.640195.Test.plate.2.C9_C9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.2.C10\tACATGATATTCT\tGTGTGYCAGCMGCCGCGGTAA\t2\tC10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis 
Soils\t1.SKB3.640195\tTest plate 2_1.SKB3.640195.Test.plate.2.C10_C10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.2.C11\tGCAACGAACGAG\tGTGTGYCAGCMGCCGCGGTAA\t2\tC11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB3.640195\tTest plate 2_1.SKB3.640195.Test.plate.2.C11_C11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.2.C12\tAGATGTCCGTCA\tGTGTGYCAGCMGCCGCGGTAA\t2\tC12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB3.640195\tTest plate 2_1.SKB3.640195.Test.plate.2.C12_C12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.2.D1\tTCGTTATTCAGT\tGTGTGYCAGCMGCCGCGGTAA\t2\tD1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB4.640189\tTest plate 2_1.SKB4.640189.Test.plate.2.D1_D1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.2.D2\tGGATACTCGCAT\tGTGTGYCAGCMGCCGCGGTAA\t2\tD2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB4.640189\tTest plate 2_1.SKB4.640189.Test.plate.2.D2_D2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.2.D3\tAATGTTCAACTT\tGTGTGYCAGCMGCCGCGGTAA\t2\tD3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB4.640189\tTest plate 2_1.SKB4.640189.Test.plate.2.D3_D3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.2.D4\tAGCAGTGCGGTG\tGTGTGYCAGCMGCCGCGGTAA\t2\tD4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB4.640189\tTest plate 2_1.SKB4.640189.Test.plate.2.D4_D4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.2.D5\tGCATATGCACTG\tGTGTGYCAGCMGCCGCGGTAA\t2\tD5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB4.640189\tTest plate 2_1.SKB4.640189.Test.plate.2.D5_D5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.2.D6\tCCGGCGACAGAA\tGTGTGYCAGCMGCCGCGGTAA\t2\tD6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB4.640189\tTest plate 2_1.SKB4.640189.Test.plate.2.D6_D6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.2.D7\tCCTCACTAGCGA\tGTGTGYCAGCMGCCGCGGTAA\t2\tD7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB4.640189\tTest plate 2_1.SKB4.640189.Test.plate.2.D7_D7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.2.D8\tCTAATCAGAGTG\tGTGTGYCAGCMGCCGCGGTAA\t2\tD8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB4.640189\tTest plate 2_1.SKB4.640189.Test.plate.2.D8_D8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.2.D9\tCTACTCCACGAG\tGTGTGYCAGCMGCCGCGGTAA\t2\tD9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB4.640189\tTest plate 2_1.SKB4.640189.Test.plate.2.D9_D9\tAnalysis of the Cannabis Plant 
Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.2.D10\tTAAGGCATCGCT\tGTGTGYCAGCMGCCGCGGTAA\t2\tD10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB4.640189\tTest plate 2_1.SKB4.640189.Test.plate.2.D10_D10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.2.D11\tAGCGCGGCGAAT\tGTGTGYCAGCMGCCGCGGTAA\t2\tD11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB4.640189\tTest plate 2_1.SKB4.640189.Test.plate.2.D11_D11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.2.D12\tTAGCAGTTGCGT\tGTGTGYCAGCMGCCGCGGTAA\t2\tD12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB4.640189\tTest plate 2_1.SKB4.640189.Test.plate.2.D12_D12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.2.E1\tACTCTGTAATTA\tGTGTGYCAGCMGCCGCGGTAA\t2\tE1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis 
Soils\t1.SKB5.640181\tTest plate 2_1.SKB5.640181.Test.plate.2.E1_E1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.2.E2\tTCATGGCCTCCG\tGTGTGYCAGCMGCCGCGGTAA\t2\tE2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB5.640181\tTest plate 2_1.SKB5.640181.Test.plate.2.E2_E2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.2.E3\tCAATCATAGGTG\tGTGTGYCAGCMGCCGCGGTAA\t2\tE3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB5.640181\tTest plate 2_1.SKB5.640181.Test.plate.2.E3_E3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.2.E4\tGTTGGACGAAGG\tGTGTGYCAGCMGCCGCGGTAA\t2\tE4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB5.640181\tTest plate 2_1.SKB5.640181.Test.plate.2.E4_E4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.2.E5\tGTCACTCCGAAC\tGTGTGYCAGCMGCCGCGGTAA\t2\tE5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB5.640181\tTest plate 2_1.SKB5.640181.Test.plate.2.E5_E5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.2.E6\tCGTTCTGGTGGT\tGTGTGYCAGCMGCCGCGGTAA\t2\tE6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB5.640181\tTest plate 2_1.SKB5.640181.Test.plate.2.E6_E6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.2.E7\tTAGTTCGGTGAC\tGTGTGYCAGCMGCCGCGGTAA\t2\tE7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB5.640181\tTest plate 2_1.SKB5.640181.Test.plate.2.E7_E7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.2.E8\tTTAATGGATCGG\tGTGTGYCAGCMGCCGCGGTAA\t2\tE8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB5.640181\tTest plate 2_1.SKB5.640181.Test.plate.2.E8_E8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.2.E9\tTCAAGTCCGCAC\tGTGTGYCAGCMGCCGCGGTAA\t2\tE9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB5.640181\tTest plate 2_1.SKB5.640181.Test.plate.2.E9_E9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.2.E10\tCACACAAAGTCA\tGTGTGYCAGCMGCCGCGGTAA\t2\tE10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB5.640181\tTest plate 2_1.SKB5.640181.Test.plate.2.E10_E10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.2.E11\tGTCAGGTGCGGC\tGTGTGYCAGCMGCCGCGGTAA\t2\tE11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB5.640181\tTest plate 2_1.SKB5.640181.Test.plate.2.E11_E11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.2.E12\tTTGAACAAGCCA\tGTGTGYCAGCMGCCGCGGTAA\t2\tE12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB5.640181\tTest plate 2_1.SKB5.640181.Test.plate.2.E12_E12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.2.F1\tATATGTTCTCAA\tGTGTGYCAGCMGCCGCGGTAA\t2\tF1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB6.640176\tTest plate 2_1.SKB6.640176.Test.plate.2.F1_F1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.2.F2\tATGTGCTGCTCG\tGTGTGYCAGCMGCCGCGGTAA\t2\tF2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB6.640176\tTest plate 2_1.SKB6.640176.Test.plate.2.F2_F2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.2.F3\tCCGATAAAGGTT\tGTGTGYCAGCMGCCGCGGTAA\t2\tF3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB6.640176\tTest plate 2_1.SKB6.640176.Test.plate.2.F3_F3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.2.F4\tCAGGAACCAGGA\tGTGTGYCAGCMGCCGCGGTAA\t2\tF4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB6.640176\tTest plate 2_1.SKB6.640176.Test.plate.2.F4_F4\tAnalysis of the Cannabis Plant 
Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.2.F5\tGCATAAACGACT\tGTGTGYCAGCMGCCGCGGTAA\t2\tF5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB6.640176\tTest plate 2_1.SKB6.640176.Test.plate.2.F5_F5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.2.F6\tATCGTAGTGGTC\tGTGTGYCAGCMGCCGCGGTAA\t2\tF6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB6.640176\tTest plate 2_1.SKB6.640176.Test.plate.2.F6_F6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.2.F7\tACTAAAGCAAAC\tGTGTGYCAGCMGCCGCGGTAA\t2\tF7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB6.640176\tTest plate 2_1.SKB6.640176.Test.plate.2.F7_F7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.2.F8\tTAGGAACTCACC\tGTGTGYCAGCMGCCGCGGTAA\t2\tF8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB6.640176\tTest 
plate 2_1.SKB6.640176.Test.plate.2.F8_F8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.2.F9\tGTCCGTCCTGGT\tGTGTGYCAGCMGCCGCGGTAA\t2\tF9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB6.640176\tTest plate 2_1.SKB6.640176.Test.plate.2.F9_F9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.2.F10\tCGAGGCGAGTCA\tGTGTGYCAGCMGCCGCGGTAA\t2\tF10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB6.640176\tTest plate 2_1.SKB6.640176.Test.plate.2.F10_F10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.2.F11\tTTCCAATACTCA\tGTGTGYCAGCMGCCGCGGTAA\t2\tF11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKB6.640176\tTest plate 2_1.SKB6.640176.Test.plate.2.F11_F11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKD1.640179\tAACTCAATAGCG\tGTGTGYCAGCMGCCGCGGTAA\t2\tF12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\tCannabis Soils\t1.SKD1.640179\tTest plate 2_1.SKD1.640179_F12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.3.A1\tCCTCGCATGACC\tGTGTGYCAGCMGCCGCGGTAA\t3\tA1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB1.640202\tTest plate 3_1.SKB1.640202.Test.plate.3.A1_A1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.3.A2\tGGCGTAACGGCA\tGTGTGYCAGCMGCCGCGGTAA\t3\tA2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB1.640202\tTest plate 3_1.SKB1.640202.Test.plate.3.A2_A2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.3.A3\tGCGAGGAAGTCC\tGTGTGYCAGCMGCCGCGGTAA\t3\tA3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB1.640202\tTest plate 3_1.SKB1.640202.Test.plate.3.A3_A3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.3.A4\tCAAATTCGGGAT\tGTGTGYCAGCMGCCGCGGTAA\t3\tA4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB1.640202\tTest plate 3_1.SKB1.640202.Test.plate.3.A4_A4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.3.A5\tTTGTGTCTCCCT\tGTGTGYCAGCMGCCGCGGTAA\t3\tA5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB1.640202\tTest plate 3_1.SKB1.640202.Test.plate.3.A5_A5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.3.A6\tCAATGTAGACAC\tGTGTGYCAGCMGCCGCGGTAA\t3\tA6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB1.640202\tTest plate 3_1.SKB1.640202.Test.plate.3.A6_A6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.3.A7\tAACCACTAACCG\tGTGTGYCAGCMGCCGCGGTAA\t3\tA7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB1.640202\tTest plate 3_1.SKB1.640202.Test.plate.3.A7_A7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.3.A8\tAACTTTCAGGAG\tGTGTGYCAGCMGCCGCGGTAA\t3\tA8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB1.640202\tTest plate 3_1.SKB1.640202.Test.plate.3.A8_A8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.3.A9\tCCAGGACAGGAA\tGTGTGYCAGCMGCCGCGGTAA\t3\tA9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB1.640202\tTest plate 3_1.SKB1.640202.Test.plate.3.A9_A9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.3.A10\tGCGCGGCGTTGC\tGTGTGYCAGCMGCCGCGGTAA\t3\tA10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB1.640202\tTest plate 3_1.SKB1.640202.Test.plate.3.A10_A10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.3.A11\tGTCGCTTGCACA\tGTGTGYCAGCMGCCGCGGTAA\t3\tA11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB1.640202\tTest plate 3_1.SKB1.640202.Test.plate.3.A11_A11\tAnalysis of the 
Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.3.A12\tTCCGCCTAGTCG\tGTGTGYCAGCMGCCGCGGTAA\t3\tA12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB1.640202\tTest plate 3_1.SKB1.640202.Test.plate.3.A12_A12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.3.B1\tCGCGCAAGTATT\tGTGTGYCAGCMGCCGCGGTAA\t3\tB1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB2.640194\tTest plate 3_1.SKB2.640194.Test.plate.3.B1_B1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.3.B2\tAATACAGACCTG\tGTGTGYCAGCMGCCGCGGTAA\t3\tB2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB2.640194\tTest plate 3_1.SKB2.640194.Test.plate.3.B2_B2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.3.B3\tGGACAAGTGCGA\tGTGTGYCAGCMGCCGCGGTAA\t3\tB3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis 
Soils\t1.SKB2.640194\tTest plate 3_1.SKB2.640194.Test.plate.3.B3_B3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.3.B4\tTACGGTCTGGAT\tGTGTGYCAGCMGCCGCGGTAA\t3\tB4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB2.640194\tTest plate 3_1.SKB2.640194.Test.plate.3.B4_B4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.3.B5\tTTCAGTTCGTTA\tGTGTGYCAGCMGCCGCGGTAA\t3\tB5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB2.640194\tTest plate 3_1.SKB2.640194.Test.plate.3.B5_B5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.3.B6\tCCGCGTCTCAAC\tGTGTGYCAGCMGCCGCGGTAA\t3\tB6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB2.640194\tTest plate 3_1.SKB2.640194.Test.plate.3.B6_B6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.3.B7\tCCGAGGTATAAT\tGTGTGYCAGCMGCCGCGGTAA\t3\tB7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB2.640194\tTest plate 3_1.SKB2.640194.Test.plate.3.B7_B7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.3.B8\tAGATTCGCTCGA\tGTGTGYCAGCMGCCGCGGTAA\t3\tB8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB2.640194\tTest plate 3_1.SKB2.640194.Test.plate.3.B8_B8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.3.B9\tTTGCCGCTCTGG\tGTGTGYCAGCMGCCGCGGTAA\t3\tB9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB2.640194\tTest plate 3_1.SKB2.640194.Test.plate.3.B9_B9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.3.B10\tAGACTTCTCAGG\tGTGTGYCAGCMGCCGCGGTAA\t3\tB10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB2.640194\tTest plate 3_1.SKB2.640194.Test.plate.3.B10_B10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.3.B11\tTCTTGCGGAGTC\tGTGTGYCAGCMGCCGCGGTAA\t3\tB11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB2.640194\tTest plate 3_1.SKB2.640194.Test.plate.3.B11_B11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.3.B12\tCTATCTCCTGTC\tGTGTGYCAGCMGCCGCGGTAA\t3\tB12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB2.640194\tTest plate 3_1.SKB2.640194.Test.plate.3.B12_B12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.3.C1\tAAGGCGCTCCTT\tGTGTGYCAGCMGCCGCGGTAA\t3\tC1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB3.640195\tTest plate 3_1.SKB3.640195.Test.plate.3.C1_C1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.3.C2\tGATCTAATCGAG\tGTGTGYCAGCMGCCGCGGTAA\t3\tC2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB3.640195\tTest plate 3_1.SKB3.640195.Test.plate.3.C2_C2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.3.C3\tCTGATGTACACG\tGTGTGYCAGCMGCCGCGGTAA\t3\tC3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB3.640195\tTest plate 3_1.SKB3.640195.Test.plate.3.C3_C3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.3.C4\tACGTATTCGAAG\tGTGTGYCAGCMGCCGCGGTAA\t3\tC4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB3.640195\tTest plate 3_1.SKB3.640195.Test.plate.3.C4_C4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.3.C5\tGACGTTAAGAAT\tGTGTGYCAGCMGCCGCGGTAA\t3\tC5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB3.640195\tTest plate 3_1.SKB3.640195.Test.plate.3.C5_C5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.3.C6\tTGGTGGAGTTTC\tGTGTGYCAGCMGCCGCGGTAA\t3\tC6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB3.640195\tTest plate 3_1.SKB3.640195.Test.plate.3.C6_C6\tAnalysis of the Cannabis Plant 
Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.3.C7\tTTAACAAGGCAA\tGTGTGYCAGCMGCCGCGGTAA\t3\tC7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB3.640195\tTest plate 3_1.SKB3.640195.Test.plate.3.C7_C7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.3.C8\tAACCGCATAAGT\tGTGTGYCAGCMGCCGCGGTAA\t3\tC8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB3.640195\tTest plate 3_1.SKB3.640195.Test.plate.3.C8_C8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.3.C9\tCCACAACGATCA\tGTGTGYCAGCMGCCGCGGTAA\t3\tC9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB3.640195\tTest plate 3_1.SKB3.640195.Test.plate.3.C9_C9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.3.C10\tAGTTCTCATTAA\tGTGTGYCAGCMGCCGCGGTAA\t3\tC10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis 
Soils\t1.SKB3.640195\tTest plate 3_1.SKB3.640195.Test.plate.3.C10_C10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.3.C11\tGAGCCATCTGTA\tGTGTGYCAGCMGCCGCGGTAA\t3\tC11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB3.640195\tTest plate 3_1.SKB3.640195.Test.plate.3.C11_C11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.3.C12\tGATATACCAGTG\tGTGTGYCAGCMGCCGCGGTAA\t3\tC12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB3.640195\tTest plate 3_1.SKB3.640195.Test.plate.3.C12_C12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.3.D1\tCGCAATGAGGGA\tGTGTGYCAGCMGCCGCGGTAA\t3\tD1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB4.640189\tTest plate 3_1.SKB4.640189.Test.plate.3.D1_D1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.3.D2\tCCGCAGCCGCAG\tGTGTGYCAGCMGCCGCGGTAA\t3\tD2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB4.640189\tTest plate 3_1.SKB4.640189.Test.plate.3.D2_D2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.3.D3\tTGGAGCCTTGTC\tGTGTGYCAGCMGCCGCGGTAA\t3\tD3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB4.640189\tTest plate 3_1.SKB4.640189.Test.plate.3.D3_D3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.3.D4\tTTACTTATCCGA\tGTGTGYCAGCMGCCGCGGTAA\t3\tD4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB4.640189\tTest plate 3_1.SKB4.640189.Test.plate.3.D4_D4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.3.D5\tATGGGACCTTCA\tGTGTGYCAGCMGCCGCGGTAA\t3\tD5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB4.640189\tTest plate 3_1.SKB4.640189.Test.plate.3.D5_D5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.3.D6\tTCCGATAATCGG\tGTGTGYCAGCMGCCGCGGTAA\t3\tD6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB4.640189\tTest plate 3_1.SKB4.640189.Test.plate.3.D6_D6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.3.D7\tAAGTCACACACA\tGTGTGYCAGCMGCCGCGGTAA\t3\tD7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB4.640189\tTest plate 3_1.SKB4.640189.Test.plate.3.D7_D7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.3.D8\tGAAGTAGCGAGC\tGTGTGYCAGCMGCCGCGGTAA\t3\tD8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB4.640189\tTest plate 3_1.SKB4.640189.Test.plate.3.D8_D8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.3.D9\tCACCATCTCCGG\tGTGTGYCAGCMGCCGCGGTAA\t3\tD9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB4.640189\tTest plate 3_1.SKB4.640189.Test.plate.3.D9_D9\tAnalysis of the Cannabis Plant 
Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.3.D10\tGTGTCGAGGGCA\tGTGTGYCAGCMGCCGCGGTAA\t3\tD10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB4.640189\tTest plate 3_1.SKB4.640189.Test.plate.3.D10_D10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.3.D11\tTTCCACACGTGG\tGTGTGYCAGCMGCCGCGGTAA\t3\tD11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB4.640189\tTest plate 3_1.SKB4.640189.Test.plate.3.D11_D11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.3.D12\tAGAATCCACCAC\tGTGTGYCAGCMGCCGCGGTAA\t3\tD12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB4.640189\tTest plate 3_1.SKB4.640189.Test.plate.3.D12_D12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.3.E1\tACGGCGTTATGT\tGTGTGYCAGCMGCCGCGGTAA\t3\tE1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis 
Soils\t1.SKB5.640181\tTest plate 3_1.SKB5.640181.Test.plate.3.E1_E1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.3.E2\tGAACCGTGCAGG\tGTGTGYCAGCMGCCGCGGTAA\t3\tE2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB5.640181\tTest plate 3_1.SKB5.640181.Test.plate.3.E2_E2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.3.E3\tACGTGCCTTAGA\tGTGTGYCAGCMGCCGCGGTAA\t3\tE3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB5.640181\tTest plate 3_1.SKB5.640181.Test.plate.3.E3_E3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.3.E4\tAGTTGTAGTCCG\tGTGTGYCAGCMGCCGCGGTAA\t3\tE4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB5.640181\tTest plate 3_1.SKB5.640181.Test.plate.3.E4_E4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.3.E5\tAGGGACTTCAAT\tGTGTGYCAGCMGCCGCGGTAA\t3\tE5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB5.640181\tTest plate 3_1.SKB5.640181.Test.plate.3.E5_E5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.3.E6\tCGGCCAGAAGCA\tGTGTGYCAGCMGCCGCGGTAA\t3\tE6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB5.640181\tTest plate 3_1.SKB5.640181.Test.plate.3.E6_E6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.3.E7\tTGGCAGCGAGCC\tGTGTGYCAGCMGCCGCGGTAA\t3\tE7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB5.640181\tTest plate 3_1.SKB5.640181.Test.plate.3.E7_E7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.3.E8\tGTGAATGTTCGA\tGTGTGYCAGCMGCCGCGGTAA\t3\tE8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB5.640181\tTest plate 3_1.SKB5.640181.Test.plate.3.E8_E8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.3.E9\tTATGTTGACGGC\tGTGTGYCAGCMGCCGCGGTAA\t3\tE9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB5.640181\tTest plate 3_1.SKB5.640181.Test.plate.3.E9_E9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.3.E10\tAGTGTTTCGGAC\tGTGTGYCAGCMGCCGCGGTAA\t3\tE10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB5.640181\tTest plate 3_1.SKB5.640181.Test.plate.3.E10_E10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.3.E11\tATTTCCGCTAAT\tGTGTGYCAGCMGCCGCGGTAA\t3\tE11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB5.640181\tTest plate 3_1.SKB5.640181.Test.plate.3.E11_E11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.3.E12\tCAAACCTATGGC\tGTGTGYCAGCMGCCGCGGTAA\t3\tE12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB5.640181\tTest plate 3_1.SKB5.640181.Test.plate.3.E12_E12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.3.F1\tCATTTGACGACG\tGTGTGYCAGCMGCCGCGGTAA\t3\tF1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB6.640176\tTest plate 3_1.SKB6.640176.Test.plate.3.F1_F1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.3.F2\tACTAAGTACCCG\tGTGTGYCAGCMGCCGCGGTAA\t3\tF2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB6.640176\tTest plate 3_1.SKB6.640176.Test.plate.3.F2_F2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.3.F3\tCACCCTTGCGAC\tGTGTGYCAGCMGCCGCGGTAA\t3\tF3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB6.640176\tTest plate 3_1.SKB6.640176.Test.plate.3.F3_F3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.3.F4\tGATGCCTAATGA\tGTGTGYCAGCMGCCGCGGTAA\t3\tF4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB6.640176\tTest plate 3_1.SKB6.640176.Test.plate.3.F4_F4\tAnalysis of the Cannabis Plant 
Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.3.F5\tGTACGTCACTGA\tGTGTGYCAGCMGCCGCGGTAA\t3\tF5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB6.640176\tTest plate 3_1.SKB6.640176.Test.plate.3.F5_F5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.3.F6\tTCGCTACAGATG\tGTGTGYCAGCMGCCGCGGTAA\t3\tF6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB6.640176\tTest plate 3_1.SKB6.640176.Test.plate.3.F6_F6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.3.F7\tCCGGCTTATGTG\tGTGTGYCAGCMGCCGCGGTAA\t3\tF7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB6.640176\tTest plate 3_1.SKB6.640176.Test.plate.3.F7_F7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.3.F8\tATAGTCCTTTAA\tGTGTGYCAGCMGCCGCGGTAA\t3\tF8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB6.640176\tTest 
plate 3_1.SKB6.640176.Test.plate.3.F8_F8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.3.F9\tTCGAGCCGATCT\tGTGTGYCAGCMGCCGCGGTAA\t3\tF9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB6.640176\tTest plate 3_1.SKB6.640176.Test.plate.3.F9_F9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.3.F10\tAGTGCAGGAGCC\tGTGTGYCAGCMGCCGCGGTAA\t3\tF10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB6.640176\tTest plate 3_1.SKB6.640176.Test.plate.3.F10_F10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.3.F11\tGTACTCGAACCA\tGTGTGYCAGCMGCCGCGGTAA\t3\tF11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKB6.640176\tTest plate 3_1.SKB6.640176.Test.plate.3.F11_F11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKD5.640186\tATAGGAATAACC\tGTGTGYCAGCMGCCGCGGTAA\t3\tF12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\tCannabis Soils\t1.SKD5.640186\tTest plate 3_1.SKD5.640186_F12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.4.A1\tTAGGACGGGAGT\tGTGTGYCAGCMGCCGCGGTAA\t4\tA1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB1.640202\tTest plate 4_1.SKB1.640202.Test.plate.4.A1_A1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.4.A2\tAAGTCTTATCTC\tGTGTGYCAGCMGCCGCGGTAA\t4\tA2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB1.640202\tTest plate 4_1.SKB1.640202.Test.plate.4.A2_A2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.4.A3\tTTGCACCGTCGA\tGTGTGYCAGCMGCCGCGGTAA\t4\tA3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB1.640202\tTest plate 4_1.SKB1.640202.Test.plate.4.A3_A3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.4.A4\tCTCCGAACAACA\tGTGTGYCAGCMGCCGCGGTAA\t4\tA4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB1.640202\tTest plate 4_1.SKB1.640202.Test.plate.4.A4_A4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.4.A5\tTCTGGCTACGAC\tGTGTGYCAGCMGCCGCGGTAA\t4\tA5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB1.640202\tTest plate 4_1.SKB1.640202.Test.plate.4.A5_A5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.4.A6\tAGTAGTTTCCTT\tGTGTGYCAGCMGCCGCGGTAA\t4\tA6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB1.640202\tTest plate 4_1.SKB1.640202.Test.plate.4.A6_A6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.4.A7\tCAGATCCCAACC\tGTGTGYCAGCMGCCGCGGTAA\t4\tA7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB1.640202\tTest plate 4_1.SKB1.640202.Test.plate.4.A7_A7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.4.A8\tGATAGCACTCGT\tGTGTGYCAGCMGCCGCGGTAA\t4\tA8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB1.640202\tTest plate 4_1.SKB1.640202.Test.plate.4.A8_A8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.4.A9\tGTAATTGTAATT\tGTGTGYCAGCMGCCGCGGTAA\t4\tA9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB1.640202\tTest plate 4_1.SKB1.640202.Test.plate.4.A9_A9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.4.A10\tTGCTACAGACGT\tGTGTGYCAGCMGCCGCGGTAA\t4\tA10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB1.640202\tTest plate 4_1.SKB1.640202.Test.plate.4.A10_A10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.4.A11\tAGGTGAGTTCTA\tGTGTGYCAGCMGCCGCGGTAA\t4\tA11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB1.640202\tTest plate 4_1.SKB1.640202.Test.plate.4.A11_A11\tAnalysis of the 
Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB1.640202.Test.plate.4.A12\tAACGATCATAGA\tGTGTGYCAGCMGCCGCGGTAA\t4\tA12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB1.640202\tTest plate 4_1.SKB1.640202.Test.plate.4.A12_A12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.4.B1\tGTTTGGCCACAC\tGTGTGYCAGCMGCCGCGGTAA\t4\tB1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB2.640194\tTest plate 4_1.SKB2.640194.Test.plate.4.B1_B1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.4.B2\tGTCCTACACAGC\tGTGTGYCAGCMGCCGCGGTAA\t4\tB2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB2.640194\tTest plate 4_1.SKB2.640194.Test.plate.4.B2_B2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.4.B3\tATTTACAATTGA\tGTGTGYCAGCMGCCGCGGTAA\t4\tB3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis 
Soils\t1.SKB2.640194\tTest plate 4_1.SKB2.640194.Test.plate.4.B3_B3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.4.B4\tCCACTGCCCACC\tGTGTGYCAGCMGCCGCGGTAA\t4\tB4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB2.640194\tTest plate 4_1.SKB2.640194.Test.plate.4.B4_B4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.4.B5\tATAGTTAGGGCT\tGTGTGYCAGCMGCCGCGGTAA\t4\tB5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB2.640194\tTest plate 4_1.SKB2.640194.Test.plate.4.B5_B5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.4.B6\tGACCCGTTTCGC\tGTGTGYCAGCMGCCGCGGTAA\t4\tB6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB2.640194\tTest plate 4_1.SKB2.640194.Test.plate.4.B6_B6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.4.B7\tTGACTGCGTTAG\tGTGTGYCAGCMGCCGCGGTAA\t4\tB7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB2.640194\tTest plate 4_1.SKB2.640194.Test.plate.4.B7_B7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.4.B8\tACGTTAATATTC\tGTGTGYCAGCMGCCGCGGTAA\t4\tB8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB2.640194\tTest plate 4_1.SKB2.640194.Test.plate.4.B8_B8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.4.B9\tTCTAACGAGTGC\tGTGTGYCAGCMGCCGCGGTAA\t4\tB9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB2.640194\tTest plate 4_1.SKB2.640194.Test.plate.4.B9_B9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.4.B10\tGATCCCACGTAC\tGTGTGYCAGCMGCCGCGGTAA\t4\tB10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB2.640194\tTest plate 4_1.SKB2.640194.Test.plate.4.B10_B10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.4.B11\tCCGCCAGCTTTG\tGTGTGYCAGCMGCCGCGGTAA\t4\tB11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB2.640194\tTest plate 4_1.SKB2.640194.Test.plate.4.B11_B11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB2.640194.Test.plate.4.B12\tTCATCTTGATTG\tGTGTGYCAGCMGCCGCGGTAA\t4\tB12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB2.640194\tTest plate 4_1.SKB2.640194.Test.plate.4.B12_B12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.4.C1\tTATATAGTATCC\tGTGTGYCAGCMGCCGCGGTAA\t4\tC1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB3.640195\tTest plate 4_1.SKB3.640195.Test.plate.4.C1_C1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.4.C2\tACTGTTTACTGT\tGTGTGYCAGCMGCCGCGGTAA\t4\tC2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB3.640195\tTest plate 4_1.SKB3.640195.Test.plate.4.C2_C2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.4.C3\tGTCACGGACATT\tGTGTGYCAGCMGCCGCGGTAA\t4\tC3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB3.640195\tTest plate 4_1.SKB3.640195.Test.plate.4.C3_C3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.4.C4\tGAATATACCTGG\tGTGTGYCAGCMGCCGCGGTAA\t4\tC4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB3.640195\tTest plate 4_1.SKB3.640195.Test.plate.4.C4_C4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.4.C5\tGAATCTGACAAC\tGTGTGYCAGCMGCCGCGGTAA\t4\tC5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB3.640195\tTest plate 4_1.SKB3.640195.Test.plate.4.C5_C5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.4.C6\tATTGCCTTGATT\tGTGTGYCAGCMGCCGCGGTAA\t4\tC6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB3.640195\tTest plate 4_1.SKB3.640195.Test.plate.4.C6_C6\tAnalysis of the Cannabis Plant 
Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.4.C7\tGAGCCCAAAGAG\tGTGTGYCAGCMGCCGCGGTAA\t4\tC7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB3.640195\tTest plate 4_1.SKB3.640195.Test.plate.4.C7_C7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.4.C8\tCCATGTGGCTCC\tGTGTGYCAGCMGCCGCGGTAA\t4\tC8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB3.640195\tTest plate 4_1.SKB3.640195.Test.plate.4.C8_C8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.4.C9\tCGTTCCTTGTTA\tGTGTGYCAGCMGCCGCGGTAA\t4\tC9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB3.640195\tTest plate 4_1.SKB3.640195.Test.plate.4.C9_C9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.4.C10\tCGCTAGGATGTT\tGTGTGYCAGCMGCCGCGGTAA\t4\tC10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis 
Soils\t1.SKB3.640195\tTest plate 4_1.SKB3.640195.Test.plate.4.C10_C10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.4.C11\tAGCGGTAGCGGT\tGTGTGYCAGCMGCCGCGGTAA\t4\tC11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB3.640195\tTest plate 4_1.SKB3.640195.Test.plate.4.C11_C11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB3.640195.Test.plate.4.C12\tGTCAGTATGGCT\tGTGTGYCAGCMGCCGCGGTAA\t4\tC12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB3.640195\tTest plate 4_1.SKB3.640195.Test.plate.4.C12_C12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.4.D1\tCATAAGGGAGGC\tGTGTGYCAGCMGCCGCGGTAA\t4\tD1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB4.640189\tTest plate 4_1.SKB4.640189.Test.plate.4.D1_D1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.4.D2\tCAGGCCACTCTC\tGTGTGYCAGCMGCCGCGGTAA\t4\tD2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB4.640189\tTest plate 4_1.SKB4.640189.Test.plate.4.D2_D2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.4.D3\tACAGTTGTACGC\tGTGTGYCAGCMGCCGCGGTAA\t4\tD3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB4.640189\tTest plate 4_1.SKB4.640189.Test.plate.4.D3_D3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.4.D4\tACCAGAAATGTC\tGTGTGYCAGCMGCCGCGGTAA\t4\tD4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB4.640189\tTest plate 4_1.SKB4.640189.Test.plate.4.D4_D4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.4.D5\tCTCATCATGTTC\tGTGTGYCAGCMGCCGCGGTAA\t4\tD5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB4.640189\tTest plate 4_1.SKB4.640189.Test.plate.4.D5_D5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.4.D6\tTTAGGATTCTAT\tGTGTGYCAGCMGCCGCGGTAA\t4\tD6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB4.640189\tTest plate 4_1.SKB4.640189.Test.plate.4.D6_D6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.4.D7\tCAACGAACCATC\tGTGTGYCAGCMGCCGCGGTAA\t4\tD7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB4.640189\tTest plate 4_1.SKB4.640189.Test.plate.4.D7_D7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.4.D8\tACACGTTTGGGT\tGTGTGYCAGCMGCCGCGGTAA\t4\tD8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB4.640189\tTest plate 4_1.SKB4.640189.Test.plate.4.D8_D8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.4.D9\tCGTCGCAGCCTT\tGTGTGYCAGCMGCCGCGGTAA\t4\tD9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB4.640189\tTest plate 4_1.SKB4.640189.Test.plate.4.D9_D9\tAnalysis of the Cannabis Plant 
Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.4.D10\tCTACTTACATCC\tGTGTGYCAGCMGCCGCGGTAA\t4\tD10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB4.640189\tTest plate 4_1.SKB4.640189.Test.plate.4.D10_D10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.4.D11\tCGCACGTACCTC\tGTGTGYCAGCMGCCGCGGTAA\t4\tD11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB4.640189\tTest plate 4_1.SKB4.640189.Test.plate.4.D11_D11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB4.640189.Test.plate.4.D12\tGTCCTCGCGACT\tGTGTGYCAGCMGCCGCGGTAA\t4\tD12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB4.640189\tTest plate 4_1.SKB4.640189.Test.plate.4.D12_D12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.4.E1\tGTGCAACCAATC\tGTGTGYCAGCMGCCGCGGTAA\t4\tE1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis 
Soils\t1.SKB5.640181\tTest plate 4_1.SKB5.640181.Test.plate.4.E1_E1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.4.E2\tACCCAAGCGTTA\tGTGTGYCAGCMGCCGCGGTAA\t4\tE2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB5.640181\tTest plate 4_1.SKB5.640181.Test.plate.4.E2_E2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.4.E3\tACTGGCAAACCT\tGTGTGYCAGCMGCCGCGGTAA\t4\tE3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB5.640181\tTest plate 4_1.SKB5.640181.Test.plate.4.E3_E3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.4.E4\tAACACCATCGAC\tGTGTGYCAGCMGCCGCGGTAA\t4\tE4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB5.640181\tTest plate 4_1.SKB5.640181.Test.plate.4.E4_E4\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.4.E5\tTTATCCAGTCCT\tGTGTGYCAGCMGCCGCGGTAA\t4\tE5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB5.640181\tTest plate 4_1.SKB5.640181.Test.plate.4.E5_E5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.4.E6\tGTTTATCTTAAG\tGTGTGYCAGCMGCCGCGGTAA\t4\tE6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB5.640181\tTest plate 4_1.SKB5.640181.Test.plate.4.E6_E6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.4.E7\tGTTCGCCGCATC\tGTGTGYCAGCMGCCGCGGTAA\t4\tE7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB5.640181\tTest plate 4_1.SKB5.640181.Test.plate.4.E7_E7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.4.E8\tAGACTATTTCAT\tGTGTGYCAGCMGCCGCGGTAA\t4\tE8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB5.640181\tTest plate 4_1.SKB5.640181.Test.plate.4.E8_E8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.4.E9\tAGCGATTCCTCG\tGTGTGYCAGCMGCCGCGGTAA\t4\tE9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB5.640181\tTest plate 4_1.SKB5.640181.Test.plate.4.E9_E9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.4.E10\tACCACCGTAACC\tGTGTGYCAGCMGCCGCGGTAA\t4\tE10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB5.640181\tTest plate 4_1.SKB5.640181.Test.plate.4.E10_E10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.4.E11\tAGGAAGTAACTT\tGTGTGYCAGCMGCCGCGGTAA\t4\tE11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB5.640181\tTest plate 4_1.SKB5.640181.Test.plate.4.E11_E11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB5.640181.Test.plate.4.E12\tCGTTCGCTAGCC\tGTGTGYCAGCMGCCGCGGTAA\t4\tE12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB5.640181\tTest plate 4_1.SKB5.640181.Test.plate.4.E12_E12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.4.F1\tCTCACCTAGGAA\tGTGTGYCAGCMGCCGCGGTAA\t4\tF1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB6.640176\tTest plate 4_1.SKB6.640176.Test.plate.4.F1_F1\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.4.F2\tAGATGCAATGAT\tGTGTGYCAGCMGCCGCGGTAA\t4\tF2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB6.640176\tTest plate 4_1.SKB6.640176.Test.plate.4.F2_F2\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.4.F3\tGCATTCGGCGTT\tGTGTGYCAGCMGCCGCGGTAA\t4\tF3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB6.640176\tTest plate 4_1.SKB6.640176.Test.plate.4.F3_F3\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.4.F4\tTCTACATACATA\tGTGTGYCAGCMGCCGCGGTAA\t4\tF4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB6.640176\tTest plate 4_1.SKB6.640176.Test.plate.4.F4_F4\tAnalysis of the Cannabis Plant 
Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.4.F5\tGAGTCTTGGTAA\tGTGTGYCAGCMGCCGCGGTAA\t4\tF5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB6.640176\tTest plate 4_1.SKB6.640176.Test.plate.4.F5_F5\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.4.F6\tCAGTCTAGTACG\tGTGTGYCAGCMGCCGCGGTAA\t4\tF6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB6.640176\tTest plate 4_1.SKB6.640176.Test.plate.4.F6_F6\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.4.F7\tGTTCGAGTGAAT\tGTGTGYCAGCMGCCGCGGTAA\t4\tF7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB6.640176\tTest plate 4_1.SKB6.640176.Test.plate.4.F7_F7\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.4.F8\tAGTCCGAGTTGT\tGTGTGYCAGCMGCCGCGGTAA\t4\tF8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB6.640176\tTest 
plate 4_1.SKB6.640176.Test.plate.4.F8_F8\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.4.F9\tCGTGAGGACCAG\tGTGTGYCAGCMGCCGCGGTAA\t4\tF9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB6.640176\tTest plate 4_1.SKB6.640176.Test.plate.4.F9_F9\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.4.F10\tCGGTTGGCGGGT\tGTGTGYCAGCMGCCGCGGTAA\t4\tF10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB6.640176\tTest plate 4_1.SKB6.640176.Test.plate.4.F10_F10\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKB6.640176.Test.plate.4.F11\tCGATTCCTTAAT\tGTGTGYCAGCMGCCGCGGTAA\t4\tF11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKB6.640176\tTest plate 4_1.SKB6.640176.Test.plate.4.F11_F11\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n1.SKM6.640187\tTGCCTGCTCGAC\tGTGTGYCAGCMGCCGCGGTAA\t4\tF12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\tCannabis Soils\t1.SKM6.640187\tTest plate 4_1.SKM6.640187_F12\tAnalysis of the Cannabis Plant Microbiome\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n"""
CONTROL_SAMPLES_PREP_EXAMPLE = """sample_name\tBARCODE\tPRIMER\tPrimer_Plate\tWell_ID\tPlating\tExtractionKit_lot\tExtraction_robot\tTM1000_8_tool\tPrimer_date\tMasterMix_lot\tWater_Lot\tProcessing_robot\tTM300_8_tool\tTM50_8_tool\tSample_Plate\tProject_name\tOrig_name\tWell_description\tEXPERIMENT_DESIGN_DESCRIPTION\tLIBRARY_CONSTRUCTION_PROTOCOL\tLINKER\tPLATFORM\tRUN_CENTER\tRUN_DATE\tRUN_PREFIX\tpcr_primers\tsequencing_meth\ttarget_gene\ttarget_subfragment\tcenter_name\tcenter_project_name\tINSTRUMENT_MODEL\tRUNID\nvibrio.positive.control.Test.plate.1.G1\tGGTGAGCAAGCA\tGTGTGYCAGCMGCCGCGGTAA\t1\tG1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tvibrio.positive.control.Test.plate.1.G1\tTest plate 1_vibrio.positive.control.Test.plate.1.G1_G1\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.1.G2\tTAAATATACCCT\tGTGTGYCAGCMGCCGCGGTAA\t1\tG2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tvibrio.positive.control.Test.plate.1.G2\tTest plate 1_vibrio.positive.control.Test.plate.1.G2_G2\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.1.G3\tTTGCGGACCCTA\tGTGTGYCAGCMGCCGCGGTAA\t1\tG3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tvibrio.positive.control.Test.plate.1.G3\tTest plate 1_vibrio.positive.control.Test.plate.1.G3_G3\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; 
REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.1.G4\tGTCGTCCAAATG\tGTGTGYCAGCMGCCGCGGTAA\t1\tG4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tvibrio.positive.control.Test.plate.1.G4\tTest plate 1_vibrio.positive.control.Test.plate.1.G4_G4\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.1.G5\tTGCACAGTCGCT\tGTGTGYCAGCMGCCGCGGTAA\t1\tG5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tvibrio.positive.control.Test.plate.1.G5\tTest plate 1_vibrio.positive.control.Test.plate.1.G5_G5\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.1.G6\tTTACTGTGGCCG\tGTGTGYCAGCMGCCGCGGTAA\t1\tG6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tvibrio.positive.control.Test.plate.1.G6\tTest plate 1_vibrio.positive.control.Test.plate.1.G6_G6\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.1.G7\tGGTTCATGAACA\tGTGTGYCAGCMGCCGCGGTAA\t1\tG7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tvibrio.positive.control.Test.plate.1.G7\tTest plate 1_vibrio.positive.control.Test.plate.1.G7_G7\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.1.G8\tTAACAATAATTC\tGTGTGYCAGCMGCCGCGGTAA\t1\tG8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tvibrio.positive.control.Test.plate.1.G8\tTest plate 1_vibrio.positive.control.Test.plate.1.G8_G8\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.1.G9\tCTTATTAAACGT\tGTGTGYCAGCMGCCGCGGTAA\t1\tG9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tvibrio.positive.control.Test.plate.1.G9\tTest plate 1_vibrio.positive.control.Test.plate.1.G9_G9\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.1.G10\tGCTCGAAGATTC\tGTGTGYCAGCMGCCGCGGTAA\t1\tG10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tvibrio.positive.control.Test.plate.1.G10\tTest plate 1_vibrio.positive.control.Test.plate.1.G10_G10\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.1.G11\tTATTTGATTGGT\tGTGTGYCAGCMGCCGCGGTAA\t1\tG11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tvibrio.positive.control.Test.plate.1.G11\tTest plate 1_vibrio.positive.control.Test.plate.1.G11_G11\t\tIllumina EMP 
protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.1.G12\tTGTCAAAGTGAC\tGTGTGYCAGCMGCCGCGGTAA\t1\tG12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tvibrio.positive.control.Test.plate.1.G12\tTest plate 1_vibrio.positive.control.Test.plate.1.G12_G12\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.1.H1\tCTATGTATTAGT\tGTGTGYCAGCMGCCGCGGTAA\t1\tH1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tblank.Test.plate.1.H1\tTest plate 1_blank.Test.plate.1.H1_H1\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.1.H2\tACTCCCGTGTGA\tGTGTGYCAGCMGCCGCGGTAA\t1\tH2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tblank.Test.plate.1.H2\tTest plate 1_blank.Test.plate.1.H2_H2\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.1.H3\tCGGTATAGCAAT\tGTGTGYCAGCMGCCGCGGTAA\t1\tH3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tblank.Test.plate.1.H3\tTest plate 1_blank.Test.plate.1.H3_H3\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; 
REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.1.H4\tGACTCTGCTCAG\tGTGTGYCAGCMGCCGCGGTAA\t1\tH4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tblank.Test.plate.1.H4\tTest plate 1_blank.Test.plate.1.H4_H4\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.1.H5\tGTCATGCTCCAG\tGTGTGYCAGCMGCCGCGGTAA\t1\tH5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tblank.Test.plate.1.H5\tTest plate 1_blank.Test.plate.1.H5_H5\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.1.H6\tTACCGAAGGTAT\tGTGTGYCAGCMGCCGCGGTAA\t1\tH6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tblank.Test.plate.1.H6\tTest plate 1_blank.Test.plate.1.H6_H6\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.1.H7\tTGAGTATGAGTA\tGTGTGYCAGCMGCCGCGGTAA\t1\tH7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tblank.Test.plate.1.H7\tTest plate 1_blank.Test.plate.1.H7_H7\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.1.H8\tAATGGTTCAGCA\tGTGTGYCAGCMGCCGCGGTAA\t1\tH8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tblank.Test.plate.1.H8\tTest plate 1_blank.Test.plate.1.H8_H8\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.1.H9\tGAACCAGTACTC\tGTGTGYCAGCMGCCGCGGTAA\t1\tH9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tblank.Test.plate.1.H9\tTest plate 1_blank.Test.plate.1.H9_H9\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.1.H10\tCGCACCCATACA\tGTGTGYCAGCMGCCGCGGTAA\t1\tH10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tblank.Test.plate.1.H10\tTest plate 1_blank.Test.plate.1.H10_H10\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.1.H11\tGTGCCATAATCG\tGTGTGYCAGCMGCCGCGGTAA\t1\tH11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 1\t\tblank.Test.plate.1.H11\tTest plate 1_blank.Test.plate.1.H11_H11\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.2.G1\tTCAGACCAACTG\tGTGTGYCAGCMGCCGCGGTAA\t2\tG1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tvibrio.positive.control.Test.plate.2.G1\tTest plate 2_vibrio.positive.control.Test.plate.2.G1_G1\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.2.G2\tCCACGAGCAGGC\tGTGTGYCAGCMGCCGCGGTAA\t2\tG2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tvibrio.positive.control.Test.plate.2.G2\tTest plate 2_vibrio.positive.control.Test.plate.2.G2_G2\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.2.G3\tGCGTGCCCGGCC\tGTGTGYCAGCMGCCGCGGTAA\t2\tG3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tvibrio.positive.control.Test.plate.2.G3\tTest plate 2_vibrio.positive.control.Test.plate.2.G3_G3\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.2.G4\tCAAAGGAGCCCG\tGTGTGYCAGCMGCCGCGGTAA\t2\tG4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tvibrio.positive.control.Test.plate.2.G4\tTest plate 2_vibrio.positive.control.Test.plate.2.G4_G4\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.2.G5\tTGCGGCGTCAGG\tGTGTGYCAGCMGCCGCGGTAA\t2\tG5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tvibrio.positive.control.Test.plate.2.G5\tTest plate 2_vibrio.positive.control.Test.plate.2.G5_G5\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.2.G6\tCGCTGTGGATTA\tGTGTGYCAGCMGCCGCGGTAA\t2\tG6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tvibrio.positive.control.Test.plate.2.G6\tTest plate 2_vibrio.positive.control.Test.plate.2.G6_G6\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.2.G7\tCTTGCTCATAAT\tGTGTGYCAGCMGCCGCGGTAA\t2\tG7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tvibrio.positive.control.Test.plate.2.G7\tTest plate 2_vibrio.positive.control.Test.plate.2.G7_G7\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.2.G8\tACGACAACGGGC\tGTGTGYCAGCMGCCGCGGTAA\t2\tG8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tvibrio.positive.control.Test.plate.2.G8\tTest plate 2_vibrio.positive.control.Test.plate.2.G8_G8\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; 
REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.2.G9\tCTAGCGTGCGTT\tGTGTGYCAGCMGCCGCGGTAA\t2\tG9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tvibrio.positive.control.Test.plate.2.G9\tTest plate 2_vibrio.positive.control.Test.plate.2.G9_G9\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.2.G10\tTAGTCTAAGGGT\tGTGTGYCAGCMGCCGCGGTAA\t2\tG10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tvibrio.positive.control.Test.plate.2.G10\tTest plate 2_vibrio.positive.control.Test.plate.2.G10_G10\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.2.G11\tGTTTGAAACACG\tGTGTGYCAGCMGCCGCGGTAA\t2\tG11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tvibrio.positive.control.Test.plate.2.G11\tTest plate 2_vibrio.positive.control.Test.plate.2.G11_G11\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.2.G12\tACCTCAGTCAAG\tGTGTGYCAGCMGCCGCGGTAA\t2\tG12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tvibrio.positive.control.Test.plate.2.G12\tTest plate 2_vibrio.positive.control.Test.plate.2.G12_G12\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA 
V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.2.H1\tTCATTAGCGTGG\tGTGTGYCAGCMGCCGCGGTAA\t2\tH1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tblank.Test.plate.2.H1\tTest plate 2_blank.Test.plate.2.H1_H1\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.2.H2\tCGCCGTACTTGC\tGTGTGYCAGCMGCCGCGGTAA\t2\tH2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tblank.Test.plate.2.H2\tTest plate 2_blank.Test.plate.2.H2_H2\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.2.H3\tTAAACCTGGACA\tGTGTGYCAGCMGCCGCGGTAA\t2\tH3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tblank.Test.plate.2.H3\tTest plate 2_blank.Test.plate.2.H3_H3\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.2.H4\tCCAACCCAGATC\tGTGTGYCAGCMGCCGCGGTAA\t2\tH4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tblank.Test.plate.2.H4\tTest plate 2_blank.Test.plate.2.H4_H4\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.2.H5\tTTAAGTTAAGTT\tGTGTGYCAGCMGCCGCGGTAA\t2\tH5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tblank.Test.plate.2.H5\tTest plate 2_blank.Test.plate.2.H5_H5\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.2.H6\tAGCCGCGGGTCC\tGTGTGYCAGCMGCCGCGGTAA\t2\tH6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tblank.Test.plate.2.H6\tTest plate 2_blank.Test.plate.2.H6_H6\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.2.H7\tGGTAGTTCATAG\tGTGTGYCAGCMGCCGCGGTAA\t2\tH7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tblank.Test.plate.2.H7\tTest plate 2_blank.Test.plate.2.H7_H7\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.2.H8\tCGATGAATATCG\tGTGTGYCAGCMGCCGCGGTAA\t2\tH8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tblank.Test.plate.2.H8\tTest plate 2_blank.Test.plate.2.H8_H8\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.2.H9\tGTTCTAAGGTGA\tGTGTGYCAGCMGCCGCGGTAA\t2\tH9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tblank.Test.plate.2.H9\tTest plate 2_blank.Test.plate.2.H9_H9\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.2.H10\tATGACTAAGATG\tGTGTGYCAGCMGCCGCGGTAA\t2\tH10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tblank.Test.plate.2.H10\tTest plate 2_blank.Test.plate.2.H10_H10\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.2.H11\tTACAGCGCATAC\tGTGTGYCAGCMGCCGCGGTAA\t2\tH11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 2\t\tblank.Test.plate.2.H11\tTest plate 2_blank.Test.plate.2.H11_H11\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.3.G1\tGCTGCGTATACC\tGTGTGYCAGCMGCCGCGGTAA\t3\tG1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tvibrio.positive.control.Test.plate.3.G1\tTest plate 3_vibrio.positive.control.Test.plate.3.G1_G1\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.3.G2\tCTCAGCGGGACG\tGTGTGYCAGCMGCCGCGGTAA\t3\tG2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 
3\t\tvibrio.positive.control.Test.plate.3.G2\tTest plate 3_vibrio.positive.control.Test.plate.3.G2_G2\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.3.G3\tATGCCTCGTAAG\tGTGTGYCAGCMGCCGCGGTAA\t3\tG3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tvibrio.positive.control.Test.plate.3.G3\tTest plate 3_vibrio.positive.control.Test.plate.3.G3_G3\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.3.G4\tTTAGTTTGTCAC\tGTGTGYCAGCMGCCGCGGTAA\t3\tG4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tvibrio.positive.control.Test.plate.3.G4\tTest plate 3_vibrio.positive.control.Test.plate.3.G4_G4\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.3.G5\tCCGGCCGCGTGC\tGTGTGYCAGCMGCCGCGGTAA\t3\tG5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tvibrio.positive.control.Test.plate.3.G5\tTest plate 3_vibrio.positive.control.Test.plate.3.G5_G5\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.3.G6\tATTATGATTATG\tGTGTGYCAGCMGCCGCGGTAA\t3\tG6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tvibrio.positive.control.Test.plate.3.G6\tTest plate 3_vibrio.positive.control.Test.plate.3.G6_G6\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.3.G7\tCGAATACTGACA\tGTGTGYCAGCMGCCGCGGTAA\t3\tG7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tvibrio.positive.control.Test.plate.3.G7\tTest plate 3_vibrio.positive.control.Test.plate.3.G7_G7\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.3.G8\tTCTTATAACGCT\tGTGTGYCAGCMGCCGCGGTAA\t3\tG8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tvibrio.positive.control.Test.plate.3.G8\tTest plate 3_vibrio.positive.control.Test.plate.3.G8_G8\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.3.G9\tTAAGGTCGATAA\tGTGTGYCAGCMGCCGCGGTAA\t3\tG9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tvibrio.positive.control.Test.plate.3.G9\tTest plate 3_vibrio.positive.control.Test.plate.3.G9_G9\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.3.G10\tGTTGCTGAGTCC\tGTGTGYCAGCMGCCGCGGTAA\t3\tG10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tvibrio.positive.control.Test.plate.3.G10\tTest plate 3_vibrio.positive.control.Test.plate.3.G10_G10\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.3.G11\tACACCGCACAAT\tGTGTGYCAGCMGCCGCGGTAA\t3\tG11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tvibrio.positive.control.Test.plate.3.G11\tTest plate 3_vibrio.positive.control.Test.plate.3.G11_G11\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.3.G12\tCACAACCACAAC\tGTGTGYCAGCMGCCGCGGTAA\t3\tG12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tvibrio.positive.control.Test.plate.3.G12\tTest plate 3_vibrio.positive.control.Test.plate.3.G12_G12\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.3.H1\tGAGAAGCTTATA\tGTGTGYCAGCMGCCGCGGTAA\t3\tH1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tblank.Test.plate.3.H1\tTest plate 3_blank.Test.plate.3.H1_H1\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.3.H2\tGTTAACTTACTA\tGTGTGYCAGCMGCCGCGGTAA\t3\tH2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tblank.Test.plate.3.H2\tTest plate 3_blank.Test.plate.3.H2_H2\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.3.H3\tGTTGTTCTGGGA\tGTGTGYCAGCMGCCGCGGTAA\t3\tH3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tblank.Test.plate.3.H3\tTest plate 3_blank.Test.plate.3.H3_H3\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.3.H4\tAGGGTGACTTTA\tGTGTGYCAGCMGCCGCGGTAA\t3\tH4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tblank.Test.plate.3.H4\tTest plate 3_blank.Test.plate.3.H4_H4\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.3.H5\tGCCGCCAGGGTC\tGTGTGYCAGCMGCCGCGGTAA\t3\tH5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tblank.Test.plate.3.H5\tTest plate 3_blank.Test.plate.3.H5_H5\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.3.H6\tGCCACCGCCGGA\tGTGTGYCAGCMGCCGCGGTAA\t3\tH6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 
19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tblank.Test.plate.3.H6\tTest plate 3_blank.Test.plate.3.H6_H6\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.3.H7\tACACACCCTGAC\tGTGTGYCAGCMGCCGCGGTAA\t3\tH7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tblank.Test.plate.3.H7\tTest plate 3_blank.Test.plate.3.H7_H7\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.3.H8\tTATAGGCTCCGC\tGTGTGYCAGCMGCCGCGGTAA\t3\tH8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tblank.Test.plate.3.H8\tTest plate 3_blank.Test.plate.3.H8_H8\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.3.H9\tATAATTGCCGAG\tGTGTGYCAGCMGCCGCGGTAA\t3\tH9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tblank.Test.plate.3.H9\tTest plate 3_blank.Test.plate.3.H9_H9\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.3.H10\tCGGAGAGACATG\tGTGTGYCAGCMGCCGCGGTAA\t3\tH10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tblank.Test.plate.3.H10\tTest plate 3_blank.Test.plate.3.H10_H10\t\tIllumina EMP protocol 515fbc, 806r 
amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.3.H11\tCAGCCCTACCCA\tGTGTGYCAGCMGCCGCGGTAA\t3\tH11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 3\t\tblank.Test.plate.3.H11\tTest plate 3_blank.Test.plate.3.H11_H11\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.4.G1\tTACTGTACTGTT\tGTGTGYCAGCMGCCGCGGTAA\t4\tG1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tvibrio.positive.control.Test.plate.4.G1\tTest plate 4_vibrio.positive.control.Test.plate.4.G1_G1\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.4.G2\tTCTCGCACTGGA\tGTGTGYCAGCMGCCGCGGTAA\t4\tG2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tvibrio.positive.control.Test.plate.4.G2\tTest plate 4_vibrio.positive.control.Test.plate.4.G2_G2\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.4.G3\tACCAGTGACTCA\tGTGTGYCAGCMGCCGCGGTAA\t4\tG3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tvibrio.positive.control.Test.plate.4.G3\tTest plate 4_vibrio.positive.control.Test.plate.4.G3_G3\t\tIllumina EMP protocol 515fbc, 806r 
amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.4.G4\tTGGCGCACGGAC\tGTGTGYCAGCMGCCGCGGTAA\t4\tG4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tvibrio.positive.control.Test.plate.4.G4\tTest plate 4_vibrio.positive.control.Test.plate.4.G4_G4\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.4.G5\tCATTTACATCAC\tGTGTGYCAGCMGCCGCGGTAA\t4\tG5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tvibrio.positive.control.Test.plate.4.G5\tTest plate 4_vibrio.positive.control.Test.plate.4.G5_G5\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.4.G6\tGTGGGACTGCGC\tGTGTGYCAGCMGCCGCGGTAA\t4\tG6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tvibrio.positive.control.Test.plate.4.G6\tTest plate 4_vibrio.positive.control.Test.plate.4.G6_G6\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.4.G7\tCGGCCTAAGTTC\tGTGTGYCAGCMGCCGCGGTAA\t4\tG7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tvibrio.positive.control.Test.plate.4.G7\tTest plate 
4_vibrio.positive.control.Test.plate.4.G7_G7\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.4.G8\tGCTGAGCCTTTG\tGTGTGYCAGCMGCCGCGGTAA\t4\tG8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tvibrio.positive.control.Test.plate.4.G8\tTest plate 4_vibrio.positive.control.Test.plate.4.G8_G8\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.4.G9\tAGAGACGCGTAG\tGTGTGYCAGCMGCCGCGGTAA\t4\tG9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tvibrio.positive.control.Test.plate.4.G9\tTest plate 4_vibrio.positive.control.Test.plate.4.G9_G9\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.4.G10\tCCACCGGGCCGA\tGTGTGYCAGCMGCCGCGGTAA\t4\tG10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tvibrio.positive.control.Test.plate.4.G10\tTest plate 4_vibrio.positive.control.Test.plate.4.G10_G10\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.4.G11\tAATCCGGTCACC\tGTGTGYCAGCMGCCGCGGTAA\t4\tG11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 
4\t\tvibrio.positive.control.Test.plate.4.G11\tTest plate 4_vibrio.positive.control.Test.plate.4.G11_G11\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nvibrio.positive.control.Test.plate.4.G12\tTCTTACCCATAA\tGTGTGYCAGCMGCCGCGGTAA\t4\tG12\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tvibrio.positive.control.Test.plate.4.G12\tTest plate 4_vibrio.positive.control.Test.plate.4.G12_G12\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.4.H1\tCTAGAGCTCCCA\tGTGTGYCAGCMGCCGCGGTAA\t4\tH1\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tblank.Test.plate.4.H1\tTest plate 4_blank.Test.plate.4.H1_H1\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.4.H2\tGGTCTTAGCACC\tGTGTGYCAGCMGCCGCGGTAA\t4\tH2\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tblank.Test.plate.4.H2\tTest plate 4_blank.Test.plate.4.H2_H2\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.4.H3\tGCCTACTCTCGG\tGTGTGYCAGCMGCCGCGGTAA\t4\tH3\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tblank.Test.plate.4.H3\tTest plate 4_blank.Test.plate.4.H3_H3\t\tIllumina EMP 
protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.4.H4\tACTGCCCGATAC\tGTGTGYCAGCMGCCGCGGTAA\t4\tH4\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tblank.Test.plate.4.H4\tTest plate 4_blank.Test.plate.4.H4_H4\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.4.H5\tTTCTTAACGCCT\tGTGTGYCAGCMGCCGCGGTAA\t4\tH5\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tblank.Test.plate.4.H5\tTest plate 4_blank.Test.plate.4.H5_H5\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.4.H6\tCTCCCGAGCTCC\tGTGTGYCAGCMGCCGCGGTAA\t4\tH6\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tblank.Test.plate.4.H6\tTest plate 4_blank.Test.plate.4.H6_H6\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.4.H7\tTAGACTTCAGAG\tGTGTGYCAGCMGCCGCGGTAA\t4\tH7\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tblank.Test.plate.4.H7\tTest plate 4_blank.Test.plate.4.H7_H7\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S 
rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.4.H8\tACTTAGACTCTT\tGTGTGYCAGCMGCCGCGGTAA\t4\tH8\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tblank.Test.plate.4.H8\tTest plate 4_blank.Test.plate.4.H8_H8\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.4.H9\tGGACCTGGATGG\tGTGTGYCAGCMGCCGCGGTAA\t4\tH9\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tblank.Test.plate.4.H9\tTest plate 4_blank.Test.plate.4.H9_H9\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.4.H10\tTATGTGCCGGCT\tGTGTGYCAGCMGCCGCGGTAA\t4\tH10\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tblank.Test.plate.4.H10\tTest plate 4_blank.Test.plate.4.H10_H10\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\nblank.Test.plate.4.H11\tATACCGTCTTTC\tGTGTGYCAGCMGCCGCGGTAA\t4\tH11\ttest@foo.bar\t157022406\tJER-E_KF1\t108379Z\t2017-10-23 19:10:25\t443912\tRNBF7110\tLUCY\t109375A\t311411B\tTest plate 4\t\tblank.Test.plate.4.H11\tTest plate 4_blank.Test.plate.4.H11_H11\t\tIllumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4\tGT\tIllumina\tUCSDMI\t\t\tFWD:GTGYCAGCMGCCGCGGTAA; REV:GGACTACNVGGGTWTCTAAT\tSequencing by synthesis\t16S rRNA\tV4\tUCSDMI\t\tMiSeq\t\n"""
if __name__ == '__main__':
main()
| 125.615197
| 149,869
| 0.650779
| 67,144
| 515,776
| 4.835562
| 0.033048
| 0.044989
| 0.026291
| 0.031071
| 0.874904
| 0.855408
| 0.831316
| 0.765886
| 0.749214
| 0.738169
| 0
| 0.167171
| 0.2457
| 515,776
| 4,105
| 149,870
| 125.645798
| 0.667372
| 0.010274
| 0
| 0.669739
| 0
| 0.003687
| 0.576537
| 0.446381
| 0
| 0
| 0
| 0
| 0.099026
| 1
| 0.016592
| false
| 0.000263
| 0.00395
| 0.000263
| 0.02423
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
2a6f1b9bb24cd66f3d50bd45aa1060b44276b9b2
| 40,063
|
py
|
Python
|
Code/method_collector.py
|
Jinwon-DK/GaitAnalysis
|
6b7be4aae9963b8986519af5bcbff39f32ebf2cd
|
[
"MIT"
] | null | null | null |
Code/method_collector.py
|
Jinwon-DK/GaitAnalysis
|
6b7be4aae9963b8986519af5bcbff39f32ebf2cd
|
[
"MIT"
] | null | null | null |
Code/method_collector.py
|
Jinwon-DK/GaitAnalysis
|
6b7be4aae9963b8986519af5bcbff39f32ebf2cd
|
[
"MIT"
] | null | null | null |
from random import random, seed, sample
import numpy as np
import datetime
import time
import Code.preprocessing as pp
# Model-family lookup used by BaseDivideProcess.convert to pick the tensor
# layout: '4columns' models get 4-D input (samples, h, w, 1), '3columns'
# models get 3-D sequences, '2columns' and 'specific' pass data through
# unchanged.
method_info = {
    'specific': ['cropping'],
    '4columns': ['BasicNet', 'ResNet', 'VGG'],
    '3columns': ['base', 'lstm', 'bi-lstm', 'lstm_attention', 'cnn_lstm'],
    '2columns': ['lgbm']
}
def remove_subject(rsub):
    """Parse '<person>_<count>.csv' filenames into (person, count) tuples.

    Args:
        rsub: iterable of filename strings.

    Returns:
        list[tuple[str, str]]: one (pn, cn) pair per input name.

    BUG FIX: the original called `target.endswith('.csv').spliat('_')`, which
    always raises — `endswith` returns a bool and 'spliat' is a typo for
    'split'.  We strip the '.csv' suffix, then split on the *last* underscore
    so person names containing underscores still parse.
    """
    pn_list = []
    for target in rsub:
        stem = target[:-len('.csv')] if target.endswith('.csv') else target
        pn, cn = stem.rsplit('_', 1)
        pn_list.append((pn, cn))
    return pn_list
def method_base(param, comb, datasets):
    """Build 20 random 1000-train / 1000-test splits (baseDP) and convert them.

    Args:
        param: run configuration; reads .method, .model_name, .datatype and
            param.collect["minimum_threshold"].
        comb: combination factor forwarded to convert().
        datasets: the 3 raw data arrays expected by BaseDivideProcess.

    Returns:
        tuple: (sample_train, sample_test, nb_tag, nb_people).
    """
    # Kept from the original: validates that `datasets` has exactly 3 members.
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = baseDP(param.method, param.model_name, dataset=datasets, rsub=None)
    if param.datatype == "disease":
        # BUG FIX: the original did `BaseDivideProcess.nb_class += 1` on the
        # class object, which raises AttributeError (nb_class is only ever set
        # as an instance attribute) and could not have affected the freshly
        # constructed instance anyway.  Increment the working instance, the
        # same pattern method_leaveone already used.
        divide_process.nb_class += 1
    sampling_data = divide_process.sampling()
    sample_train = sampling_data["train"]
    sample_test = sampling_data["test"]
    mt = param.collect["minimum_threshold"]
    for repeat in range(20):
        train = sample_train[repeat]
        test = sample_test[repeat]
        for nb in range(3):
            train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"], mt=mt, comb=comb)
            test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"], mt=mt, comb=comb)
        sample_train[repeat] = train
        sample_test[repeat] = test
    return sample_train, sample_test, divide_process.nb_class, divide_process.nb_people
def method_sn(param, comb, datasets):
    """Build 20 class-balanced 600-train / 900-test splits (snDP) and convert them.

    Args:
        param: run configuration; reads .method, .model_name, .datatype and
            param.collect["minimum_threshold"].
        comb: combination factor forwarded to convert().
        datasets: the 3 raw data arrays expected by BaseDivideProcess.

    Returns:
        tuple: (sample_train, sample_test, nb_tag, nb_people).
    """
    # Kept from the original: validates that `datasets` has exactly 3 members.
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    # BUG FIX: snDP.__init__ requires a `rsub` argument; the original call
    # omitted it and would raise TypeError.  Pass rsub=None like every other
    # call site in this module.
    divide_process = snDP(param.method, param.model_name, dataset=datasets, rsub=None)
    if param.datatype == "disease":
        # BUG FIX: original incremented BaseDivideProcess.nb_class on the class
        # object (AttributeError — nb_class only exists on instances).  The
        # increment must land on the instance *before* sampling(), which
        # iterates over nb_class.
        divide_process.nb_class += 1
    sampling_data = divide_process.sampling()
    sample_train = sampling_data["train"]
    sample_test = sampling_data["test"]
    mt = param.collect["minimum_threshold"]
    for repeat in range(20):
        train = sample_train[repeat]
        test = sample_test[repeat]
        for nb in range(3):
            train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"], mt=mt, comb=comb)
            test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"], mt=mt, comb=comb)
        sample_train[repeat] = train
        sample_test[repeat] = test
    return sample_train, sample_test, divide_process.nb_class, divide_process.nb_people
def method_leaveone(param, comb, datasets):
    """Leave-one-person-out splitting: one train/test pair per person.

    Args:
        param: run configuration; reads .method, .model_name, .datatype and
            param.collect["minimum_threshold"].
        comb: combination factor forwarded to convert().
        datasets: the 3 raw data arrays expected by BaseDivideProcess.

    Returns:
        tuple: (sample_train, sample_test, nb_tag, nb_people).
    """
    # Kept from the original: validates that `datasets` has exactly 3 members.
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    # BUG FIX: the original used `param.method is "cropping"` — identity
    # comparison against a string literal, which is implementation-dependent
    # and a SyntaxWarning on modern CPython.  Equality is the intended test.
    if param.method == "cropping":
        # The _ns variant performs no shuffling (see LeaveOneDP_ns).
        divide_process = LeaveOneDP_ns(param.method, param.model_name, dataset=datasets, rsub=None)
    else:
        divide_process = LeaveOneDP(param.method, param.model_name, dataset=datasets, rsub=None)
    if param.datatype == "disease":
        divide_process.nb_class += 1
    sampling_data = divide_process.sampling()
    sample_train = sampling_data["train"]
    sample_test = sampling_data["test"]
    mt = param.collect["minimum_threshold"]
    # One split per person; iterate the lists actually produced.
    for repeat in range(len(sample_train)):
        train = sample_train[repeat]
        test = sample_test[repeat]
        for nb in range(3):
            train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"], mt=mt, comb=comb)
            test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"], mt=mt, comb=comb)
        sample_train[repeat] = train
        sample_test[repeat] = test
    return sample_train, sample_test, divide_process.nb_class, divide_process.nb_people
def method_sleaveone(param, comb, datasets):
    """Shuffled leave-one-person-out splitting (always LeaveOneDP).

    Args:
        param: run configuration; reads .method, .model_name, .datatype and
            param.collect["minimum_threshold"].
        comb: combination factor forwarded to convert().
        datasets: the 3 raw data arrays expected by BaseDivideProcess.

    Returns:
        tuple: (sample_train, sample_test, nb_tag, nb_people).
    """
    # Kept from the original: validates that `datasets` has exactly 3 members.
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = LeaveOneDP(param.method, param.model_name, dataset=datasets, rsub=None)
    if param.datatype == "disease":
        # BUG FIX: original incremented BaseDivideProcess.nb_class on the class
        # object (AttributeError — nb_class only exists on instances).
        # Increment the working instance instead, as method_leaveone does.
        divide_process.nb_class += 1
    sampling_data = divide_process.sampling()
    sample_train = sampling_data["train"]
    sample_test = sampling_data["test"]
    mt = param.collect["minimum_threshold"]
    for repeat in range(len(sample_train)):
        train = sample_train[repeat]
        test = sample_test[repeat]
        for nb in range(3):
            train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"], mt=mt, comb=comb)
            test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"], mt=mt, comb=comb)
        sample_train[repeat] = train
        sample_test[repeat] = test
    return sample_train, sample_test, divide_process.nb_class, divide_process.nb_people
def method_fa_leaveone(param, comb, datasets):
    """Leave-one-person-out splitting for the 'fa' pipeline.

    NOTE(review): this function is currently byte-for-byte identical in intent
    to method_sleaveone — kept as a separate entry point for dispatch-table
    compatibility.

    Returns:
        tuple: (sample_train, sample_test, nb_tag, nb_people).
    """
    # Kept from the original: validates that `datasets` has exactly 3 members.
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = LeaveOneDP(param.method, param.model_name, dataset=datasets, rsub=None)
    if param.datatype == "disease":
        # BUG FIX: original incremented BaseDivideProcess.nb_class on the class
        # object (AttributeError — nb_class only exists on instances).
        divide_process.nb_class += 1
    sampling_data = divide_process.sampling()
    sample_train = sampling_data["train"]
    sample_test = sampling_data["test"]
    mt = param.collect["minimum_threshold"]
    for repeat in range(len(sample_train)):
        train = sample_train[repeat]
        test = sample_test[repeat]
        for nb in range(3):
            train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"], mt=mt, comb=comb)
            test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"], mt=mt, comb=comb)
        sample_train[repeat] = train
        sample_test[repeat] = test
    return sample_train, sample_test, divide_process.nb_class, divide_process.nb_people
def method_mdpi(param, comb, datasets):
    """Build 20 splits with the mdpiDP strategy and convert them for the model.

    Returns:
        tuple: (sample_train, sample_test, nb_tag, nb_people).
    """
    # Kept from the original: validates that `datasets` has exactly 3 members.
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = mdpiDP(param.method, param.model_name, dataset=datasets, rsub=None)
    if param.datatype == "disease":
        # BUG FIX: original incremented BaseDivideProcess.nb_class on the class
        # object (AttributeError — nb_class only exists on instances).
        divide_process.nb_class += 1
    sampling_data = divide_process.sampling()
    sample_train = sampling_data["train"]
    sample_test = sampling_data["test"]
    mt = param.collect["minimum_threshold"]
    for repeat in range(20):
        train = sample_train[repeat]
        test = sample_test[repeat]
        for nb in range(3):
            train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"], mt=mt, comb=comb)
            test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"], mt=mt, comb=comb)
        sample_train[repeat] = train
        sample_test[repeat] = test
    return sample_train, sample_test, divide_process.nb_class, divide_process.nb_people
def method_dhalf(param, comb, datasets):
    """Build 20 splits with the mdpi_dhalfDP strategy and convert them.

    Returns:
        tuple: (sample_train, sample_test, nb_tag, nb_people).
    """
    # Kept from the original: validates that `datasets` has exactly 3 members.
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = mdpi_dhalfDP(param.method, param.model_name, dataset=datasets, rsub=None)
    if param.datatype == "disease":
        # BUG FIX: original incremented BaseDivideProcess.nb_class on the class
        # object (AttributeError — nb_class only exists on instances).
        divide_process.nb_class += 1
    sampling_data = divide_process.sampling()
    sample_train = sampling_data["train"]
    sample_test = sampling_data["test"]
    mt = param.collect["minimum_threshold"]
    for repeat in range(20):
        train = sample_train[repeat]
        test = sample_test[repeat]
        for nb in range(3):
            train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"], mt=mt, comb=comb)
            test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"], mt=mt, comb=comb)
        sample_train[repeat] = train
        sample_test[repeat] = test
    return sample_train, sample_test, divide_process.nb_class, divide_process.nb_people
def method_half(param, comb, datasets):
    """Build 20 splits with the mdpi_halfDP strategy and convert them.

    Returns:
        tuple: (sample_train, sample_test, nb_tag, nb_people).
    """
    # Kept from the original: validates that `datasets` has exactly 3 members.
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = mdpi_halfDP(param.method, param.model_name, dataset=datasets, rsub=None)
    if param.datatype == "disease":
        # BUG FIX: original incremented BaseDivideProcess.nb_class on the class
        # object (AttributeError — nb_class only exists on instances).
        divide_process.nb_class += 1
    sampling_data = divide_process.sampling()
    sample_train = sampling_data["train"]
    sample_test = sampling_data["test"]
    mt = param.collect["minimum_threshold"]
    for repeat in range(20):
        train = sample_train[repeat]
        test = sample_test[repeat]
        for nb in range(3):
            train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"], mt=mt, comb=comb)
            test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"], mt=mt, comb=comb)
        sample_train[repeat] = train
        sample_test[repeat] = test
    return sample_train, sample_test, divide_process.nb_class, divide_process.nb_people
def method_MCCV(param, comb, datasets):
    """Build 20 Monte-Carlo cross-validation splits (mdpi_MCCVDP) and convert them.

    Returns:
        tuple: (sample_train, sample_test, nb_tag, nb_people).
    """
    # Kept from the original: validates that `datasets` has exactly 3 members.
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = mdpi_MCCVDP(param.method, param.model_name, dataset=datasets, rsub=None)
    if param.datatype == "disease":
        # BUG FIX: original incremented BaseDivideProcess.nb_class on the class
        # object (AttributeError — nb_class only exists on instances).
        divide_process.nb_class += 1
    sampling_data = divide_process.sampling()
    sample_train = sampling_data["train"]
    sample_test = sampling_data["test"]
    mt = param.collect["minimum_threshold"]
    for repeat in range(20):
        train = sample_train[repeat]
        test = sample_test[repeat]
        for nb in range(3):
            train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"], mt=mt, comb=comb)
            test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"], mt=mt, comb=comb)
        sample_train[repeat] = train
        sample_test[repeat] = test
    return sample_train, sample_test, divide_process.nb_class, divide_process.nb_people
def method_CV(param, comb, datasets):
    """Cross-validation splitting: 7-fold (seven_CVDP) or a user-selected
    ratio (select_CVDP, via param.cv_ratio).

    Returns:
        tuple: (sample_train, sample_test, nb_tag, nb_people).
    """
    # Kept from the original: validates that `datasets` has exactly 3 members.
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    if param.collect["CrossValidation"] == 7:
        divide_process = seven_CVDP(param.method, param.model_name, dataset=datasets, rsub=None)
    else:
        # select_CVDP reads the ratio from the param object.
        param.cv_ratio = param.collect["CrossValidation"]
        divide_process = select_CVDP(param.method, param.model_name, dataset=datasets, rsub=None)
    if param.datatype == "disease":
        # BUG FIX: original incremented BaseDivideProcess.nb_class on the class
        # object (AttributeError — nb_class only exists on instances).
        divide_process.nb_class += 1
    sampling_data = divide_process.sampling()
    sample_train = sampling_data["train"]
    sample_test = sampling_data["test"]
    mt = param.collect["minimum_threshold"]
    for repeat in range(len(sample_train)):
        train = sample_train[repeat]
        test = sample_test[repeat]
        for nb in range(3):
            train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"], mt=mt, comb=comb)
            test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"], mt=mt, comb=comb)
        sample_train[repeat] = train
        sample_test[repeat] = test
    return sample_train, sample_test, divide_process.nb_class, divide_process.nb_people
# Base Divide Process Class
class BaseDivideProcess:
    """Base class for the train/test divide strategies in this module.

    Sorts the three input arrays by the people label so each person's rows are
    contiguous, strips the two label columns, and derives class/people counts.
    Subclasses implement sampling(); convert() reshapes sampled data for the
    model family listed in method_info.
    """

    def __init__(self, mode, model_name, dataset):
        """
        Args:
            mode: sampling-strategy name (kept for logging by subclasses).
            model_name: model identifier; selects the layout in convert().
            dataset: exactly 3 arrays whose last two columns are
                (people label, tag/class label).
        """
        assert len(dataset) == 3, "dataset must be 3 arguments"
        data1, data2, data3 = dataset
        # Sort every array by the people column (index -2) so each person's
        # rows form one contiguous block — the leave-one-out subclasses rely
        # on this ordering.
        data1 = data1[data1[:, -2].argsort()]
        data2 = data2[data2[:, -2].argsort()]
        data3 = data3[data3[:, -2].argsort()]
        self.mode = mode                # sampling func name
        self.model_name = model_name    # used model name
        self.plabel = data1[:, -2]      # people labels (sorted ascending)
        self.tlabel = data1[:, -1]      # tag/class labels (same row order)
        # Feature matrices with the two label columns stripped.
        self.data1 = data1[:, :-2]
        self.data2 = data2[:, :-2]
        self.data3 = data3[:, :-2]
        # NOTE(review): nb_class = max(tlabel) assumes class labels start at 1
        # (or that class 0 is intentionally uncounted) — confirm against the
        # label encoding used upstream.
        self.nb_class = int(max(self.tlabel))
        self.nb_people = int(max(self.plabel)) + 1

    def sampling(self):
        """Produce {'train': [...], 'test': [...]}; implemented by subclasses."""
        pass

    def convert(self, data, mt, comb):
        """Reshape a 2-D sample matrix into the layout the model expects.

        Args:
            data: 2-D array (samples, features).
            mt: minimum threshold; mt * comb is the first reshaped axis size.
            comb: combination factor.

        Returns:
            4-D array for '4columns' models, 3-channel 4-D for 'pVGG',
            3-D for '3columns' models, `data` unchanged for '2columns'
            and 'specific' models.

        Raises:
            ValueError: for an unknown model_name (the original silently fell
                through and raised UnboundLocalError instead).
        """
        drow, dcol = data.shape
        input_shape = (int(mt * comb), int((dcol) / (mt * comb)))
        if self.model_name in method_info['4columns']:
            converted = data.reshape(-1, input_shape[0], input_shape[1], 1)
        elif self.model_name == "pVGG":
            # Replicate the single channel 3 times — presumably to match a
            # pretrained RGB-input VGG; confirm with the model definition.
            data = data.reshape(-1, input_shape[0], input_shape[1])
            converted = np.zeros((data.shape[0], data.shape[1], data.shape[2], 3))
            for idx in range(3):
                converted[:, :, :, idx] = data
        elif self.model_name in method_info['3columns']:
            converted = data.reshape(-1, input_shape[0], input_shape[1])
        elif self.model_name in method_info['2columns']:
            converted = data
        elif self.model_name in method_info['specific']:
            converted = data
        else:
            raise ValueError(f"unknown model_name: {self.model_name!r}")
        return converted
# 1000, 1000 sampling Class
class baseDP(BaseDivideProcess):
    """Plain random 1000-train / 1000-test sampling, repeated 20 times.

    (BUG FIX in docs only: the original docstring said "Sn 600-900 sampling",
    copied from snDP; this class actually takes the first 1000 shuffled rows
    as train and the next 1000 as test.)
    """

    def __init__(self, mode, model_name, dataset, rsub):
        # rsub is accepted for signature parity with the other DP classes;
        # it is not used here.
        super().__init__(mode, model_name, dataset)
        print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")

    def sampling(self):
        """Return {'train': [...20 dicts...], 'test': [...20 dicts...]}.

        Each repeat reseeds the RNG with the repeat index (so splits are
        deterministic), shuffles the row order, and slices rows [0:1000] for
        train and [1000:2000] for test from all three arrays and both labels.
        """
        total_dataset = {"train": [], "test": []}
        drow, _ = self.data1.shape
        for repeat in range(20):
            seed(repeat)  # deterministic shuffle per repeat
            order = sample(range(drow), drow)
            shuffled = [d[order] for d in (self.data1, self.data2, self.data3)]
            targetp = self.plabel[order]
            targetc = self.tlabel[order]
            train_dict = {}
            test_dict = {}
            for i, d in enumerate(shuffled):
                train_dict[f"data_{i}"] = d[:1000, :]
                test_dict[f"data_{i}"] = d[1000:2000, :]
            train_dict["people"] = targetp[:1000]
            train_dict["tag"] = targetc[:1000]
            test_dict["people"] = targetp[1000:2000]
            test_dict["tag"] = targetc[1000:2000]
            total_dataset["train"].append(train_dict)
            total_dataset["test"].append(test_dict)
        return total_dataset
# 600-900 sampling Class
class snDP(BaseDivideProcess):
    """
    Sn 600-900 sampling: per repeat, take up to 200 shuffled rows of every
    class for train and the remainder for test, then randomly subsample the
    pooled rows down to exactly 600 train / 900 test samples.
    """
    def __init__(self, mode, model_name, dataset, rsub):
        # rsub is accepted for signature parity with the other DP classes;
        # it is not used here.
        super().__init__(mode, model_name, dataset)
        print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
    def sampling(self):
        # Returns {"train": [...], "test": [...]} with 20 deterministic
        # (seeded) class-balanced splits.
        total_dataset = dict()
        total_dataset["train"] = list()
        total_dataset["test"] = list()
        for repeat in range(20):
            seed(repeat)  # deterministic shuffle per repeat
            drow, _ = self.data1.shape
            train_dict = dict()
            test_dict = dict()
            for class_target in range(self.nb_class):
                # Collect the row indices belonging to this class.
                # NOTE(review): the contiguous slice below assumes each class
                # occupies one unbroken block of rows even though the base
                # class sorts by *people* — confirm that class and people
                # orderings coincide in the input data.
                find_idx = []
                count_idx = 0
                for idx in range(drow):
                    if self.tlabel[idx] == class_target:
                        find_idx.append(idx)
                        count_idx += 1
                dataset_list = list()
                for dataset in [self.data1, self.data2, self.data3]:
                    target = dataset[find_idx[0]:find_idx[-1] + 1, :]
                    dataset_list.append(target)
                targetp = self.plabel[find_idx[0]:find_idx[-1] + 1]
                targetc = self.tlabel[find_idx[0]:find_idx[-1] + 1]
                # Shuffle this class's rows with one shared permutation so the
                # three data arrays and both label vectors stay aligned.
                random_list = sample(range(count_idx), count_idx)
                for i, target in enumerate(dataset_list):
                    dataset_list[i] = target[random_list]
                targetp = targetp[random_list]
                targetc = targetc[random_list]
                if class_target == 0:
                    # First class initialises the train/test dicts ...
                    for i, dataset in enumerate(dataset_list):
                        train_dict[f"data_{i}"] = dataset[:200, :]
                        test_dict[f"data_{i}"] = dataset[200:, :]
                    train_dict["people"] = targetp[:200]
                    train_dict["tag"] = targetc[:200]
                    test_dict["people"] = targetp[200:]
                    test_dict["tag"] = targetc[200:]
                else:
                    # ... and every later class is stacked underneath.
                    for i, dataset in enumerate(dataset_list):
                        train_dict[f"data_{i}"] = np.vstack([train_dict[f"data_{i}"], dataset[:200, :]])
                        test_dict[f"data_{i}"] = np.vstack([test_dict[f"data_{i}"], dataset[200:, :]])
                    train_dict["people"] = np.concatenate([train_dict["people"], targetp[:200]])
                    train_dict["tag"] = np.concatenate([train_dict["tag"], targetc[:200]])
                    test_dict["people"] = np.concatenate([test_dict["people"], targetp[200:]])
                    test_dict["tag"] = np.concatenate([test_dict["tag"], targetc[200:]])
            # Subsample the pooled train rows down to exactly 600 ...
            other_samples, _ = train_dict["data_0"].shape
            random_list = sample(range(other_samples), 600)
            train_dict["people"] = train_dict["people"][random_list]
            train_dict["tag"] = train_dict["tag"][random_list]
            for i in range(3):
                train_dict[f"data_{i}"] = train_dict[f"data_{i}"][random_list]
            # ... and the pooled test rows down to exactly 900.
            other_samples, _ = test_dict["data_0"].shape
            random_list = sample(range(other_samples), 900)
            test_dict["people"] = test_dict["people"][random_list]
            test_dict["tag"] = test_dict["tag"][random_list]
            for i in range(3):
                test_dict[f"data_{i}"] = test_dict[f"data_{i}"][random_list]
            total_dataset["train"].append(train_dict)
            total_dataset["test"].append(test_dict)
        return total_dataset
# LeaveOne sampling Class
class LeaveOneDP(BaseDivideProcess):
    """Leave-one-person-out divide process (rows shuffled inside each split).

    Each split holds out all rows of one person as the test set and trains on
    everyone else.  Rows are sorted by the people label in
    BaseDivideProcess.__init__, so each person occupies one contiguous slice.
    """

    def __init__(self, mode, model_name, dataset, rsub):
        # rsub is accepted for signature parity with the other DP classes;
        # it is not used here.
        super().__init__(mode, model_name, dataset)
        print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")

    def sampling(self):
        """Return {'train': [...], 'test': [...]} with one entry per person.

        BUG FIX: when the held-out person occupied the *last* block of rows
        (find_idx[-1] + 1 == drow), the original built the training set as
        dataset[:find_idx[-1] + 1] — the whole array, including the test
        person (a train/test leak).  Building train as "rows before + rows
        after" the person's slice handles all positions uniformly.
        """
        total_dataset = {"train": [], "test": []}
        for peo_target in range(self.nb_people):
            find_idx = np.where(self.plabel == peo_target)[0]
            if len(find_idx) == 0:
                # Robustness: tolerate person ids absent from the data instead
                # of crashing on find_idx[0].
                continue
            lo, hi = int(find_idx[0]), int(find_idx[-1]) + 1
            train_dict = {}
            test_dict = {}
            dataset_list = []
            train_list = []
            for dataset in [self.data1, self.data2, self.data3]:
                dataset_list.append(dataset[lo:hi, :])
                train_list.append(np.vstack([dataset[:lo, :], dataset[hi:, :]]))
            targetp = self.plabel[lo:hi]
            targetc = self.tlabel[lo:hi]
            trainp = np.concatenate([self.plabel[:lo], self.plabel[hi:]])
            trainc = np.concatenate([self.tlabel[:lo], self.tlabel[hi:]])
            # One shared permutation per side keeps data and labels aligned.
            random_list1 = sample(range(len(targetp)), len(targetp))
            random_list2 = sample(range(len(trainp)), len(trainp))
            for i, dataset in enumerate(dataset_list):
                test_dict[f"data_{i}"] = dataset[random_list1]
            test_dict["people"] = targetp[random_list1]
            test_dict["tag"] = targetc[random_list1]
            for i, dataset in enumerate(train_list):
                train_dict[f"data_{i}"] = dataset[random_list2]
            train_dict["people"] = trainp[random_list2]
            train_dict["tag"] = trainc[random_list2]
            total_dataset["train"].append(train_dict)
            total_dataset["test"].append(test_dict)
        return total_dataset
# LeaveOne sampling Class no shuffle
class LeaveOneDP_ns(BaseDivideProcess):
    """
    Leave-one-person-out sampling without shuffling.

    For each person id, that person's rows become the test set and every
    other row becomes the training set.  Row order is preserved (no random
    shuffling), unlike the shuffled LeaveOne variant.

    NOTE: assumes each person's samples occupy one contiguous block of rows
    in self.data1/2/3 (blocks are cut with find_idx[0]:find_idx[-1]+1) --
    confirm against the dataset loader.
    """

    def __init__(self, mode, model_name, dataset, rsub):
        # rsub is accepted only for signature compatibility with the other
        # DivideProcess classes; it is not used here.
        super().__init__(mode, model_name, dataset)
        print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")

    def sampling(self):
        """Return {"train": [...], "test": [...]} with one dict per person.

        Each dict carries keys "data_0".."data_2" (one per source array),
        "people" (person labels) and "tag" (class labels).
        """
        total_dataset = dict()
        total_dataset["train"] = list()
        total_dataset["test"] = list()
        drow, _ = self.data1.shape
        for peo_target in range(self.nb_people):
            train_dict = dict()
            test_dict = dict()
            # Row indexes belonging to the held-out person.
            find_idx = [idx for idx in range(drow)
                        if self.plabel[idx] == peo_target]
            start, end = find_idx[0], find_idx[-1] + 1
            dataset_list = list()
            train_list = list()
            for dataset in [self.data1, self.data2, self.data3]:
                # Test rows: the held-out person's contiguous block.
                dataset_list.append(dataset[start:end, :])
                # Train rows: everything before plus everything after the
                # block.  (BUG FIX: the original returned dataset[:end] when
                # the block sat at the end of the array, which wrongly kept
                # the held-out person inside the training set.)
                train_list.append(np.vstack([dataset[:start, :],
                                             dataset[end:, :]]))
            targetp = self.plabel[start:end]
            targetc = self.tlabel[start:end]
            trainp = np.concatenate([self.plabel[:start], self.plabel[end:]])
            trainc = np.concatenate([self.tlabel[:start], self.tlabel[end:]])
            for i, dataset in enumerate(dataset_list):
                test_dict[f"data_{i}"] = dataset
            test_dict["people"] = targetp
            test_dict["tag"] = targetc
            for i, dataset in enumerate(train_list):
                train_dict[f"data_{i}"] = dataset
            train_dict["people"] = trainp
            train_dict["tag"] = trainc
            total_dataset["train"].append(train_dict)
            total_dataset["test"].append(test_dict)
        return total_dataset
# mdpi sampling Class (per-person, per-class 3-shot split)
class mdpiDP(BaseDivideProcess):
    """
    mdpi sampling: for each person and each class, shuffle that
    (person, class) block and take the first 3 rows for training and rows
    3..49 for testing; repeated 20 times with seeds 0..19.

    NOTE: assumes each person's rows (and each class's rows within a
    person) are contiguous, because blocks are cut with
    find_idx[0]:find_idx[-1]+1 -- confirm against the dataset loader.
    """

    def __init__(self, mode, model_name, dataset, rsub):
        # rsub is accepted only for signature compatibility with the other
        # DivideProcess classes; it is not used here.
        super().__init__(mode, model_name, dataset)
        print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")

    def sampling(self):
        """Return {"train": [...], "test": [...]} with one dict per repeat."""
        total_dataset = dict()
        total_dataset["train"] = list()
        total_dataset["test"] = list()
        for repeat in range(20):
            seed(repeat)
            drow, _ = self.data1.shape
            train_dict = dict()
            test_dict = dict()
            for people_target in range(self.nb_people):
                find_idx = []
                for idx in range(drow):
                    if self.plabel[idx] == people_target:
                        find_idx.append(idx)
                dataset_list = list()
                for dataset in [self.data1, self.data2, self.data3]:
                    target = dataset[find_idx[0]:find_idx[-1] + 1, :]
                    dataset_list.append(target)
                targetp = self.plabel[find_idx[0]:find_idx[-1] + 1]
                targetc = self.tlabel[find_idx[0]:find_idx[-1] + 1]
                for class_target in range(self.nb_class):
                    find_idx = []
                    count_idx = 0
                    for idx in range(dataset_list[0].shape[0]):
                        if targetc[idx] == class_target + 1:
                            find_idx.append(idx)
                            count_idx += 1
                    class_list = list()
                    # A person may have no samples of this class; indexing
                    # find_idx[0] then raises IndexError and the class is
                    # skipped.  (BUG FIX: the original bare ``except:`` also
                    # swallowed KeyboardInterrupt/SystemExit and hid any
                    # unrelated error -- catch only IndexError.)
                    try:
                        for dataset in dataset_list:
                            target = dataset[find_idx[0]:find_idx[-1] + 1, :]
                            class_list.append(target)
                        sec_targetp = targetp[find_idx[0]:find_idx[-1] + 1]
                        sec_targetc = targetc[find_idx[0]:find_idx[-1] + 1]
                    except IndexError:
                        class_list = list()
                        continue
                    # Shuffle rows inside the (person, class) block.
                    random_list = sample(range(count_idx), count_idx)
                    for i, target in enumerate(class_list):
                        class_list[i] = target[random_list]
                    sec_targetp = sec_targetp[random_list]
                    sec_targetc = sec_targetc[random_list]
                    if people_target == 0:
                        # First person initialises the accumulator arrays.
                        for i, dataset in enumerate(class_list):
                            train_dict[f"data_{i}"] = dataset[:3, :]
                            test_dict[f"data_{i}"] = dataset[3:50, :]
                        train_dict["people"] = sec_targetp[:3]
                        train_dict["tag"] = sec_targetc[:3]
                        test_dict["people"] = sec_targetp[3:50]
                        test_dict["tag"] = sec_targetc[3:50]
                    else:
                        # Later people are stacked onto the accumulators.
                        for i, dataset in enumerate(class_list):
                            train_dict[f"data_{i}"] = np.vstack([train_dict[f"data_{i}"], dataset[:3, :]])
                            test_dict[f"data_{i}"] = np.vstack([test_dict[f"data_{i}"], dataset[3:50, :]])
                        train_dict["people"] = np.concatenate([train_dict["people"], sec_targetp[:3]])
                        train_dict["tag"] = np.concatenate([train_dict["tag"], sec_targetc[:3]])
                        test_dict["people"] = np.concatenate([test_dict["people"], sec_targetp[3:50]])
                        test_dict["tag"] = np.concatenate([test_dict["tag"], sec_targetc[3:50]])
            # Final global shuffle of the accumulated train and test sets
            # (same permutation applied to data and labels).
            other_samples, _ = train_dict["data_0"].shape
            random_list = sample(range(other_samples), other_samples)
            train_dict["people"] = train_dict["people"][random_list]
            train_dict["tag"] = train_dict["tag"][random_list]
            for i in range(3):
                train_dict[f"data_{i}"] = train_dict[f"data_{i}"][random_list]
            other_samples, _ = test_dict["data_0"].shape
            random_list = sample(range(other_samples), other_samples)
            test_dict["people"] = test_dict["people"][random_list]
            test_dict["tag"] = test_dict["tag"][random_list]
            for i in range(3):
                test_dict[f"data_{i}"] = test_dict[f"data_{i}"][random_list]
            total_dataset["train"].append(train_dict)
            total_dataset["test"].append(test_dict)
        return total_dataset
# mdpi sampling Class (digit-truncated half split)
class mdpi_dhalfDP(BaseDivideProcess):
    """
    mdpi sampling: shuffle all rows, then take two equal, disjoint chunks
    whose size is half the dataset truncated to its leading decimal digit
    (e.g. half = 523 -> chunk = 500); repeated 20 times with seeds 0..19.
    """

    def __init__(self, mode, model_name, dataset, rsub):
        # rsub is accepted only for signature compatibility with the other
        # DivideProcess classes; it is not used here.
        super().__init__(mode, model_name, dataset)
        print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")

    def sampling(self):
        """Return {"train": [...], "test": [...]} with 20 shuffled splits."""
        total_dataset = dict()
        total_dataset["train"] = list()
        total_dataset["test"] = list()
        for repeat in range(20):
            seed(repeat)
            drow, _ = self.data1.shape
            train_dict = dict()
            test_dict = dict()
            rindx_list = sample(range(drow), drow)
            dataset_list = list()
            for dataset in [self.data1, self.data2, self.data3]:
                randomized = dataset[rindx_list]
                dataset_list.append(randomized)
            targetc = self.tlabel[rindx_list]
            targetp = self.plabel[rindx_list]
            half_idx = int(drow / 2)
            # Truncate half_idx to its leading digit times a power of ten,
            # e.g. 523 -> 500, 87 -> 80.  (BUG FIX: the original float-based
            # while loop computed the same value but looped forever when
            # half_idx == 0 and risked float rounding for large sizes.)
            if half_idx > 0:
                magnitude = 10 ** (len(str(half_idx)) - 1)
                drop_idx = (half_idx // magnitude) * magnitude
            else:
                drop_idx = 0
            for i, dataset in enumerate(dataset_list):
                train_dict[f"data_{i}"] = dataset[:drop_idx, :]
                test_dict[f"data_{i}"] = dataset[drop_idx:2*drop_idx, :]
            train_dict["people"] = targetp[:drop_idx]
            train_dict["tag"] = targetc[:drop_idx]
            test_dict["people"] = targetp[drop_idx:2*drop_idx]
            test_dict["tag"] = targetc[drop_idx:2*drop_idx]
            total_dataset["train"].append(train_dict)
            total_dataset["test"].append(test_dict)
        return total_dataset
# mdpi sampling Class (random 50/50 split)
class mdpi_halfDP(BaseDivideProcess):
    """
    Random half/half train-test split, repeated 20 times with seeds 0..19.

    Each repeat shuffles the rows of all three data arrays (and their
    labels) with the same permutation, then assigns the first half to the
    training dict and the second half to the test dict.
    """

    def __init__(self, mode, model_name, dataset, rsub):
        # rsub is unused; kept for signature parity with sibling classes.
        super().__init__(mode, model_name, dataset)
        print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")

    def sampling(self):
        """Return {"train": [...], "test": [...]} with 20 shuffled 50/50 splits."""
        total_dataset = {"train": [], "test": []}
        n_rows, _ = self.data1.shape
        split = int(n_rows / 2)
        for repeat in range(20):
            seed(repeat)
            order = sample(range(n_rows), n_rows)
            shuffled = [src[order] for src in (self.data1, self.data2, self.data3)]
            tags = self.tlabel[order]
            people = self.plabel[order]
            train_dict = {f"data_{i}": arr[:split, :] for i, arr in enumerate(shuffled)}
            test_dict = {f"data_{i}": arr[split:, :] for i, arr in enumerate(shuffled)}
            train_dict["people"] = people[:split]
            train_dict["tag"] = tags[:split]
            test_dict["people"] = people[split:]
            test_dict["tag"] = tags[split:]
            total_dataset["train"].append(train_dict)
            total_dataset["test"].append(test_dict)
        return total_dataset
# mdpi sampling Class (Monte-Carlo 70/30 split)
class mdpi_MCCVDP(BaseDivideProcess):
    """
    Monte-Carlo cross-validation style split: shuffle all rows and assign
    the first 70% to training and the remaining 30% to testing; repeated
    20 times with seeds 0..19.
    """

    def __init__(self, mode, model_name, dataset, rsub):
        # rsub is unused; kept for signature parity with sibling classes.
        super().__init__(mode, model_name, dataset)
        print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")

    def sampling(self):
        """Return {"train": [...], "test": [...]} with 20 shuffled 70/30 splits."""
        total_dataset = {"train": [], "test": []}
        n_rows, _ = self.data1.shape
        cut = int(n_rows * 0.7)
        for repeat in range(20):
            seed(repeat)
            order = sample(range(n_rows), n_rows)
            shuffled = [src[order] for src in (self.data1, self.data2, self.data3)]
            tags = self.tlabel[order]
            people = self.plabel[order]
            train_dict = {f"data_{i}": arr[:cut, :] for i, arr in enumerate(shuffled)}
            test_dict = {f"data_{i}": arr[cut:, :] for i, arr in enumerate(shuffled)}
            train_dict["people"] = people[:cut]
            train_dict["tag"] = tags[:cut]
            test_dict["people"] = people[cut:]
            test_dict["tag"] = tags[cut:]
            total_dataset["train"].append(train_dict)
            total_dataset["test"].append(test_dict)
        return total_dataset
# 7 - Cross Validation sampling Class
class seven_CVDP(BaseDivideProcess):
    """
    7-fold cross-validation sampling, repeated 5 times with seeds 0..4.

    Per repeat the rows are shuffled once, then split into 7 folds of
    int(drow / 7) rows; each fold in turn is the test set and all other
    rows form the training set.  Remainder rows (drow % 7) never appear in
    a test fold but are present in every training set.
    """

    def __init__(self, mode, model_name, dataset, rsub):
        # rsub is accepted only for signature compatibility with the other
        # DivideProcess classes; it is not used here.
        super().__init__(mode, model_name, dataset)
        print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")

    def sampling(self):
        """Return {"train": [...], "test": [...]} with 5 * 7 fold entries."""
        total_dataset = dict()
        total_dataset["train"] = list()
        total_dataset["test"] = list()
        for repeat in range(5):
            seed(repeat)
            drow, _ = self.data1.shape
            rindx_list = sample(range(drow), drow)
            dataset_list = list()
            for dataset in [self.data1, self.data2, self.data3]:
                dataset_list.append(dataset[rindx_list])
            targetc = self.tlabel[rindx_list]
            targetp = self.plabel[rindx_list]
            cv_rate = int(drow / 7)
            for cvi in range(7):
                lo = cv_rate * cvi
                hi = cv_rate * (cvi + 1)
                fold = np.arange(lo, hi)
                train_dict = dict()
                test_dict = dict()
                for i, dataset in enumerate(dataset_list):
                    test_dict[f"data_{i}"] = dataset[lo:hi, :]
                test_dict["people"] = targetp[lo:hi]
                test_dict["tag"] = targetc[lo:hi]
                # np.delete drops the fold rows in one vectorised pass; the
                # original rebuilt each train array with a per-element
                # ``idx not in indexing`` membership scan against a numpy
                # array, i.e. O(rows * fold_size) per array.
                for i, dataset in enumerate(dataset_list):
                    train_dict[f"data_{i}"] = np.delete(dataset, fold, axis=0)
                train_dict["people"] = np.delete(targetp, fold)
                train_dict["tag"] = np.delete(targetc, fold)
                total_dataset["train"].append(train_dict)
                total_dataset["test"].append(test_dict)
        return total_dataset
# Selected Cross Validation sampling Class
class select_CVDP(BaseDivideProcess):
    # NOTE(review): this class looks unfinished.  The bare ``NotImplemented``
    # below is a no-op expression statement (it does NOT raise), and several
    # slices in sampling() look inconsistent -- see the inline notes.  Do not
    # use until the intended fold logic is confirmed.
    NotImplemented
    """
    mdpi sampling
    """
    def __init__(self, mode, model_name, dataset, rsub):
        # rsub is accepted but never read (matches the sibling classes).
        super().__init__(mode, model_name, dataset)
        print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
    def sampling(self):
        # Builds 10 repeats (seeds 0..9) of a 7-fold-style split; see the
        # NOTE(review) comments for the suspect expressions.
        total_dataset = dict()
        total_dataset["train"] = list()
        total_dataset["test"] = list()
        for repeat in range(10):
            seed(repeat)
            drow, _ = self.data1.shape
            train_dict = dict()
            test_dict = dict()
            rindx_list = sample(range(drow), drow)
            dataset_list = list()
            for dataset in [self.data1, self.data2, self.data3]:
                randomized = dataset[rindx_list]
                dataset_list.append(randomized)
            targetc = self.tlabel[rindx_list]
            targetp = self.plabel[rindx_list]
            cv_rate = int(drow / 7)
            for cvi in range(7):
                for i, dataset in enumerate(dataset_list):
                    # NOTE(review): cv_rate*cvi+1 selects a single row;
                    # cv_rate*(cvi+1) was probably intended.
                    train_dict[f"data_{i}"] = dataset[cv_rate*cvi: cv_rate*cvi+1, :]
                # NOTE(review): always takes the first cv_rate labels
                # regardless of cvi -- inconsistent with the data slice above.
                train_dict["people"] = targetp[:cv_rate]
                train_dict["tag"] = targetc[:cv_rate]
                if cvi == 0:
                    for i, dataset in enumerate(dataset_list):
                        test_dict[f"data_{i}"] = dataset[cv_rate:, :]
                    test_dict["people"] = targetp[cv_rate:]
                    test_dict["tag"] = targetc[cv_rate:]
                elif cvi == 6:
                    for i, dataset in enumerate(dataset_list):
                        test_dict[f"data_{i}"] = dataset[:cv_rate*cvi, :]
                    test_dict["people"] = targetp[:cv_rate*cvi]
                    test_dict["tag"] = targetc[:cv_rate*cvi]
                else:
                    for i, dataset in enumerate(dataset_list):
                        temp1 = dataset[:cv_rate*cvi, :]
                        # NOTE(review): cv_rate*cvi+1 here (vs cv_rate*(cvi+1))
                        # makes the "rest" overlap the train row above.
                        temp2 = dataset[cv_rate*cvi+1:, :]
                        test_dict[f"data_{i}"] = np.vstack([temp1, temp2])
                    # NOTE(review): targetp[cv_rate*cvi+1] is a scalar element,
                    # not a slice -- np.vstack on it likely mis-shapes or fails.
                    test_dict["people"] = np.vstack([targetp[:cv_rate*cvi], targetp[cv_rate*cvi+1]])
                    test_dict["tag"] = np.vstack([targetc[:cv_rate*cvi], targetc[cv_rate*cvi+1]])
                total_dataset["train"].append(train_dict)
                total_dataset["test"].append(test_dict)
        return total_dataset
| 37.477081
| 127
| 0.553254
| 4,621
| 40,063
| 4.559403
| 0.042631
| 0.034553
| 0.032655
| 0.01946
| 0.878067
| 0.849874
| 0.843846
| 0.822583
| 0.798139
| 0.784185
| 0
| 0.018237
| 0.325238
| 40,063
| 1,069
| 128
| 37.477081
| 0.761144
| 0.034521
| 0
| 0.715047
| 0
| 0
| 0.064615
| 0.006503
| 0
| 0
| 0
| 0
| 0.001332
| 1
| 0.045273
| false
| 0.001332
| 0.006658
| 0
| 0.095872
| 0.013316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2a720e7f5e53ae0b1f9c471339f6d93822b32c83
| 3,026
|
py
|
Python
|
tests/test_cards/test_actions/test_vassal.py
|
evanofslack/pyminion
|
0d0bfc6d8e84e9f33e617c7d01b6edb649166290
|
[
"MIT"
] | 5
|
2021-12-17T20:34:55.000Z
|
2022-01-24T15:18:05.000Z
|
tests/test_cards/test_actions/test_vassal.py
|
evanofslack/pyminion
|
0d0bfc6d8e84e9f33e617c7d01b6edb649166290
|
[
"MIT"
] | 31
|
2021-10-29T21:05:00.000Z
|
2022-03-22T03:27:14.000Z
|
tests/test_cards/test_actions/test_vassal.py
|
evanofslack/pyminion
|
0d0bfc6d8e84e9f33e617c7d01b6edb649166290
|
[
"MIT"
] | 1
|
2021-12-23T18:32:47.000Z
|
2021-12-23T18:32:47.000Z
|
from pyminion.expansions.base import estate, smithy, vassal, village
from pyminion.game import Game
from pyminion.players import Human
def test_vassal_not_action_play(human: Human, game: Game):
    """Playing Vassal by itself: card on playmat, one card discarded, +2 money."""
    human.hand.add(vassal)
    played = human.hand.cards[0]
    played.play(human, game)
    assert (len(human.hand), len(human.playmat), len(human.discard_pile)) == (0, 1, 1)
    assert (human.state.actions, human.state.money) == (0, 2)
def test_vassal_no_play(human: Human, game: Game, monkeypatch):
    """Declining the prompt ('n'): only Vassal itself ends up played."""
    human.deck.add(smithy)
    human.hand.add(vassal)
    monkeypatch.setattr("builtins.input", lambda _: "n")
    played = human.hand.cards[0]
    played.play(human, game)
    assert (len(human.hand), len(human.playmat), len(human.discard_pile)) == (0, 1, 1)
    assert (human.state.actions, human.state.money) == (0, 2)
def test_vassal_play(human: Human, game: Game, monkeypatch):
    """Accepting the prompt ('y'): the Smithy joins the playmat and the hand grows to 3."""
    human.deck.add(smithy)
    human.hand.add(vassal)
    monkeypatch.setattr("builtins.input", lambda _: "y")
    played = human.hand.cards[0]
    played.play(human, game)
    assert (len(human.hand), len(human.playmat), len(human.discard_pile)) == (3, 2, 0)
    assert (human.state.actions, human.state.money) == (0, 2)
def test_vassal_play_chain_two(human: Human, game: Game, monkeypatch):
    """Two chained Vassals: both end on the playmat, an estate is discarded, +4 money."""
    human.deck.add(vassal)
    human.hand.add(vassal)
    monkeypatch.setattr("builtins.input", lambda _: "y")
    played = human.hand.cards[0]
    played.play(human, game)
    assert (len(human.hand), len(human.playmat), len(human.discard_pile)) == (0, 2, 1)
    assert human.discard_pile.cards[-1] == estate
    assert (human.state.actions, human.state.money) == (0, 4)
def test_vassal_play_chain_three(human: Human, game: Game, monkeypatch):
    """Three chained Vassals: all three on the playmat, an estate discarded, +6 money."""
    human.deck.add(vassal)
    human.deck.add(vassal)
    human.hand.add(vassal)
    monkeypatch.setattr("builtins.input", lambda _: "y")
    played = human.hand.cards[0]
    played.play(human, game)
    assert (len(human.hand), len(human.playmat), len(human.discard_pile)) == (0, 3, 1)
    assert human.discard_pile.cards[-1] == estate
    assert (human.state.actions, human.state.money) == (0, 6)
def test_vassal_play_chain_smithy(human: Human, game: Game, monkeypatch):
    """Vassal chaining into a Smithy: both played, hand grows to 3 cards."""
    human.deck.add(smithy)
    human.hand.add(vassal)
    monkeypatch.setattr("builtins.input", lambda _: "y")
    played = human.hand.cards[0]
    played.play(human, game)
    assert (len(human.hand), len(human.playmat), len(human.discard_pile)) == (3, 2, 0)
    assert (human.state.actions, human.state.money) == (0, 2)
def test_vassal_play_chain_village(human: Human, game: Game, monkeypatch):
    """Vassal chaining into a Village: both played, 1 card in hand, 2 actions left."""
    human.deck.add(village)
    human.hand.add(vassal)
    monkeypatch.setattr("builtins.input", lambda _: "y")
    played = human.hand.cards[0]
    played.play(human, game)
    assert (len(human.hand), len(human.playmat), len(human.discard_pile)) == (1, 2, 0)
    assert (human.state.actions, human.state.money) == (2, 2)
| 29.378641
| 74
| 0.684071
| 433
| 3,026
| 4.688222
| 0.106236
| 0.093103
| 0.144828
| 0.075369
| 0.906897
| 0.863547
| 0.863547
| 0.863547
| 0.84335
| 0.84335
| 0
| 0.017735
| 0.180106
| 3,026
| 102
| 75
| 29.666667
| 0.800484
| 0.00727
| 0
| 0.77027
| 0
| 0
| 0.02998
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.094595
| false
| 0
| 0.040541
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
aa9334e718cb72938607e5b24f81b1cbfc23cbf6
| 145
|
py
|
Python
|
source/MulensModel/mulensobjects/__init__.py
|
pmehta08/MulensModel
|
261738c445a8d116d09c90e65f6e847cfc8a7ad8
|
[
"MIT"
] | 30
|
2016-08-30T23:32:43.000Z
|
2022-03-07T20:06:25.000Z
|
source/MulensModel/mulensobjects/__init__.py
|
pmehta08/MulensModel
|
261738c445a8d116d09c90e65f6e847cfc8a7ad8
|
[
"MIT"
] | 25
|
2018-08-22T19:14:22.000Z
|
2022-03-28T17:22:56.000Z
|
source/MulensModel/mulensobjects/__init__.py
|
pmehta08/MulensModel
|
261738c445a8d116d09c90e65f6e847cfc8a7ad8
|
[
"MIT"
] | 11
|
2016-10-03T16:00:50.000Z
|
2022-03-23T16:53:54.000Z
|
from MulensModel.mulensobjects.lens import *
from MulensModel.mulensobjects.source import *
from MulensModel.mulensobjects.mulenssystem import *
| 36.25
| 52
| 0.855172
| 15
| 145
| 8.266667
| 0.466667
| 0.362903
| 0.677419
| 0.548387
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082759
| 145
| 3
| 53
| 48.333333
| 0.932331
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
aa9a5c5559fb02a10b21ff25396f17c37c390b68
| 82,702
|
py
|
Python
|
subversion/bindings/swig/python/repos.py
|
ruchirarya/svn
|
81502a213251c2af21361a942bd9a8cd7d3adb9f
|
[
"Apache-2.0"
] | 7
|
2018-01-18T06:13:21.000Z
|
2020-07-09T03:46:16.000Z
|
depe/subversion/subversion/bindings/swig/python/repos.py
|
louis-tru/TouchCode2
|
91c182aeaa37fba16e381ea749d32906dab1aeea
|
[
"BSD-3-Clause-Clear"
] | 4
|
2015-01-12T22:23:41.000Z
|
2015-01-12T22:33:52.000Z
|
src/subversion/subversion/bindings/swig/python/repos.py
|
schwern/alien-svn
|
7423b08f9bc4fdf0ac0d7ea53495269b21b3e8f9
|
[
"Apache-2.0"
] | 1
|
2020-11-04T07:19:37.000Z
|
2020-11-04T07:19:37.000Z
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.9
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# SWIG bootstrap: locate and load the compiled extension module ``_repos``
# that lives next to this file (Python >= 2.6 path), falling back to a
# plain ``import _repos`` otherwise.  Auto-generated -- do not hand-edit.
from sys import version_info
if version_info >= (2,6,0):
    def swig_import_helper():
        # Find '_repos' in this file's directory via the (Python-2-era)
        # ``imp`` module; if that fails, fall back to a normal import.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_repos', [dirname(__file__)])
        except ImportError:
            import _repos
            return _repos
        if fp is not None:
            try:
                _mod = imp.load_module('_repos', fp, pathname, description)
            finally:
                # Always close the file handle opened by find_module.
                fp.close()
            return _mod
    _repos = swig_import_helper()
    del swig_import_helper
else:
    import _repos
del version_info
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # Generated SWIG helper: route attribute writes through the class's
    # __swig_setmethods__ table.  With static=1 (the default), writes to
    # names with no registered setter raise AttributeError; with static=0
    # they are stored in the instance __dict__.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Dynamic variant: static=0, so unknown attribute names are silently
    # stored in the instance __dict__ instead of raising AttributeError.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Generated SWIG helper: route attribute reads through the class's
    # __swig_getmethods__ table; unknown names raise AttributeError.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    # Best-effort repr for SWIG proxy objects.  ``self.this`` may not exist
    # yet (e.g. during construction); the generated bare ``except`` then
    # falls back to an empty description.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _copy_metadata_deep(value, old_value):
    """Copy all attributes of old_value into value, recursively traversing
    lists and dicts if needed."""
    # No-op when either side is missing or they are the same object.
    if value is None or old_value is None or value is old_value: return
    if isinstance(value, dict):
        # NOTE: dict.iteritems() exists only on Python 2 -- this generated
        # module targets Python 2 and this branch fails on Python 3.
        for k, v in value.iteritems():
            _copy_metadata_deep(v, old_value[k])
    elif isinstance(value, list):
        for v, old_v in zip(value, old_value):
            _copy_metadata_deep(v, old_v)
    else:
        try:
            value.__dict__.update(old_value.__dict__)
        except AttributeError:
            # Objects without a __dict__ (e.g. ints) are simply skipped.
            pass
def _assert_valid_deep(value):
    """Assert value's validity, recursively traversing lists and dicts."""
    if isinstance(value, dict):
        # NOTE: dict.itervalues() exists only on Python 2 (see
        # _copy_metadata_deep); this branch fails on Python 3.
        for v in value.itervalues():
            _assert_valid_deep(v)
    elif isinstance(value, list):
        for v in value:
            _assert_valid_deep(v)
    else:
        # Leaf objects validate themselves if they expose assert_valid().
        if hasattr(value, "assert_valid"):
            value.assert_valid()
import libsvn.core
import libsvn.delta
import libsvn.fs
def svn_repos_version():
    """svn_repos_version() -> svn_version_t const *"""
    # Thin generated wrapper over the compiled extension module.
    return _repos.svn_repos_version()
# Re-export of C enum/constant values from the compiled ``_repos`` module
# (node actions, load-UUID modes, authz flags, notification action codes).
# Auto-generated by SWIG -- do not hand-edit.
svn_node_action_change = _repos.svn_node_action_change
svn_node_action_add = _repos.svn_node_action_add
svn_node_action_delete = _repos.svn_node_action_delete
svn_node_action_replace = _repos.svn_node_action_replace
svn_repos_load_uuid_default = _repos.svn_repos_load_uuid_default
svn_repos_load_uuid_ignore = _repos.svn_repos_load_uuid_ignore
svn_repos_load_uuid_force = _repos.svn_repos_load_uuid_force
svn_authz_none = _repos.svn_authz_none
svn_authz_read = _repos.svn_authz_read
svn_authz_write = _repos.svn_authz_write
svn_authz_recursive = _repos.svn_authz_recursive
svn_repos_notify_warning = _repos.svn_repos_notify_warning
svn_repos_notify_dump_rev_end = _repos.svn_repos_notify_dump_rev_end
svn_repos_notify_verify_rev_end = _repos.svn_repos_notify_verify_rev_end
svn_repos_notify_dump_end = _repos.svn_repos_notify_dump_end
svn_repos_notify_verify_end = _repos.svn_repos_notify_verify_end
svn_repos_notify_pack_shard_start = _repos.svn_repos_notify_pack_shard_start
svn_repos_notify_pack_shard_end = _repos.svn_repos_notify_pack_shard_end
svn_repos_notify_pack_shard_start_revprop = _repos.svn_repos_notify_pack_shard_start_revprop
svn_repos_notify_pack_shard_end_revprop = _repos.svn_repos_notify_pack_shard_end_revprop
svn_repos_notify_load_txn_start = _repos.svn_repos_notify_load_txn_start
svn_repos_notify_load_txn_committed = _repos.svn_repos_notify_load_txn_committed
svn_repos_notify_load_node_start = _repos.svn_repos_notify_load_node_start
svn_repos_notify_load_node_done = _repos.svn_repos_notify_load_node_done
svn_repos_notify_load_copied_node = _repos.svn_repos_notify_load_copied_node
svn_repos_notify_load_normalized_mergeinfo = _repos.svn_repos_notify_load_normalized_mergeinfo
svn_repos_notify_mutex_acquired = _repos.svn_repos_notify_mutex_acquired
svn_repos_notify_recover_start = _repos.svn_repos_notify_recover_start
svn_repos_notify_upgrade_start = _repos.svn_repos_notify_upgrade_start
svn_repos_notify_load_skipped_rev = _repos.svn_repos_notify_load_skipped_rev
svn_repos_notify_verify_rev_structure = _repos.svn_repos_notify_verify_rev_structure
svn_repos_notify_warning_found_old_reference = _repos.svn_repos_notify_warning_found_old_reference
svn_repos_notify_warning_found_old_mergeinfo = _repos.svn_repos_notify_warning_found_old_mergeinfo
svn_repos_notify_warning_invalid_fspath = _repos.svn_repos_notify_warning_invalid_fspath
class svn_repos_notify_t:
    """Proxy of C svn_repos_notify_t struct"""
    # Generated SWIG proxy: field access is dispatched through the
    # __swig_setmethods__/__swig_getmethods__ tables filled in below.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_repos_notify_t, name, value)
    __swig_getmethods__ = {}
    # NOTE: this lambda __getattr__ is shadowed by the def __getattr__
    # further down (the later definition wins at class-creation time).
    __getattr__ = lambda self, name: _swig_getattr(self, svn_repos_notify_t, name)
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    # Per-field getter/setter registrations backed by the C struct.
    __swig_setmethods__["action"] = _repos.svn_repos_notify_t_action_set
    __swig_getmethods__["action"] = _repos.svn_repos_notify_t_action_get
    __swig_setmethods__["revision"] = _repos.svn_repos_notify_t_revision_set
    __swig_getmethods__["revision"] = _repos.svn_repos_notify_t_revision_get
    __swig_setmethods__["warning_str"] = _repos.svn_repos_notify_t_warning_str_set
    __swig_getmethods__["warning_str"] = _repos.svn_repos_notify_t_warning_str_get
    __swig_setmethods__["warning"] = _repos.svn_repos_notify_t_warning_set
    __swig_getmethods__["warning"] = _repos.svn_repos_notify_t_warning_get
    __swig_setmethods__["shard"] = _repos.svn_repos_notify_t_shard_set
    __swig_getmethods__["shard"] = _repos.svn_repos_notify_t_shard_get
    __swig_setmethods__["new_revision"] = _repos.svn_repos_notify_t_new_revision_set
    __swig_getmethods__["new_revision"] = _repos.svn_repos_notify_t_new_revision_get
    __swig_setmethods__["old_revision"] = _repos.svn_repos_notify_t_old_revision_set
    __swig_getmethods__["old_revision"] = _repos.svn_repos_notify_t_old_revision_get
    __swig_setmethods__["node_action"] = _repos.svn_repos_notify_t_node_action_set
    __swig_getmethods__["node_action"] = _repos.svn_repos_notify_t_node_action_get
    __swig_setmethods__["path"] = _repos.svn_repos_notify_t_path_set
    __swig_getmethods__["path"] = _repos.svn_repos_notify_t_path_get
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_repos_notify_t"""
        # Tie this proxy's validity to an APR pool via a weak reference.
        import libsvn.core, weakref
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            # Re-attach Python-side metadata previously stored for this field.
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        # Remember the Python-side value so metadata survives round-trips.
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
svn_repos_notify_t_swigregister = _repos.svn_repos_notify_t_swigregister
svn_repos_notify_t_swigregister(svn_repos_notify_t)
# --- Auto-generated SWIG wrappers -------------------------------------------
# Each function below is a thin proxy that forwards *args to the compiled C
# extension module ``_repos``; the docstrings carry the underlying C
# signatures.  Do not hand-edit -- regenerate from the SWIG interface file.
def svn_repos_notify_create(*args):
    """svn_repos_notify_create(svn_repos_notify_action_t action, apr_pool_t result_pool) -> svn_repos_notify_t"""
    return _repos.svn_repos_notify_create(*args)
def svn_repos_find_root_path(*args):
    """svn_repos_find_root_path(char const * path, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_find_root_path(*args)
def svn_repos_open2(*args):
    """svn_repos_open2(char const * path, apr_hash_t fs_config, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_open2(*args)
def svn_repos_open(*args):
    """svn_repos_open(char const * path, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_open(*args)
def svn_repos_create(*args):
    """
    svn_repos_create(char const * path, char const * unused_1, char const * unused_2, apr_hash_t config,
        apr_hash_t fs_config, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_create(*args)
def svn_repos_upgrade2(*args):
    """
    svn_repos_upgrade2(char const * path, svn_boolean_t nonblocking, svn_repos_notify_func_t notify_func,
        void * notify_baton, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_upgrade2(*args)
def svn_repos_upgrade(*args):
    """
    svn_repos_upgrade(char const * path, svn_boolean_t nonblocking, svn_error_t *(*)(void *) start_callback,
        void * start_callback_baton, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_upgrade(*args)
def svn_repos_delete(*args):
    """svn_repos_delete(char const * path, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_delete(*args)
def svn_repos_has_capability(*args):
    """svn_repos_has_capability(svn_repos_t * repos, char const * capability, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_has_capability(*args)
SVN_REPOS_CAPABILITY_MERGEINFO = _repos.SVN_REPOS_CAPABILITY_MERGEINFO
def svn_repos_fs(*args):
    """svn_repos_fs(svn_repos_t * repos) -> svn_fs_t *"""
    return _repos.svn_repos_fs(*args)
def svn_repos_hotcopy2(*args):
    """
    svn_repos_hotcopy2(char const * src_path, char const * dst_path, svn_boolean_t clean_logs, svn_boolean_t incremental,
        svn_cancel_func_t cancel_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_hotcopy2(*args)
def svn_repos_hotcopy(*args):
    """svn_repos_hotcopy(char const * src_path, char const * dst_path, svn_boolean_t clean_logs, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_hotcopy(*args)
def svn_repos_fs_pack2(*args):
    """
    svn_repos_fs_pack2(svn_repos_t * repos, svn_repos_notify_func_t notify_func, void * notify_baton, svn_cancel_func_t cancel_func,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_pack2(*args)
def svn_repos_fs_pack(*args):
    """
    svn_repos_fs_pack(svn_repos_t * repos, svn_fs_pack_notify_t notify_func, void * notify_baton, svn_cancel_func_t cancel_func,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_pack(*args)
def svn_repos_recover4(*args):
    """
    svn_repos_recover4(char const * path, svn_boolean_t nonblocking, svn_repos_notify_func_t notify_func,
        void * notify_baton, svn_cancel_func_t cancel_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_recover4(*args)
def svn_repos_recover3(*args):
    """
    svn_repos_recover3(char const * path, svn_boolean_t nonblocking, svn_error_t *(*)(void *) start_callback,
        void * start_callback_baton, svn_cancel_func_t cancel_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_recover3(*args)
def svn_repos_recover2(*args):
    """
    svn_repos_recover2(char const * path, svn_boolean_t nonblocking, svn_error_t *(*)(void *) start_callback,
        void * start_callback_baton, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_recover2(*args)
def svn_repos_recover(*args):
    """svn_repos_recover(char const * path, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_recover(*args)
def svn_repos_freeze(*args):
    """svn_repos_freeze(apr_array_header_t paths, svn_repos_freeze_func_t freeze_func, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_freeze(*args)
def svn_repos_db_logfiles(*args):
    """svn_repos_db_logfiles(char const * path, svn_boolean_t only_unused, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_db_logfiles(*args)
def svn_repos_path(*args):
    """svn_repos_path(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_path(*args)
def svn_repos_db_env(*args):
    """svn_repos_db_env(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_db_env(*args)
def svn_repos_conf_dir(*args):
    """svn_repos_conf_dir(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_conf_dir(*args)
def svn_repos_svnserve_conf(*args):
    """svn_repos_svnserve_conf(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_svnserve_conf(*args)
def svn_repos_lock_dir(*args):
    """svn_repos_lock_dir(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_lock_dir(*args)
def svn_repos_db_lockfile(*args):
    """svn_repos_db_lockfile(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_db_lockfile(*args)
def svn_repos_db_logs_lockfile(*args):
    """svn_repos_db_logs_lockfile(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_db_logs_lockfile(*args)
def svn_repos_hook_dir(*args):
    """svn_repos_hook_dir(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_hook_dir(*args)
def svn_repos_start_commit_hook(*args):
    """svn_repos_start_commit_hook(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_start_commit_hook(*args)
def svn_repos_pre_commit_hook(*args):
    """svn_repos_pre_commit_hook(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_pre_commit_hook(*args)
def svn_repos_post_commit_hook(*args):
    """svn_repos_post_commit_hook(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_post_commit_hook(*args)
def svn_repos_pre_revprop_change_hook(*args):
    """svn_repos_pre_revprop_change_hook(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_pre_revprop_change_hook(*args)
def svn_repos_post_revprop_change_hook(*args):
    """svn_repos_post_revprop_change_hook(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_post_revprop_change_hook(*args)
def svn_repos_pre_lock_hook(*args):
    """svn_repos_pre_lock_hook(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_pre_lock_hook(*args)
def svn_repos_post_lock_hook(*args):
    """svn_repos_post_lock_hook(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_post_lock_hook(*args)
def svn_repos_pre_unlock_hook(*args):
    """svn_repos_pre_unlock_hook(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_pre_unlock_hook(*args)
def svn_repos_post_unlock_hook(*args):
    """svn_repos_post_unlock_hook(svn_repos_t * repos, apr_pool_t pool) -> char const *"""
    return _repos.svn_repos_post_unlock_hook(*args)
def svn_repos_hooks_setenv(*args):
    """svn_repos_hooks_setenv(svn_repos_t * repos, char const * hooks_env_path, apr_pool_t scratch_pool) -> svn_error_t"""
    return _repos.svn_repos_hooks_setenv(*args)
# --- Update/status report machinery ------------------------------------------
# SWIG-generated thin wrappers around the svn_repos report API.  The versioned
# suffixes (3, 2, unsuffixed) correspond to successive revisions of the C API;
# the unsuffixed variants are the oldest.  All forward *args verbatim to
# `_repos`.
def svn_repos_begin_report3(*args):
    """
    svn_repos_begin_report3(svn_revnum_t revnum, svn_repos_t * repos, char const * fs_base, char const * target,
        char const * tgt_path, svn_boolean_t text_deltas, svn_depth_t depth, svn_boolean_t ignore_ancestry,
        svn_boolean_t send_copyfrom_args, svn_delta_editor_t editor,
        void * edit_baton, svn_repos_authz_func_t authz_read_func, apr_size_t zero_copy_limit,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_begin_report3(*args)
def svn_repos_begin_report2(*args):
    """
    svn_repos_begin_report2(svn_revnum_t revnum, svn_repos_t * repos, char const * fs_base, char const * target,
        char const * tgt_path, svn_boolean_t text_deltas, svn_depth_t depth, svn_boolean_t ignore_ancestry,
        svn_boolean_t send_copyfrom_args, svn_delta_editor_t editor,
        void * edit_baton, svn_repos_authz_func_t authz_read_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_begin_report2(*args)
def svn_repos_begin_report(*args):
    """
    svn_repos_begin_report(svn_revnum_t revnum, char const * username, svn_repos_t * repos, char const * fs_base,
        char const * target, char const * tgt_path, svn_boolean_t text_deltas,
        svn_boolean_t recurse, svn_boolean_t ignore_ancestry, svn_delta_editor_t editor,
        void * edit_baton, svn_repos_authz_func_t authz_read_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_begin_report(*args)
def svn_repos_set_path3(*args):
    """
    svn_repos_set_path3(void * report_baton, char const * path, svn_revnum_t revision, svn_depth_t depth,
        svn_boolean_t start_empty, char const * lock_token, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_set_path3(*args)
def svn_repos_set_path2(*args):
    """
    svn_repos_set_path2(void * report_baton, char const * path, svn_revnum_t revision, svn_boolean_t start_empty,
        char const * lock_token, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_set_path2(*args)
def svn_repos_set_path(*args):
    """
    svn_repos_set_path(void * report_baton, char const * path, svn_revnum_t revision, svn_boolean_t start_empty,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_set_path(*args)
def svn_repos_link_path3(*args):
    """
    svn_repos_link_path3(void * report_baton, char const * path, char const * link_path, svn_revnum_t revision,
        svn_depth_t depth, svn_boolean_t start_empty, char const * lock_token,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_link_path3(*args)
def svn_repos_link_path2(*args):
    """
    svn_repos_link_path2(void * report_baton, char const * path, char const * link_path, svn_revnum_t revision,
        svn_boolean_t start_empty, char const * lock_token, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_link_path2(*args)
def svn_repos_link_path(*args):
    """
    svn_repos_link_path(void * report_baton, char const * path, char const * link_path, svn_revnum_t revision,
        svn_boolean_t start_empty, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_link_path(*args)
def svn_repos_delete_path(*args):
    """svn_repos_delete_path(void * report_baton, char const * path, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_delete_path(*args)
def svn_repos_finish_report(*args):
    """svn_repos_finish_report(void * report_baton, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_finish_report(*args)
def svn_repos_abort_report(*args):
    """svn_repos_abort_report(void * report_baton, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_abort_report(*args)
# --- Tree deltas, replay, and commit editors ---------------------------------
# SWIG-generated thin wrappers forwarding to the `_repos` C extension.
def svn_repos_dir_delta2(*args):
    """
    svn_repos_dir_delta2(svn_fs_root_t * src_root, char const * src_parent_dir, char const * src_entry, svn_fs_root_t * tgt_root,
        char const * tgt_path, svn_delta_editor_t editor, void * edit_baton,
        svn_repos_authz_func_t authz_read_func, svn_boolean_t text_deltas,
        svn_depth_t depth, svn_boolean_t entry_props, svn_boolean_t ignore_ancestry,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_dir_delta2(*args)
def svn_repos_dir_delta(*args):
    """
    svn_repos_dir_delta(svn_fs_root_t * src_root, char const * src_parent_dir, char const * src_entry, svn_fs_root_t * tgt_root,
        char const * tgt_path, svn_delta_editor_t editor, void * edit_baton,
        svn_repos_authz_func_t authz_read_func, svn_boolean_t text_deltas,
        svn_boolean_t recurse, svn_boolean_t entry_props, svn_boolean_t ignore_ancestry,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_dir_delta(*args)
def svn_repos_replay2(*args):
    """
    svn_repos_replay2(svn_fs_root_t * root, char const * base_dir, svn_revnum_t low_water_mark, svn_boolean_t send_deltas,
        svn_delta_editor_t editor, void * edit_baton, svn_repos_authz_func_t authz_read_func,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_replay2(*args)
def svn_repos_replay(*args):
    """svn_repos_replay(svn_fs_root_t * root, svn_delta_editor_t editor, void * edit_baton, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_replay(*args)
def svn_repos_get_commit_editor5(*args):
    """
    svn_repos_get_commit_editor5(svn_repos_t * repos, svn_fs_txn_t * txn, char const * repos_url, char const * base_path,
        apr_hash_t revprop_table, svn_commit_callback2_t commit_callback, svn_repos_authz_callback_t authz_callback,
        void * authz_baton, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_commit_editor5(*args)
def svn_repos_get_commit_editor4(*args):
    """
    svn_repos_get_commit_editor4(svn_repos_t * repos, svn_fs_txn_t * txn, char const * repos_url, char const * base_path,
        char const * user, char const * log_msg, svn_commit_callback2_t commit_callback,
        svn_repos_authz_callback_t authz_callback, void * authz_baton,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_commit_editor4(*args)
def svn_repos_get_commit_editor3(*args):
    """
    svn_repos_get_commit_editor3(svn_repos_t * repos, svn_fs_txn_t * txn, char const * repos_url, char const * base_path,
        char const * user, char const * log_msg, svn_commit_callback_t callback,
        svn_repos_authz_callback_t authz_callback, void * authz_baton, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_commit_editor3(*args)
def svn_repos_get_commit_editor2(*args):
    """
    svn_repos_get_commit_editor2(svn_repos_t * repos, svn_fs_txn_t * txn, char const * repos_url, char const * base_path,
        char const * user, char const * log_msg, svn_commit_callback_t callback,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_commit_editor2(*args)
def svn_repos_get_commit_editor(*args):
    """
    svn_repos_get_commit_editor(svn_repos_t * repos, char const * repos_url, char const * base_path, char const * user,
        char const * log_msg, svn_commit_callback_t callback, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_commit_editor(*args)
# --- Revision lookup, history tracing, and log retrieval ---------------------
# SWIG-generated thin wrappers forwarding to the `_repos` C extension.
def svn_repos_dated_revision(*args):
    """svn_repos_dated_revision(svn_repos_t * repos, apr_time_t tm, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_dated_revision(*args)
def svn_repos_get_committed_info(*args):
    """svn_repos_get_committed_info(svn_fs_root_t * root, char const * path, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_get_committed_info(*args)
def svn_repos_stat(*args):
    """svn_repos_stat(svn_fs_root_t * root, char const * path, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_stat(*args)
def svn_repos_deleted_rev(*args):
    """svn_repos_deleted_rev(svn_fs_t * fs, char const * path, svn_revnum_t start, svn_revnum_t end, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_deleted_rev(*args)
def svn_repos_history2(*args):
    """
    svn_repos_history2(svn_fs_t * fs, char const * path, svn_repos_history_func_t history_func, svn_repos_authz_func_t authz_read_func,
        svn_revnum_t start, svn_revnum_t end, svn_boolean_t cross_copies,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_history2(*args)
def svn_repos_history(*args):
    """
    svn_repos_history(svn_fs_t * fs, char const * path, svn_repos_history_func_t history_func, svn_revnum_t start,
        svn_revnum_t end, svn_boolean_t cross_copies, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_history(*args)
def svn_repos_trace_node_locations(*args):
    """
    svn_repos_trace_node_locations(svn_fs_t * fs, char const * fs_path, svn_revnum_t peg_revision, apr_array_header_t location_revisions,
        svn_repos_authz_func_t authz_read_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_trace_node_locations(*args)
def svn_repos_node_location_segments(*args):
    """
    svn_repos_node_location_segments(svn_repos_t * repos, char const * path, svn_revnum_t peg_revision, svn_revnum_t start_rev,
        svn_revnum_t end_rev, svn_location_segment_receiver_t receiver, svn_repos_authz_func_t authz_read_func,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_node_location_segments(*args)
def svn_repos_get_logs4(*args):
    """
    svn_repos_get_logs4(svn_repos_t * repos, apr_array_header_t paths, svn_revnum_t start, svn_revnum_t end,
        int limit, svn_boolean_t discover_changed_paths, svn_boolean_t strict_node_history,
        svn_boolean_t include_merged_revisions, apr_array_header_t revprops,
        svn_repos_authz_func_t authz_read_func, svn_log_entry_receiver_t receiver,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_logs4(*args)
def svn_repos_get_logs3(*args):
    """
    svn_repos_get_logs3(svn_repos_t * repos, apr_array_header_t paths, svn_revnum_t start, svn_revnum_t end,
        int limit, svn_boolean_t discover_changed_paths, svn_boolean_t strict_node_history,
        svn_repos_authz_func_t authz_read_func, svn_log_message_receiver_t receiver,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_logs3(*args)
def svn_repos_get_logs2(*args):
    """
    svn_repos_get_logs2(svn_repos_t * repos, apr_array_header_t paths, svn_revnum_t start, svn_revnum_t end,
        svn_boolean_t discover_changed_paths, svn_boolean_t strict_node_history,
        svn_repos_authz_func_t authz_read_func, svn_log_message_receiver_t receiver,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_logs2(*args)
def svn_repos_get_logs(*args):
    """
    svn_repos_get_logs(svn_repos_t * repos, apr_array_header_t paths, svn_revnum_t start, svn_revnum_t end,
        svn_boolean_t discover_changed_paths, svn_boolean_t strict_node_history,
        svn_log_message_receiver_t receiver, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_logs(*args)
# --- Mergeinfo, file revisions, transactions, and locking --------------------
# SWIG-generated thin wrappers forwarding to the `_repos` C extension.
def svn_repos_fs_get_mergeinfo(*args):
    """
    svn_repos_fs_get_mergeinfo(svn_repos_t * repos, apr_array_header_t paths, svn_revnum_t revision, svn_mergeinfo_inheritance_t inherit,
        svn_boolean_t include_descendants, svn_repos_authz_func_t authz_read_func,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_get_mergeinfo(*args)
def svn_repos_get_file_revs2(*args):
    """
    svn_repos_get_file_revs2(svn_repos_t * repos, char const * path, svn_revnum_t start, svn_revnum_t end, svn_boolean_t include_merged_revisions,
        svn_repos_authz_func_t authz_read_func,
        svn_file_rev_handler_t handler, void * handler_baton, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_file_revs2(*args)
def svn_repos_get_file_revs(*args):
    """
    svn_repos_get_file_revs(svn_repos_t * repos, char const * path, svn_revnum_t start, svn_revnum_t end, svn_repos_authz_func_t authz_read_func,
        svn_repos_file_rev_handler_t handler,
        void * handler_baton, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_file_revs(*args)
def svn_repos_fs_commit_txn(*args):
    """svn_repos_fs_commit_txn(svn_repos_t * repos, svn_fs_txn_t * txn, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_fs_commit_txn(*args)
def svn_repos_fs_begin_txn_for_commit2(*args):
    """svn_repos_fs_begin_txn_for_commit2(svn_repos_t * repos, svn_revnum_t rev, apr_hash_t revprop_table, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_fs_begin_txn_for_commit2(*args)
def svn_repos_fs_begin_txn_for_commit(*args):
    """
    svn_repos_fs_begin_txn_for_commit(svn_repos_t * repos, svn_revnum_t rev, char const * author, char const * log_msg,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_begin_txn_for_commit(*args)
def svn_repos_fs_begin_txn_for_update(*args):
    """svn_repos_fs_begin_txn_for_update(svn_repos_t * repos, svn_revnum_t rev, char const * author, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_fs_begin_txn_for_update(*args)
def svn_repos_fs_lock(*args):
    """
    svn_repos_fs_lock(svn_repos_t * repos, char const * path, char const * token, char const * comment,
        svn_boolean_t is_dav_comment, apr_time_t expiration_date, svn_revnum_t current_rev,
        svn_boolean_t steal_lock, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_lock(*args)
def svn_repos_fs_unlock(*args):
    """
    svn_repos_fs_unlock(svn_repos_t * repos, char const * path, char const * token, svn_boolean_t break_lock,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_unlock(*args)
def svn_repos_fs_get_locks2(*args):
    """
    svn_repos_fs_get_locks2(svn_repos_t * repos, char const * path, svn_depth_t depth, svn_repos_authz_func_t authz_read_func,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_get_locks2(*args)
def svn_repos_fs_get_locks(*args):
    """svn_repos_fs_get_locks(svn_repos_t * repos, char const * path, svn_repos_authz_func_t authz_read_func, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_fs_get_locks(*args)
# --- Revision-, node-, and transaction-property manipulation -----------------
# SWIG-generated thin wrappers forwarding to the `_repos` C extension.
def svn_repos_fs_change_rev_prop4(*args):
    """
    svn_repos_fs_change_rev_prop4(svn_repos_t * repos, svn_revnum_t rev, char const * author, char const * name, svn_string_t const *const * old_value_p,
        svn_string_t const * new_value, svn_boolean_t use_pre_revprop_change_hook,
        svn_boolean_t use_post_revprop_change_hook,
        svn_repos_authz_func_t authz_read_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_change_rev_prop4(*args)
def svn_repos_fs_change_rev_prop3(*args):
    """
    svn_repos_fs_change_rev_prop3(svn_repos_t * repos, svn_revnum_t rev, char const * author, char const * name, svn_string_t const * new_value,
        svn_boolean_t use_pre_revprop_change_hook, svn_boolean_t use_post_revprop_change_hook,
        svn_repos_authz_func_t authz_read_func,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_change_rev_prop3(*args)
def svn_repos_fs_change_rev_prop2(*args):
    """
    svn_repos_fs_change_rev_prop2(svn_repos_t * repos, svn_revnum_t rev, char const * author, char const * name, svn_string_t const * new_value,
        svn_repos_authz_func_t authz_read_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_change_rev_prop2(*args)
def svn_repos_fs_change_rev_prop(*args):
    """
    svn_repos_fs_change_rev_prop(svn_repos_t * repos, svn_revnum_t rev, char const * author, char const * name, svn_string_t const * new_value,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_change_rev_prop(*args)
def svn_repos_fs_revision_prop(*args):
    """
    svn_repos_fs_revision_prop(svn_repos_t * repos, svn_revnum_t rev, char const * propname, svn_repos_authz_func_t authz_read_func,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_revision_prop(*args)
def svn_repos_fs_revision_proplist(*args):
    """svn_repos_fs_revision_proplist(svn_repos_t * repos, svn_revnum_t rev, svn_repos_authz_func_t authz_read_func, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_fs_revision_proplist(*args)
def svn_repos_fs_change_node_prop(*args):
    """
    svn_repos_fs_change_node_prop(svn_fs_root_t * root, char const * path, char const * name, svn_string_t const * value,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_fs_change_node_prop(*args)
def svn_repos_fs_change_txn_prop(*args):
    """svn_repos_fs_change_txn_prop(svn_fs_txn_t * txn, char const * name, svn_string_t const * value, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_fs_change_txn_prop(*args)
def svn_repos_fs_change_txn_props(*args):
    """svn_repos_fs_change_txn_props(svn_fs_txn_t * txn, apr_array_header_t props, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_fs_change_txn_props(*args)
class svn_repos_node_t:
    """Proxy of C svn_repos_node_t struct"""
    # SWIG dispatch tables mapping attribute names to the C getter/setter
    # functions in `_repos`.  The lambda __setattr__/__getattr__ bindings
    # below are later shadowed in this class namespace by the full defs
    # further down, which add pool-validity checks.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_repos_node_t, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, svn_repos_node_t, name)
    # Instances are only created by the C layer, never directly from Python.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    # Struct-field accessors generated by SWIG, one get/set pair per field.
    __swig_setmethods__["kind"] = _repos.svn_repos_node_t_kind_set
    __swig_getmethods__["kind"] = _repos.svn_repos_node_t_kind_get
    __swig_setmethods__["action"] = _repos.svn_repos_node_t_action_set
    __swig_getmethods__["action"] = _repos.svn_repos_node_t_action_get
    __swig_setmethods__["text_mod"] = _repos.svn_repos_node_t_text_mod_set
    __swig_getmethods__["text_mod"] = _repos.svn_repos_node_t_text_mod_get
    __swig_setmethods__["prop_mod"] = _repos.svn_repos_node_t_prop_mod_set
    __swig_getmethods__["prop_mod"] = _repos.svn_repos_node_t_prop_mod_get
    __swig_setmethods__["name"] = _repos.svn_repos_node_t_name_set
    __swig_getmethods__["name"] = _repos.svn_repos_node_t_name_get
    __swig_setmethods__["copyfrom_rev"] = _repos.svn_repos_node_t_copyfrom_rev_set
    __swig_getmethods__["copyfrom_rev"] = _repos.svn_repos_node_t_copyfrom_rev_get
    __swig_setmethods__["copyfrom_path"] = _repos.svn_repos_node_t_copyfrom_path_set
    __swig_getmethods__["copyfrom_path"] = _repos.svn_repos_node_t_copyfrom_path_get
    __swig_setmethods__["sibling"] = _repos.svn_repos_node_t_sibling_set
    __swig_getmethods__["sibling"] = _repos.svn_repos_node_t_sibling_get
    __swig_setmethods__["child"] = _repos.svn_repos_node_t_child_set
    __swig_getmethods__["child"] = _repos.svn_repos_node_t_child_get
    __swig_setmethods__["parent"] = _repos.svn_repos_node_t_parent_set
    __swig_getmethods__["parent"] = _repos.svn_repos_node_t_parent_get
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_repos_node_t"""
        # Record the APR pool this object lives in (defaulting to the global
        # application pool) and keep a weakref to its validity flag so we can
        # detect use-after-free of pool memory.  Writes go through __dict__
        # directly to bypass the custom __setattr__.
        import libsvn.core, weakref
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    def __getattr__(self, name):
        """Get an attribute from this object"""
        # Validate the pool first, then delegate to the SWIG getter; any
        # Python-side metadata stored for this member is copied onto the
        # returned value and checked for validity as well.
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        # Remember the Python-side value in _members (so metadata survives a
        # round-trip through the C layer) before handing it to SWIG.
        self.assert_valid()
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
# Register the proxy class with the C extension so C-created structs wrap
# into svn_repos_node_t instances.
svn_repos_node_t_swigregister = _repos.svn_repos_node_t_swigregister
svn_repos_node_t_swigregister(svn_repos_node_t)
# --- Node-tree editor --------------------------------------------------------
# SWIG-generated thin wrappers forwarding to the `_repos` C extension.
def svn_repos_node_editor(*args):
    """
    svn_repos_node_editor(svn_repos_t * repos, svn_fs_root_t * base_root, svn_fs_root_t * root, apr_pool_t node_pool,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_node_editor(*args)
def svn_repos_node_from_baton(*args):
    """svn_repos_node_from_baton(void * edit_baton) -> svn_repos_node_t"""
    return _repos.svn_repos_node_from_baton(*args)
# Dumpfile format constants re-exported verbatim from the C extension: the
# magic header / format version, and the header-field names used in
# `svnadmin dump` streams (revision, node, checksum, and delta records).
SVN_REPOS_DUMPFILE_MAGIC_HEADER = _repos.SVN_REPOS_DUMPFILE_MAGIC_HEADER
SVN_REPOS_DUMPFILE_FORMAT_VERSION = _repos.SVN_REPOS_DUMPFILE_FORMAT_VERSION
SVN_REPOS_DUMPFILE_FORMAT_VERSION_DELTAS = _repos.SVN_REPOS_DUMPFILE_FORMAT_VERSION_DELTAS
SVN_REPOS_DUMPFILE_UUID = _repos.SVN_REPOS_DUMPFILE_UUID
SVN_REPOS_DUMPFILE_CONTENT_LENGTH = _repos.SVN_REPOS_DUMPFILE_CONTENT_LENGTH
SVN_REPOS_DUMPFILE_REVISION_NUMBER = _repos.SVN_REPOS_DUMPFILE_REVISION_NUMBER
SVN_REPOS_DUMPFILE_NODE_PATH = _repos.SVN_REPOS_DUMPFILE_NODE_PATH
SVN_REPOS_DUMPFILE_NODE_KIND = _repos.SVN_REPOS_DUMPFILE_NODE_KIND
SVN_REPOS_DUMPFILE_NODE_ACTION = _repos.SVN_REPOS_DUMPFILE_NODE_ACTION
SVN_REPOS_DUMPFILE_NODE_COPYFROM_PATH = _repos.SVN_REPOS_DUMPFILE_NODE_COPYFROM_PATH
SVN_REPOS_DUMPFILE_NODE_COPYFROM_REV = _repos.SVN_REPOS_DUMPFILE_NODE_COPYFROM_REV
SVN_REPOS_DUMPFILE_TEXT_COPY_SOURCE_MD5 = _repos.SVN_REPOS_DUMPFILE_TEXT_COPY_SOURCE_MD5
SVN_REPOS_DUMPFILE_TEXT_COPY_SOURCE_SHA1 = _repos.SVN_REPOS_DUMPFILE_TEXT_COPY_SOURCE_SHA1
SVN_REPOS_DUMPFILE_TEXT_COPY_SOURCE_CHECKSUM = _repos.SVN_REPOS_DUMPFILE_TEXT_COPY_SOURCE_CHECKSUM
SVN_REPOS_DUMPFILE_TEXT_CONTENT_MD5 = _repos.SVN_REPOS_DUMPFILE_TEXT_CONTENT_MD5
SVN_REPOS_DUMPFILE_TEXT_CONTENT_SHA1 = _repos.SVN_REPOS_DUMPFILE_TEXT_CONTENT_SHA1
SVN_REPOS_DUMPFILE_TEXT_CONTENT_CHECKSUM = _repos.SVN_REPOS_DUMPFILE_TEXT_CONTENT_CHECKSUM
SVN_REPOS_DUMPFILE_PROP_CONTENT_LENGTH = _repos.SVN_REPOS_DUMPFILE_PROP_CONTENT_LENGTH
SVN_REPOS_DUMPFILE_TEXT_CONTENT_LENGTH = _repos.SVN_REPOS_DUMPFILE_TEXT_CONTENT_LENGTH
SVN_REPOS_DUMPFILE_PROP_DELTA = _repos.SVN_REPOS_DUMPFILE_PROP_DELTA
SVN_REPOS_DUMPFILE_TEXT_DELTA = _repos.SVN_REPOS_DUMPFILE_TEXT_DELTA
SVN_REPOS_DUMPFILE_TEXT_DELTA_BASE_MD5 = _repos.SVN_REPOS_DUMPFILE_TEXT_DELTA_BASE_MD5
SVN_REPOS_DUMPFILE_TEXT_DELTA_BASE_SHA1 = _repos.SVN_REPOS_DUMPFILE_TEXT_DELTA_BASE_SHA1
SVN_REPOS_DUMPFILE_TEXT_DELTA_BASE_CHECKSUM = _repos.SVN_REPOS_DUMPFILE_TEXT_DELTA_BASE_CHECKSUM
# --- Repository verification, dumping, and loading ---------------------------
# SWIG-generated thin wrappers forwarding to the `_repos` C extension.
def svn_repos_verify_fs2(*args):
    """
    svn_repos_verify_fs2(svn_repos_t * repos, svn_revnum_t start_rev, svn_revnum_t end_rev, svn_repos_notify_func_t notify_func,
        void * notify_baton, svn_cancel_func_t cancel, void * cancel_baton,
        apr_pool_t scratch_pool) -> svn_error_t
    """
    return _repos.svn_repos_verify_fs2(*args)
def svn_repos_verify_fs(*args):
    """
    svn_repos_verify_fs(svn_repos_t * repos, svn_stream_t * feedback_stream, svn_revnum_t start_rev, svn_revnum_t end_rev,
        svn_cancel_func_t cancel_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_verify_fs(*args)
def svn_repos_dump_fs3(*args):
    """
    svn_repos_dump_fs3(svn_repos_t * repos, svn_stream_t * dumpstream, svn_revnum_t start_rev, svn_revnum_t end_rev,
        svn_boolean_t incremental, svn_boolean_t use_deltas, svn_repos_notify_func_t notify_func,
        void * notify_baton, svn_cancel_func_t cancel_func,
        apr_pool_t scratch_pool) -> svn_error_t
    """
    return _repos.svn_repos_dump_fs3(*args)
def svn_repos_dump_fs2(*args):
    """
    svn_repos_dump_fs2(svn_repos_t * repos, svn_stream_t * dumpstream, svn_stream_t * feedback_stream, svn_revnum_t start_rev,
        svn_revnum_t end_rev, svn_boolean_t incremental, svn_boolean_t use_deltas,
        svn_cancel_func_t cancel_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_dump_fs2(*args)
def svn_repos_dump_fs(*args):
    """
    svn_repos_dump_fs(svn_repos_t * repos, svn_stream_t * dumpstream, svn_stream_t * feedback_stream, svn_revnum_t start_rev,
        svn_revnum_t end_rev, svn_boolean_t incremental, svn_cancel_func_t cancel_func,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_dump_fs(*args)
def svn_repos_load_fs4(*args):
    """
    svn_repos_load_fs4(svn_repos_t * repos, svn_stream_t * dumpstream, svn_revnum_t start_rev, svn_revnum_t end_rev,
        enum svn_repos_load_uuid uuid_action, char const * parent_dir,
        svn_boolean_t use_pre_commit_hook, svn_boolean_t use_post_commit_hook, svn_boolean_t validate_props,
        svn_repos_notify_func_t notify_func, void * notify_baton,
        svn_cancel_func_t cancel_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_load_fs4(*args)
def svn_repos_load_fs3(*args):
    """
    svn_repos_load_fs3(svn_repos_t * repos, svn_stream_t * dumpstream, enum svn_repos_load_uuid uuid_action,
        char const * parent_dir, svn_boolean_t use_pre_commit_hook, svn_boolean_t use_post_commit_hook,
        svn_boolean_t validate_props, svn_repos_notify_func_t notify_func,
        void * notify_baton, svn_cancel_func_t cancel_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_load_fs3(*args)
def svn_repos_load_fs2(*args):
    """
    svn_repos_load_fs2(svn_repos_t * repos, svn_stream_t * dumpstream, svn_stream_t * feedback_stream, enum svn_repos_load_uuid uuid_action,
        char const * parent_dir, svn_boolean_t use_pre_commit_hook,
        svn_boolean_t use_post_commit_hook, svn_cancel_func_t cancel_func,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_load_fs2(*args)
def svn_repos_load_fs(*args):
    """
    svn_repos_load_fs(svn_repos_t * repos, svn_stream_t * dumpstream, svn_stream_t * feedback_stream, enum svn_repos_load_uuid uuid_action,
        char const * parent_dir, svn_cancel_func_t cancel_func,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_load_fs(*args)
class svn_repos_parse_fns3_t:
    """Proxy of C svn_repos_parse_fns3_t struct"""
    # SWIG dispatch tables mapping attribute names to the C getter/setter
    # functions in `_repos`.  The lambda __setattr__/__getattr__ bindings
    # below are later shadowed in this class namespace by the full defs
    # further down, which add pool-validity checks.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_repos_parse_fns3_t, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, svn_repos_parse_fns3_t, name)
    # Instances are only created by the C layer, never directly from Python.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    # Callback-field accessors generated by SWIG, one get/set pair per
    # function pointer in the C vtable struct.
    __swig_setmethods__["magic_header_record"] = _repos.svn_repos_parse_fns3_t_magic_header_record_set
    __swig_getmethods__["magic_header_record"] = _repos.svn_repos_parse_fns3_t_magic_header_record_get
    __swig_setmethods__["uuid_record"] = _repos.svn_repos_parse_fns3_t_uuid_record_set
    __swig_getmethods__["uuid_record"] = _repos.svn_repos_parse_fns3_t_uuid_record_get
    __swig_setmethods__["new_revision_record"] = _repos.svn_repos_parse_fns3_t_new_revision_record_set
    __swig_getmethods__["new_revision_record"] = _repos.svn_repos_parse_fns3_t_new_revision_record_get
    __swig_setmethods__["new_node_record"] = _repos.svn_repos_parse_fns3_t_new_node_record_set
    __swig_getmethods__["new_node_record"] = _repos.svn_repos_parse_fns3_t_new_node_record_get
    __swig_setmethods__["set_revision_property"] = _repos.svn_repos_parse_fns3_t_set_revision_property_set
    __swig_getmethods__["set_revision_property"] = _repos.svn_repos_parse_fns3_t_set_revision_property_get
    __swig_setmethods__["set_node_property"] = _repos.svn_repos_parse_fns3_t_set_node_property_set
    __swig_getmethods__["set_node_property"] = _repos.svn_repos_parse_fns3_t_set_node_property_get
    __swig_setmethods__["delete_node_property"] = _repos.svn_repos_parse_fns3_t_delete_node_property_set
    __swig_getmethods__["delete_node_property"] = _repos.svn_repos_parse_fns3_t_delete_node_property_get
    __swig_setmethods__["remove_node_props"] = _repos.svn_repos_parse_fns3_t_remove_node_props_set
    __swig_getmethods__["remove_node_props"] = _repos.svn_repos_parse_fns3_t_remove_node_props_get
    __swig_setmethods__["set_fulltext"] = _repos.svn_repos_parse_fns3_t_set_fulltext_set
    __swig_getmethods__["set_fulltext"] = _repos.svn_repos_parse_fns3_t_set_fulltext_get
    __swig_setmethods__["apply_textdelta"] = _repos.svn_repos_parse_fns3_t_apply_textdelta_set
    __swig_getmethods__["apply_textdelta"] = _repos.svn_repos_parse_fns3_t_apply_textdelta_get
    __swig_setmethods__["close_node"] = _repos.svn_repos_parse_fns3_t_close_node_set
    __swig_getmethods__["close_node"] = _repos.svn_repos_parse_fns3_t_close_node_get
    __swig_setmethods__["close_revision"] = _repos.svn_repos_parse_fns3_t_close_revision_set
    __swig_getmethods__["close_revision"] = _repos.svn_repos_parse_fns3_t_close_revision_get
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_repos_parse_fns3_t"""
        # Record the APR pool this object lives in (defaulting to the global
        # application pool) and keep a weakref to its validity flag so we can
        # detect use-after-free of pool memory.  Writes go through __dict__
        # directly to bypass the custom __setattr__.
        import libsvn.core, weakref
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    def __getattr__(self, name):
        """Get an attribute from this object"""
        # Validate the pool first, then delegate to the SWIG getter; any
        # Python-side metadata stored for this member is copied onto the
        # returned value and checked for validity as well.
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        # Remember the Python-side value in _members (so metadata survives a
        # round-trip through the C layer) before handing it to SWIG.
        self.assert_valid()
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    # Convenience methods: each invokes the corresponding C function pointer
    # stored in the struct via the module-level invoke helper.
    def magic_header_record(self, *args):
        return svn_repos_parse_fns3_invoke_magic_header_record(self, *args)
    def uuid_record(self, *args):
        return svn_repos_parse_fns3_invoke_uuid_record(self, *args)
    def new_revision_record(self, *args):
        return svn_repos_parse_fns3_invoke_new_revision_record(self, *args)
    def new_node_record(self, *args):
        return svn_repos_parse_fns3_invoke_new_node_record(self, *args)
    def set_revision_property(self, *args):
        return svn_repos_parse_fns3_invoke_set_revision_property(self, *args)
    def set_node_property(self, *args):
        return svn_repos_parse_fns3_invoke_set_node_property(self, *args)
    def delete_node_property(self, *args):
        return svn_repos_parse_fns3_invoke_delete_node_property(self, *args)
    def remove_node_props(self, *args):
        return svn_repos_parse_fns3_invoke_remove_node_props(self, *args)
    def set_fulltext(self, *args):
        return svn_repos_parse_fns3_invoke_set_fulltext(self, *args)
    def apply_textdelta(self, *args):
        return svn_repos_parse_fns3_invoke_apply_textdelta(self, *args)
    def close_node(self, *args):
        return svn_repos_parse_fns3_invoke_close_node(self, *args)
    def close_revision(self, *args):
        return svn_repos_parse_fns3_invoke_close_revision(self, *args)
# Register the proxy class with the C extension so C-created structs wrap
# into svn_repos_parse_fns3_t instances.
svn_repos_parse_fns3_t_swigregister = _repos.svn_repos_parse_fns3_t_swigregister
svn_repos_parse_fns3_t_swigregister(svn_repos_parse_fns3_t)
# --- Dumpstream parsing ------------------------------------------------------
# SWIG-generated thin wrappers forwarding to the `_repos` C extension.
def svn_repos_parse_dumpstream3(*args):
    """
    svn_repos_parse_dumpstream3(svn_stream_t * stream, svn_repos_parse_fns3_t parse_fns, void * parse_baton, svn_boolean_t deltas_are_text,
        svn_cancel_func_t cancel_func, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_parse_dumpstream3(*args)
def svn_repos_get_fs_build_parser4(*args):
    """
    svn_repos_get_fs_build_parser4(svn_repos_t * repos, svn_revnum_t start_rev, svn_revnum_t end_rev, svn_boolean_t use_history,
        svn_boolean_t validate_props, enum svn_repos_load_uuid uuid_action,
        char const * parent_dir, svn_repos_notify_func_t notify_func, void * notify_baton,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_get_fs_build_parser4(*args)
class svn_repos_parse_fns2_t:
    """Proxy of C svn_repos_parse_fns2_t struct"""
    # SWIG dispatch tables: map struct field names to the generated
    # C getter/setter functions for this type.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_repos_parse_fns2_t, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, svn_repos_parse_fns2_t, name)
    # Instances are only created by the C layer; direct construction is an error.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_setmethods__["new_revision_record"] = _repos.svn_repos_parse_fns2_t_new_revision_record_set
    __swig_getmethods__["new_revision_record"] = _repos.svn_repos_parse_fns2_t_new_revision_record_get
    __swig_setmethods__["uuid_record"] = _repos.svn_repos_parse_fns2_t_uuid_record_set
    __swig_getmethods__["uuid_record"] = _repos.svn_repos_parse_fns2_t_uuid_record_get
    __swig_setmethods__["new_node_record"] = _repos.svn_repos_parse_fns2_t_new_node_record_set
    __swig_getmethods__["new_node_record"] = _repos.svn_repos_parse_fns2_t_new_node_record_get
    __swig_setmethods__["set_revision_property"] = _repos.svn_repos_parse_fns2_t_set_revision_property_set
    __swig_getmethods__["set_revision_property"] = _repos.svn_repos_parse_fns2_t_set_revision_property_get
    __swig_setmethods__["set_node_property"] = _repos.svn_repos_parse_fns2_t_set_node_property_set
    __swig_getmethods__["set_node_property"] = _repos.svn_repos_parse_fns2_t_set_node_property_get
    __swig_setmethods__["delete_node_property"] = _repos.svn_repos_parse_fns2_t_delete_node_property_set
    __swig_getmethods__["delete_node_property"] = _repos.svn_repos_parse_fns2_t_delete_node_property_get
    __swig_setmethods__["remove_node_props"] = _repos.svn_repos_parse_fns2_t_remove_node_props_set
    __swig_getmethods__["remove_node_props"] = _repos.svn_repos_parse_fns2_t_remove_node_props_get
    __swig_setmethods__["set_fulltext"] = _repos.svn_repos_parse_fns2_t_set_fulltext_set
    __swig_getmethods__["set_fulltext"] = _repos.svn_repos_parse_fns2_t_set_fulltext_get
    __swig_setmethods__["apply_textdelta"] = _repos.svn_repos_parse_fns2_t_apply_textdelta_set
    __swig_getmethods__["apply_textdelta"] = _repos.svn_repos_parse_fns2_t_apply_textdelta_get
    __swig_setmethods__["close_node"] = _repos.svn_repos_parse_fns2_t_close_node_set
    __swig_getmethods__["close_node"] = _repos.svn_repos_parse_fns2_t_close_node_get
    __swig_setmethods__["close_revision"] = _repos.svn_repos_parse_fns2_t_close_revision_set
    __swig_getmethods__["close_revision"] = _repos.svn_repos_parse_fns2_t_close_revision_get
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_repos_parse_fns2_t"""
        import libsvn.core, weakref
        # Default to the global application pool when no pool is supplied.
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weak reference: detect pool destruction without keeping it alive.
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    # NOTE: these def forms shadow the lambda-based __getattr__/__setattr__
    # assigned near the top of the class body.
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        # Remember the Python-side value so metadata can be re-applied on read.
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    # Convenience wrappers that call the C callback stored in each field.
    def new_revision_record(self, *args):
        return svn_repos_parse_fns2_invoke_new_revision_record(self, *args)
    def uuid_record(self, *args):
        return svn_repos_parse_fns2_invoke_uuid_record(self, *args)
    def new_node_record(self, *args):
        return svn_repos_parse_fns2_invoke_new_node_record(self, *args)
    def set_revision_property(self, *args):
        return svn_repos_parse_fns2_invoke_set_revision_property(self, *args)
    def set_node_property(self, *args):
        return svn_repos_parse_fns2_invoke_set_node_property(self, *args)
    def delete_node_property(self, *args):
        return svn_repos_parse_fns2_invoke_delete_node_property(self, *args)
    def remove_node_props(self, *args):
        return svn_repos_parse_fns2_invoke_remove_node_props(self, *args)
    def set_fulltext(self, *args):
        return svn_repos_parse_fns2_invoke_set_fulltext(self, *args)
    def apply_textdelta(self, *args):
        return svn_repos_parse_fns2_invoke_apply_textdelta(self, *args)
    def close_node(self, *args):
        return svn_repos_parse_fns2_invoke_close_node(self, *args)
    def close_revision(self, *args):
        return svn_repos_parse_fns2_invoke_close_revision(self, *args)
# Register the proxy class with the SWIG runtime.
svn_repos_parse_fns2_t_swigregister = _repos.svn_repos_parse_fns2_t_swigregister
svn_repos_parse_fns2_t_swigregister(svn_repos_parse_fns2_t)
class svn_repos_parser_fns_t:
    """Proxy of C svn_repos_parse_fns_t struct"""
    # SWIG dispatch tables: map struct field names to the generated
    # C getter/setter functions for this type.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_repos_parser_fns_t, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, svn_repos_parser_fns_t, name)
    # Instances are only created by the C layer; direct construction is an error.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_setmethods__["new_revision_record"] = _repos.svn_repos_parser_fns_t_new_revision_record_set
    __swig_getmethods__["new_revision_record"] = _repos.svn_repos_parser_fns_t_new_revision_record_get
    __swig_setmethods__["uuid_record"] = _repos.svn_repos_parser_fns_t_uuid_record_set
    __swig_getmethods__["uuid_record"] = _repos.svn_repos_parser_fns_t_uuid_record_get
    __swig_setmethods__["new_node_record"] = _repos.svn_repos_parser_fns_t_new_node_record_set
    __swig_getmethods__["new_node_record"] = _repos.svn_repos_parser_fns_t_new_node_record_get
    __swig_setmethods__["set_revision_property"] = _repos.svn_repos_parser_fns_t_set_revision_property_set
    __swig_getmethods__["set_revision_property"] = _repos.svn_repos_parser_fns_t_set_revision_property_get
    __swig_setmethods__["set_node_property"] = _repos.svn_repos_parser_fns_t_set_node_property_set
    __swig_getmethods__["set_node_property"] = _repos.svn_repos_parser_fns_t_set_node_property_get
    __swig_setmethods__["remove_node_props"] = _repos.svn_repos_parser_fns_t_remove_node_props_set
    __swig_getmethods__["remove_node_props"] = _repos.svn_repos_parser_fns_t_remove_node_props_get
    __swig_setmethods__["set_fulltext"] = _repos.svn_repos_parser_fns_t_set_fulltext_set
    __swig_getmethods__["set_fulltext"] = _repos.svn_repos_parser_fns_t_set_fulltext_get
    __swig_setmethods__["close_node"] = _repos.svn_repos_parser_fns_t_close_node_set
    __swig_getmethods__["close_node"] = _repos.svn_repos_parser_fns_t_close_node_get
    __swig_setmethods__["close_revision"] = _repos.svn_repos_parser_fns_t_close_revision_set
    __swig_getmethods__["close_revision"] = _repos.svn_repos_parser_fns_t_close_revision_get
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_repos_parse_fns_t"""
        import libsvn.core, weakref
        # Default to the global application pool when no pool is supplied.
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weak reference: detect pool destruction without keeping it alive.
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    # NOTE: these def forms shadow the lambda-based __getattr__/__setattr__
    # assigned near the top of the class body.
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        # Remember the Python-side value so metadata can be re-applied on read.
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
# Register the proxy class with the SWIG runtime.
svn_repos_parser_fns_t_swigregister = _repos.svn_repos_parser_fns_t_swigregister
svn_repos_parser_fns_t_swigregister(svn_repos_parser_fns_t)
def svn_repos_parse_dumpstream2(*args):
    """
    svn_repos_parse_dumpstream2(svn_stream_t * stream, svn_repos_parse_fns2_t parse_fns, void * parse_baton,
        svn_cancel_func_t cancel_func, apr_pool_t pool) -> svn_error_t
    """
    # Thin shim: forward everything unchanged to the C extension module.
    c_func = _repos.svn_repos_parse_dumpstream2
    return c_func(*args)
def svn_repos_parse_dumpstream(*args):
    """
    svn_repos_parse_dumpstream(svn_stream_t * stream, svn_repos_parser_fns_t parse_fns, void * parse_baton,
        svn_cancel_func_t cancel_func, apr_pool_t pool) -> svn_error_t
    """
    # Thin shim: forward everything unchanged to the C extension module.
    c_func = _repos.svn_repos_parse_dumpstream
    return c_func(*args)
def svn_repos_get_fs_build_parser3(*args):
    """
    svn_repos_get_fs_build_parser3(svn_repos_t * repos, svn_boolean_t use_history, svn_boolean_t validate_props,
        enum svn_repos_load_uuid uuid_action, char const * parent_dir, svn_repos_notify_func_t notify_func,
        void * notify_baton, apr_pool_t pool) -> svn_error_t
    """
    # Thin shim: forward everything unchanged to the C extension module.
    c_func = _repos.svn_repos_get_fs_build_parser3
    return c_func(*args)
def svn_repos_get_fs_build_parser2(*args):
    """
    svn_repos_get_fs_build_parser2(svn_repos_t * repos, svn_boolean_t use_history, enum svn_repos_load_uuid uuid_action,
        svn_stream_t * outstream, char const * parent_dir, apr_pool_t pool) -> svn_error_t
    """
    # Thin shim: forward everything unchanged to the C extension module.
    c_func = _repos.svn_repos_get_fs_build_parser2
    return c_func(*args)
def svn_repos_get_fs_build_parser(*args):
    """
    svn_repos_get_fs_build_parser(svn_repos_t * repos, svn_boolean_t use_history, enum svn_repos_load_uuid uuid_action,
        svn_stream_t * outstream, char const * parent_dir, apr_pool_t pool) -> svn_error_t
    """
    # Thin shim: forward everything unchanged to the C extension module.
    c_func = _repos.svn_repos_get_fs_build_parser
    return c_func(*args)
def svn_repos_authz_read2(*args):
    """svn_repos_authz_read2(char const * path, char const * groups_path, svn_boolean_t must_exist, apr_pool_t pool) -> svn_error_t"""
    # Thin shim: forward everything unchanged to the C extension module.
    c_func = _repos.svn_repos_authz_read2
    return c_func(*args)
def svn_repos_authz_read(*args):
    """svn_repos_authz_read(char const * file, svn_boolean_t must_exist, apr_pool_t pool) -> svn_error_t"""
    # Thin shim: forward everything unchanged to the C extension module.
    c_func = _repos.svn_repos_authz_read
    return c_func(*args)
def svn_repos_authz_parse(*args):
    """svn_repos_authz_parse(svn_stream_t * stream, svn_stream_t * groups_stream, apr_pool_t pool) -> svn_error_t"""
    # Thin shim: forward everything unchanged to the C extension module.
    c_func = _repos.svn_repos_authz_parse
    return c_func(*args)
def svn_repos_authz_check_access(*args):
    """
    svn_repos_authz_check_access(svn_authz_t * authz, char const * repos_name, char const * path, char const * user,
        svn_repos_authz_access_t required_access, apr_pool_t pool) -> svn_error_t
    """
    # Thin shim: forward everything unchanged to the C extension module.
    c_func = _repos.svn_repos_authz_check_access
    return c_func(*args)
# Revision-access levels re-exported from the C module (least to most access).
svn_repos_revision_access_none = _repos.svn_repos_revision_access_none
svn_repos_revision_access_partial = _repos.svn_repos_revision_access_partial
svn_repos_revision_access_full = _repos.svn_repos_revision_access_full
def svn_repos_check_revision_access(*args):
    """
    svn_repos_check_revision_access(svn_repos_revision_access_level_t * access_level, svn_repos_t * repos,
        svn_revnum_t revision, svn_repos_authz_func_t authz_read_func, apr_pool_t pool) -> svn_error_t
    """
    # Thin shim: forward everything unchanged to the C extension module.
    c_func = _repos.svn_repos_check_revision_access
    return c_func(*args)
def svn_repos_fs_get_inherited_props(*args):
    """
    svn_repos_fs_get_inherited_props(svn_fs_root_t * root, char const * path, char const * propname,
        svn_repos_authz_func_t authz_read_func, apr_pool_t result_pool, apr_pool_t scratch_pool) -> svn_error_t
    """
    # Thin shim: forward everything unchanged to the C extension module.
    c_func = _repos.svn_repos_fs_get_inherited_props
    return c_func(*args)
def svn_repos_remember_client_capabilities(*args):
    """svn_repos_remember_client_capabilities(svn_repos_t * repos, apr_array_header_t capabilities) -> svn_error_t"""
    # Thin shim: forward everything unchanged to the C extension module.
    c_func = _repos.svn_repos_remember_client_capabilities
    return c_func(*args)
class svn_repos_t:
    """Proxy of C svn_repos_t struct"""
    # SWIG dispatch tables (empty: this is an opaque handle with no fields).
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_repos_t, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, svn_repos_t, name)
    # Instances are only created by the C layer; direct construction is an error.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_repos_t"""
        import libsvn.core, weakref
        # Default to the global application pool when no pool is supplied.
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weak reference: detect pool destruction without keeping it alive.
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    # NOTE: these def forms shadow the lambda-based __getattr__/__setattr__ above.
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
# Register the proxy class with the SWIG runtime.
svn_repos_t_swigregister = _repos.svn_repos_t_swigregister
svn_repos_t_swigregister(svn_repos_t)
class svn_authz_t:
    """Proxy of C svn_authz_t struct"""
    # SWIG dispatch tables (empty: this is an opaque handle with no fields).
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_authz_t, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, svn_authz_t, name)
    # Instances are only created by the C layer; direct construction is an error.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_authz_t"""
        import libsvn.core, weakref
        # Default to the global application pool when no pool is supplied.
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weak reference: detect pool destruction without keeping it alive.
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    # NOTE: these def forms shadow the lambda-based __getattr__/__setattr__ above.
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
# Register the proxy class with the SWIG runtime.
svn_authz_t_swigregister = _repos.svn_authz_t_swigregister
svn_authz_t_swigregister(svn_authz_t)
# Trampolines used by the svn_repos_parse_fns3_t proxy: each forwards to the
# C helper that invokes the corresponding callback stored on the struct.
def svn_repos_parse_fns3_invoke_magic_header_record(*args):
    """svn_repos_parse_fns3_invoke_magic_header_record(svn_repos_parse_fns3_t _obj, int version, void * parse_baton, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_parse_fns3_invoke_magic_header_record(*args)
def svn_repos_parse_fns3_invoke_uuid_record(*args):
    """svn_repos_parse_fns3_invoke_uuid_record(svn_repos_parse_fns3_t _obj, char const * uuid, void * parse_baton, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_parse_fns3_invoke_uuid_record(*args)
def svn_repos_parse_fns3_invoke_new_revision_record(*args):
    """svn_repos_parse_fns3_invoke_new_revision_record(svn_repos_parse_fns3_t _obj, apr_hash_t headers, void * parse_baton, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_parse_fns3_invoke_new_revision_record(*args)
def svn_repos_parse_fns3_invoke_new_node_record(*args):
    """svn_repos_parse_fns3_invoke_new_node_record(svn_repos_parse_fns3_t _obj, apr_hash_t headers, void * revision_baton, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_parse_fns3_invoke_new_node_record(*args)
def svn_repos_parse_fns3_invoke_set_revision_property(*args):
    """svn_repos_parse_fns3_invoke_set_revision_property(svn_repos_parse_fns3_t _obj, void * revision_baton, char const * name, svn_string_t const * value) -> svn_error_t"""
    return _repos.svn_repos_parse_fns3_invoke_set_revision_property(*args)
def svn_repos_parse_fns3_invoke_set_node_property(*args):
    """svn_repos_parse_fns3_invoke_set_node_property(svn_repos_parse_fns3_t _obj, void * node_baton, char const * name, svn_string_t const * value) -> svn_error_t"""
    return _repos.svn_repos_parse_fns3_invoke_set_node_property(*args)
def svn_repos_parse_fns3_invoke_delete_node_property(*args):
    """svn_repos_parse_fns3_invoke_delete_node_property(svn_repos_parse_fns3_t _obj, void * node_baton, char const * name) -> svn_error_t"""
    return _repos.svn_repos_parse_fns3_invoke_delete_node_property(*args)
def svn_repos_parse_fns3_invoke_remove_node_props(*args):
    """svn_repos_parse_fns3_invoke_remove_node_props(svn_repos_parse_fns3_t _obj, void * node_baton) -> svn_error_t"""
    return _repos.svn_repos_parse_fns3_invoke_remove_node_props(*args)
def svn_repos_parse_fns3_invoke_set_fulltext(*args):
    """svn_repos_parse_fns3_invoke_set_fulltext(svn_repos_parse_fns3_t _obj, void * node_baton) -> svn_error_t"""
    return _repos.svn_repos_parse_fns3_invoke_set_fulltext(*args)
def svn_repos_parse_fns3_invoke_apply_textdelta(*args):
    """svn_repos_parse_fns3_invoke_apply_textdelta(svn_repos_parse_fns3_t _obj, void * node_baton) -> svn_error_t"""
    return _repos.svn_repos_parse_fns3_invoke_apply_textdelta(*args)
def svn_repos_parse_fns3_invoke_close_node(*args):
    """svn_repos_parse_fns3_invoke_close_node(svn_repos_parse_fns3_t _obj, void * node_baton) -> svn_error_t"""
    return _repos.svn_repos_parse_fns3_invoke_close_node(*args)
def svn_repos_parse_fns3_invoke_close_revision(*args):
    """svn_repos_parse_fns3_invoke_close_revision(svn_repos_parse_fns3_t _obj, void * revision_baton) -> svn_error_t"""
    return _repos.svn_repos_parse_fns3_invoke_close_revision(*args)
# Trampolines used by the svn_repos_parse_fns2_t proxy: each forwards to the
# C helper that invokes the corresponding callback stored on the struct.
def svn_repos_parse_fns2_invoke_new_revision_record(*args):
    """svn_repos_parse_fns2_invoke_new_revision_record(svn_repos_parse_fns2_t _obj, apr_hash_t headers, void * parse_baton, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_parse_fns2_invoke_new_revision_record(*args)
def svn_repos_parse_fns2_invoke_uuid_record(*args):
    """svn_repos_parse_fns2_invoke_uuid_record(svn_repos_parse_fns2_t _obj, char const * uuid, void * parse_baton, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_parse_fns2_invoke_uuid_record(*args)
def svn_repos_parse_fns2_invoke_new_node_record(*args):
    """svn_repos_parse_fns2_invoke_new_node_record(svn_repos_parse_fns2_t _obj, apr_hash_t headers, void * revision_baton, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_parse_fns2_invoke_new_node_record(*args)
def svn_repos_parse_fns2_invoke_set_revision_property(*args):
    """svn_repos_parse_fns2_invoke_set_revision_property(svn_repos_parse_fns2_t _obj, void * revision_baton, char const * name, svn_string_t const * value) -> svn_error_t"""
    return _repos.svn_repos_parse_fns2_invoke_set_revision_property(*args)
def svn_repos_parse_fns2_invoke_set_node_property(*args):
    """svn_repos_parse_fns2_invoke_set_node_property(svn_repos_parse_fns2_t _obj, void * node_baton, char const * name, svn_string_t const * value) -> svn_error_t"""
    return _repos.svn_repos_parse_fns2_invoke_set_node_property(*args)
def svn_repos_parse_fns2_invoke_delete_node_property(*args):
    """svn_repos_parse_fns2_invoke_delete_node_property(svn_repos_parse_fns2_t _obj, void * node_baton, char const * name) -> svn_error_t"""
    return _repos.svn_repos_parse_fns2_invoke_delete_node_property(*args)
def svn_repos_parse_fns2_invoke_remove_node_props(*args):
    """svn_repos_parse_fns2_invoke_remove_node_props(svn_repos_parse_fns2_t _obj, void * node_baton) -> svn_error_t"""
    return _repos.svn_repos_parse_fns2_invoke_remove_node_props(*args)
def svn_repos_parse_fns2_invoke_set_fulltext(*args):
    """svn_repos_parse_fns2_invoke_set_fulltext(svn_repos_parse_fns2_t _obj, void * node_baton) -> svn_error_t"""
    return _repos.svn_repos_parse_fns2_invoke_set_fulltext(*args)
def svn_repos_parse_fns2_invoke_apply_textdelta(*args):
    """svn_repos_parse_fns2_invoke_apply_textdelta(svn_repos_parse_fns2_t _obj, void * node_baton) -> svn_error_t"""
    return _repos.svn_repos_parse_fns2_invoke_apply_textdelta(*args)
def svn_repos_parse_fns2_invoke_close_node(*args):
    """svn_repos_parse_fns2_invoke_close_node(svn_repos_parse_fns2_t _obj, void * node_baton) -> svn_error_t"""
    return _repos.svn_repos_parse_fns2_invoke_close_node(*args)
def svn_repos_parse_fns2_invoke_close_revision(*args):
    """svn_repos_parse_fns2_invoke_close_revision(svn_repos_parse_fns2_t _obj, void * revision_baton) -> svn_error_t"""
    return _repos.svn_repos_parse_fns2_invoke_close_revision(*args)
# Trampolines backing the callable callback proxies below (their __call__
# methods): each forwards to the C helper that invokes the wrapped function.
def svn_repos_invoke_authz_func(*args):
    """
    svn_repos_invoke_authz_func(svn_repos_authz_func_t _obj, svn_fs_root_t * root, char const * path, void * baton,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_invoke_authz_func(*args)
def svn_repos_invoke_authz_callback(*args):
    """
    svn_repos_invoke_authz_callback(svn_repos_authz_callback_t _obj, svn_repos_authz_access_t required, svn_fs_root_t * root,
        char const * path, void * baton, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_invoke_authz_callback(*args)
def svn_repos_invoke_file_rev_handler(*args):
    """
    svn_repos_invoke_file_rev_handler(svn_repos_file_rev_handler_t _obj, void * baton, char const * path, svn_revnum_t rev,
        apr_hash_t rev_props, apr_array_header_t prop_diffs, apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_invoke_file_rev_handler(*args)
def svn_repos_invoke_notify_func(*args):
    """svn_repos_invoke_notify_func(svn_repos_notify_func_t _obj, void * baton, svn_repos_notify_t notify, apr_pool_t scratch_pool)"""
    return _repos.svn_repos_invoke_notify_func(*args)
def svn_repos_invoke_freeze_func(*args):
    """svn_repos_invoke_freeze_func(svn_repos_freeze_func_t _obj, void * baton, apr_pool_t pool) -> svn_error_t"""
    return _repos.svn_repos_invoke_freeze_func(*args)
def svn_repos_invoke_history_func(*args):
    """
    svn_repos_invoke_history_func(svn_repos_history_func_t _obj, void * baton, char const * path, svn_revnum_t revision,
        apr_pool_t pool) -> svn_error_t
    """
    return _repos.svn_repos_invoke_history_func(*args)
class svn_repos_authz_func_t:
    """Proxy of C svn_repos_authz_func_t struct"""
    # SWIG dispatch tables (empty: opaque callback handle with no fields).
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_repos_authz_func_t, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, svn_repos_authz_func_t, name)
    # Instances are only created by the C layer; direct construction is an error.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_repos_authz_func_t"""
        import libsvn.core, weakref
        # Default to the global application pool when no pool is supplied.
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weak reference: detect pool destruction without keeping it alive.
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    # NOTE: these def forms shadow the lambda-based __getattr__/__setattr__ above.
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    def __call__(self, *args):
        # Make the proxy directly callable, like the C function pointer it wraps.
        return svn_repos_invoke_authz_func(self, *args)
# Register the proxy class with the SWIG runtime.
svn_repos_authz_func_t_swigregister = _repos.svn_repos_authz_func_t_swigregister
svn_repos_authz_func_t_swigregister(svn_repos_authz_func_t)
class svn_repos_authz_callback_t:
    """Proxy of C svn_repos_authz_callback_t struct"""
    # SWIG dispatch tables (empty: opaque callback handle with no fields).
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_repos_authz_callback_t, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, svn_repos_authz_callback_t, name)
    # Instances are only created by the C layer; direct construction is an error.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_repos_authz_callback_t"""
        import libsvn.core, weakref
        # Default to the global application pool when no pool is supplied.
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weak reference: detect pool destruction without keeping it alive.
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    # NOTE: these def forms shadow the lambda-based __getattr__/__setattr__ above.
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    def __call__(self, *args):
        # Make the proxy directly callable, like the C function pointer it wraps.
        return svn_repos_invoke_authz_callback(self, *args)
# Register the proxy class with the SWIG runtime.
svn_repos_authz_callback_t_swigregister = _repos.svn_repos_authz_callback_t_swigregister
svn_repos_authz_callback_t_swigregister(svn_repos_authz_callback_t)
class svn_repos_file_rev_handler_t:
    """Proxy of C svn_repos_file_rev_handler_t struct"""
    # SWIG dispatch tables (empty: opaque callback handle with no fields).
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_repos_file_rev_handler_t, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, svn_repos_file_rev_handler_t, name)
    # Instances are only created by the C layer; direct construction is an error.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_repos_file_rev_handler_t"""
        import libsvn.core, weakref
        # Default to the global application pool when no pool is supplied.
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weak reference: detect pool destruction without keeping it alive.
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    # NOTE: these def forms shadow the lambda-based __getattr__/__setattr__ above.
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    def __call__(self, *args):
        # Make the proxy directly callable, like the C function pointer it wraps.
        return svn_repos_invoke_file_rev_handler(self, *args)
# Register the proxy class with the SWIG runtime.
svn_repos_file_rev_handler_t_swigregister = _repos.svn_repos_file_rev_handler_t_swigregister
svn_repos_file_rev_handler_t_swigregister(svn_repos_file_rev_handler_t)
class svn_repos_notify_func_t:
    """Proxy of C svn_repos_notify_func_t struct"""
    # SWIG dispatch tables (empty: opaque callback handle with no fields).
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_repos_notify_func_t, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, svn_repos_notify_func_t, name)
    # Instances are only created by the C layer; direct construction is an error.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_repos_notify_func_t"""
        import libsvn.core, weakref
        # Default to the global application pool when no pool is supplied.
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weak reference: detect pool destruction without keeping it alive.
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    # NOTE: these def forms shadow the lambda-based __getattr__/__setattr__ above.
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    def __call__(self, *args):
        # Make the proxy directly callable, like the C function pointer it wraps.
        return svn_repos_invoke_notify_func(self, *args)
# Register the proxy class with the SWIG runtime.
svn_repos_notify_func_t_swigregister = _repos.svn_repos_notify_func_t_swigregister
svn_repos_notify_func_t_swigregister(svn_repos_notify_func_t)
class svn_repos_freeze_func_t:
    """Proxy of C svn_repos_freeze_func_t struct"""
    # SWIG dispatch tables (empty: opaque callback handle with no fields).
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_repos_freeze_func_t, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, svn_repos_freeze_func_t, name)
    # Instances are only created by the C layer; direct construction is an error.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_repos_freeze_func_t"""
        import libsvn.core, weakref
        # Default to the global application pool when no pool is supplied.
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weak reference: detect pool destruction without keeping it alive.
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    # NOTE: these def forms shadow the lambda-based __getattr__/__setattr__ above.
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    def __call__(self, *args):
        # Make the proxy directly callable, like the C function pointer it wraps.
        return svn_repos_invoke_freeze_func(self, *args)
# Register the proxy class with the SWIG runtime.
svn_repos_freeze_func_t_swigregister = _repos.svn_repos_freeze_func_t_swigregister
svn_repos_freeze_func_t_swigregister(svn_repos_freeze_func_t)
class svn_repos_history_func_t:
    """Proxy of C svn_repos_history_func_t struct"""
    # SWIG dispatch tables (empty: opaque callback handle with no fields).
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_repos_history_func_t, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, svn_repos_history_func_t, name)
    # Instances are only created by the C layer; direct construction is an error.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_repos_history_func_t"""
        import libsvn.core, weakref
        # Default to the global application pool when no pool is supplied.
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weak reference: detect pool destruction without keeping it alive.
            self.__dict__["_is_valid"] = weakref.ref(
                self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    # NOTE: these def forms shadow the lambda-based __getattr__/__setattr__ above.
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        members = self.__dict__.get("_members")
        if members is not None:
            _copy_metadata_deep(value, members.get(name))
        _assert_valid_deep(value)
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    def __call__(self, *args):
        # Make the proxy directly callable, like the C function pointer it wraps.
        return svn_repos_invoke_history_func(self, *args)
# Register the proxy class with the SWIG runtime.
svn_repos_history_func_t_swigregister = _repos.svn_repos_history_func_t_swigregister
svn_repos_history_func_t_swigregister(svn_repos_history_func_t)
# This file is compatible with both classic and new-style classes.
| 43.897028
| 171
| 0.766354
| 12,534
| 82,702
| 4.403303
| 0.032791
| 0.136399
| 0.074197
| 0.050262
| 0.911562
| 0.832545
| 0.757714
| 0.694733
| 0.662952
| 0.624522
| 0
| 0.004646
| 0.146429
| 82,702
| 1,883
| 172
| 43.92034
| 0.777184
| 0.33553
| 0
| 0.387652
| 1
| 0
| 0.059619
| 0.002392
| 0
| 0
| 0
| 0
| 0.07085
| 1
| 0.25
| false
| 0.001012
| 0.025304
| 0.030364
| 0.562753
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
aa9f2cd282ffa017ab36897f526eca045caaffdf
| 131,278
|
py
|
Python
|
scielomanager/journalmanager/tests/tests_forms.py
|
jamilatta/scielo-manager
|
d506c6828ba9b1089faa164bc42ba29a0f228e61
|
[
"BSD-2-Clause"
] | null | null | null |
scielomanager/journalmanager/tests/tests_forms.py
|
jamilatta/scielo-manager
|
d506c6828ba9b1089faa164bc42ba29a0f228e61
|
[
"BSD-2-Clause"
] | null | null | null |
scielomanager/journalmanager/tests/tests_forms.py
|
jamilatta/scielo-manager
|
d506c6828ba9b1089faa164bc42ba29a0f228e61
|
[
"BSD-2-Clause"
] | null | null | null |
# coding:utf-8
"""
Use this module to write functional tests for the view-functions, only!
"""
import os
import unittest
from django_webtest import WebTest
from django.core.urlresolvers import reverse
from django.core import mail
from django.test import TestCase
from journalmanager.tests import modelfactories
from journalmanager import forms
from journalmanager import models
from scielomanager.utils.modelmanagers.helpers import (
_patch_userrequestcontextfinder_settings_setup,
_patch_userrequestcontextfinder_settings_teardown,
)
HASH_FOR_123 = 'sha1$93d45$5f366b56ce0444bfea0f5634c7ce8248508c9799'
def _makePermission(perm, model, app_label='journalmanager'):
    """
    Look up and return the ``Permission`` whose codename is *perm* for the
    given *model*/*app_label* pair.
    """
    # Imported locally so the module can be loaded before Django app setup.
    from django.contrib.contenttypes import models as ct_models
    from django.contrib.auth import models as auth_models

    content_type = ct_models.ContentType.objects.get(
        model=model, app_label=app_label)
    return auth_models.Permission.objects.get(
        codename=perm, content_type=content_type)
def _makeUseLicense():
    """
    Create and persist a ``UseLicense`` fixture with a fixed license code.

    Returns the saved instance so callers can reference it; the original
    discarded it (implicitly returning ``None``), which forced callers to
    re-query the database. Existing callers that ignore the return value
    are unaffected.
    """
    ul = models.UseLicense(license_code='TEST')
    ul.save()
    return ul
class CollectionFormTests(WebTest):
    """Functional tests for adding/editing collections through the web form."""

    def setUp(self):
        # Each test runs as an active user who manages ``self.collection``.
        self.user = modelfactories.UserFactory(is_active=True)
        self.collection = modelfactories.CollectionFactory.create()
        self.collection.add_user(self.user, is_manager=True)

    def _edit_form(self):
        """Fetch the edit page for ``self.collection`` and return its form."""
        url = reverse('collection.edit', args=[self.collection.pk])
        return self.app.get(url, user=self.user).forms['collection-form']

    def test_access_without_permission(self):
        """
        Authenticated users lacking the required permissions must not reach
        the form; they are redirected to the unauthorized page instead.
        """
        collection = modelfactories.CollectionFactory.create()
        collection.add_user(self.user, is_manager=False)

        url = reverse('collection.edit', args=[collection.pk])
        page = self.app.get(url, user=self.user).follow()

        page.mustcontain('not authorized to access')
        self.assertTemplateUsed(page, 'accounts/unauthorized.html')

    def test_POST_workflow_with_valid_formdata(self):
        """
        Submitting a valid form redirects the user to the index page.

        Requires the ``journalmanager.change_collection`` permission.
        """
        self.user.user_permissions.add(
            _makePermission(perm='change_collection', model='collection'))

        form = self._edit_form()
        for field, value in (
            ('collection-name', 'Brasil'),
            ('collection-url', 'http://www.scielo.br'),
            ('collection-country', 'Brasil'),
            ('collection-address', 'Rua Machado Bittencourt'),
            ('collection-address_number', '430'),
            ('collection-email', 'scielo@scielo.org'),
        ):
            form[field] = value

        page = form.submit().follow()

        self.assertTemplateUsed(page,
            'journalmanager/add_collection.html')
        page.mustcontain('Saved')

    def test_POST_workflow_with_invalid_formdata(self):
        """
        Submitting an invalid form takes no action: the form is re-rendered
        with an alert saying ``There are some errors or missing data``.
        """
        self.user.user_permissions.add(
            _makePermission(perm='change_collection', model='collection'))

        form = self._edit_form()
        for field in ('collection-name', 'collection-url',
                      'collection-country', 'collection-address',
                      'collection-address_number', 'collection-email'):
            form[field] = ''

        page = form.submit()

        page.mustcontain('There are some errors or missing data')

    def test_form_action_must_be_empty(self):
        """
        The form's ``action`` attribute must be empty: the same form is used
        both to add a new entry and to edit an existing one.
        """
        self.user.user_permissions.add(
            _makePermission(perm='change_collection', model='collection'))

        self.assertEqual(self._edit_form().action, '')

    def test_form_method_must_be_post(self):
        """The form's ``method`` attribute must be ``POST``."""
        self.user.user_permissions.add(
            _makePermission(perm='change_collection', model='collection'))

        self.assertEqual(self._edit_form().method.lower(), 'post')

    def test_form_enctype_must_be_multipart_formdata(self):
        """The form's ``enctype`` attribute must be ``multipart/form-data``."""
        self.user.user_permissions.add(
            _makePermission(perm='change_collection', model='collection'))

        self.assertEqual(self._edit_form().enctype.lower(),
                         'multipart/form-data')
class SectionFormTests(WebTest):
    """Functional tests for the journal section add/edit form."""

    def setUp(self):
        # Each test runs as an active user who manages ``self.collection``.
        self.user = modelfactories.UserFactory(is_active=True)
        self.collection = modelfactories.CollectionFactory.create()
        self.collection.add_user(self.user, is_manager=True)

    def test_access_without_permission(self):
        """
        Asserts that authenticated users without the required permissions
        are unable to access the form. They must be redirected to a page
        with information about their lack of permissions.
        """
        journal = modelfactories.JournalFactory.create()
        journal.join(self.collection, self.user)
        response = self.app.get(reverse('section.add', args=[journal.pk]),
                                user=self.user).follow()
        response.mustcontain('not authorized to access')
        self.assertTemplateUsed(response, 'accounts/unauthorized.html')

    def test_basic_structure(self):
        """
        Just to make sure that the required hidden fields are all
        present.

        All the management fields from inlineformsets used in this
        form should be part of this test.
        """
        perm = _makePermission(perm='change_section', model='section')
        self.user.user_permissions.add(perm)
        journal = modelfactories.JournalFactory.create()
        journal.join(self.collection, self.user)
        form = self.app.get(reverse('section.add', args=[journal.pk]),
                            user=self.user)
        self.assertTemplateUsed(form, 'journalmanager/add_section.html')
        # Management fields required by the ``titles`` inline formset.
        form.mustcontain('section-form',
                         'csrfmiddlewaretoken',
                         'titles-TOTAL_FORMS',
                         'titles-INITIAL_FORMS',
                         'titles-MAX_NUM_FORMS',
                         )

    def test_POST_workflow_with_valid_formdata(self):
        """
        When a valid form is submitted, the user is redirected to
        the section's list and the new section must be part
        of the list.

        In order to take this action, the user needs the following
        permissions: ``journalmanager.change_section`` and
        ``journalmanager.list_section``.
        """
        perm1 = _makePermission(perm='change_section', model='section')
        self.user.user_permissions.add(perm1)
        perm2 = _makePermission(perm='list_section', model='section')
        self.user.user_permissions.add(perm2)
        journal = modelfactories.JournalFactory.create()
        journal.join(self.collection, self.user)
        language = modelfactories.LanguageFactory.create(iso_code='en',
                                                         name='english')
        journal.languages.add(language)
        form = self.app.get(reverse('section.add', args=[journal.pk]),
                            user=self.user).forms['section-form']
        form['titles-0-title'] = 'Original Article'
        # ``set`` is used for the <select> widget (value must be a valid pk).
        form.set('titles-0-language', language.pk)
        response = form.submit().follow()
        self.assertTemplateUsed(response,
                                'journalmanager/section_list.html')
        response.mustcontain('Original Article')

    def test_POST_workflow_with_invalid_formdata(self):
        """
        When an invalid form is submitted, no action is taken, the
        form is rendered again and an alert is shown with the message
        ``There are some errors or missing data``.
        """
        perm = _makePermission(perm='change_section', model='section')
        self.user.user_permissions.add(perm)
        journal = modelfactories.JournalFactory.create()
        journal.join(self.collection, self.user)
        language = modelfactories.LanguageFactory.create(iso_code='en',
                                                         name='english')
        journal.languages.add(language)
        # Submitted with no title/language filled in: invalid on purpose.
        form = self.app.get(reverse('section.add', args=[journal.pk]),
                            user=self.user).forms['section-form']
        response = form.submit()
        response.mustcontain('There are some errors or missing data')

    def test_POST_workflow_with_exist_title_on_the_same_journal(self):
        """
        Asserts that duplicates are allowed
        """
        perm1 = _makePermission(perm='change_section', model='section')
        self.user.user_permissions.add(perm1)
        perm2 = _makePermission(perm='list_section', model='section')
        self.user.user_permissions.add(perm2)
        journal = modelfactories.JournalFactory.create()
        journal.join(self.collection, self.user)
        language = modelfactories.LanguageFactory.create(iso_code='en',
                                                         name='english')
        journal.languages.add(language)
        # Pre-existing section with the same title the form will submit.
        section = modelfactories.SectionFactory(journal=journal)
        section.add_title('Original Article', language=language)
        form = self.app.get(reverse('section.add', args=[journal.pk]),
                            user=self.user).forms['section-form']
        form['titles-0-title'] = 'Original Article'
        form.set('titles-0-language', language.pk)
        response = form.submit().follow()
        # The redirect succeeding shows the duplicate title was accepted.
        self.assertTemplateUsed(response,
                                'journalmanager/section_list.html')

    def test_section_must_allow_new_title_translations(self):
        """
        Asserts that is possible to create new title translations to
        existing Sections.
        """
        perm1 = _makePermission(perm='change_section', model='section')
        self.user.user_permissions.add(perm1)
        perm2 = _makePermission(perm='list_section', model='section')
        self.user.user_permissions.add(perm2)
        journal = modelfactories.JournalFactory.create()
        journal.join(self.collection, self.user)
        language = modelfactories.LanguageFactory.create(iso_code='en',
                                                         name='english')
        language2 = modelfactories.LanguageFactory.create(iso_code='pt',
                                                          name='portuguese')
        journal.languages.add(language)
        journal.languages.add(language2)
        section = modelfactories.SectionFactory(journal=journal)
        section.add_title('Original Article', language=language)
        # Add the second translation via the formset's next (index 1) row.
        form = self.app.get(reverse('section.edit',
            args=[journal.pk, section.pk]), user=self.user).forms['section-form']
        form['titles-1-title'] = 'Artigo Original'
        form.set('titles-1-language', language2.pk)
        response = form.submit().follow()
        self.assertTemplateUsed(response,
                                'journalmanager/section_list.html')
        response.mustcontain('Artigo Original')
        response.mustcontain('Original Article')

    def test_section_translations_not_based_on_the_journal_languages(self):
        """
        Section translations are no more restricted to the languages the journal
        publishes its contents. See:
        https://github.com/scieloorg/SciELO-Manager/issues/502
        """
        perm1 = _makePermission(perm='change_section', model='section')
        self.user.user_permissions.add(perm1)
        perm2 = _makePermission(perm='list_section', model='section')
        self.user.user_permissions.add(perm2)
        journal = modelfactories.JournalFactory.create()
        journal.join(self.collection, self.user)
        language = modelfactories.LanguageFactory.create(iso_code='en',
                                                         name='english')
        language2 = modelfactories.LanguageFactory.create(iso_code='pt',
                                                          name='portuguese')
        # Only ``language`` is attached to the journal; ``language2`` is not.
        journal.languages.add(language)
        form = self.app.get(reverse('section.add',
            args=[journal.pk]), user=self.user).forms['section-form']
        form['titles-0-title'] = 'Artigo Original'
        # ``form.set`` would raise if ``language2`` were not an available
        # option; returning None demonstrates the wider language choices.
        self.assertIsNone(form.set('titles-0-language', language2.pk))

    def test_form_enctype_must_be_urlencoded(self):
        """
        Asserts that the enctype attribute of the section form is
        ``application/x-www-form-urlencoded``
        """
        perm = _makePermission(perm='change_section', model='section')
        self.user.user_permissions.add(perm)
        journal = modelfactories.JournalFactory.create()
        journal.join(self.collection, self.user)
        form = self.app.get(reverse('section.add', args=[journal.pk]),
                            user=self.user).forms['section-form']
        self.assertEqual(form.enctype, 'application/x-www-form-urlencoded')

    def test_form_action_must_be_empty(self):
        """
        Asserts that the action attribute of the section form is
        empty. This is needed because the same form is used to add
        a new or edit an existing entry.
        """
        perm = _makePermission(perm='change_section', model='section')
        self.user.user_permissions.add(perm)
        journal = modelfactories.JournalFactory.create()
        journal.join(self.collection, self.user)
        form = self.app.get(reverse('section.add', args=[journal.pk]),
                            user=self.user).forms['section-form']
        self.assertEqual(form.action, '')

    def test_form_method_must_be_post(self):
        """
        Asserts that the method attribute of the section form is
        ``POST``.
        """
        perm = _makePermission(perm='change_section', model='section')
        self.user.user_permissions.add(perm)
        journal = modelfactories.JournalFactory.create()
        journal.join(self.collection, self.user)
        form = self.app.get(reverse('section.add', args=[journal.pk]),
                            user=self.user).forms['section-form']
        self.assertEqual(form.method.lower(), 'post')
class UserFormTests(WebTest):
    """Functional tests for the user add/edit form."""

    def setUp(self):
        # Each test runs as an active user who manages ``self.collection``.
        self.user = modelfactories.UserFactory(is_active=True)
        self.collection = modelfactories.CollectionFactory.create()
        self.collection.add_user(self.user, is_manager=True)

    def test_access_without_permission(self):
        """
        Asserts that authenticated users without the required permissions
        are unable to access the form. They must be redirected to a page
        with information about their lack of permissions.
        """
        response = self.app.get(reverse('user.add'),
                                user=self.user).follow()
        response.mustcontain('not authorized to access')
        self.assertTemplateUsed(response, 'accounts/unauthorized.html')

    def test_access_without_being_manager(self):
        """
        Asserts that authenticated users that are not managers of the
        collection are unable to access the form. They must be redirected
        to a page with information about their lack of permissions.
        """
        perm = _makePermission(perm='change_user',
                               model='user', app_label='auth')
        self.user.user_permissions.add(perm)
        # adding another collection the user lacks manager privileges
        other_collection = modelfactories.CollectionFactory.create()
        other_collection.add_user(self.user, is_manager=False)
        other_collection.make_default_to_user(self.user)
        response = self.app.get(reverse('user.add'),
                                user=self.user).follow()
        response.mustcontain('not authorized to access')
        self.assertTemplateUsed(response, 'accounts/unauthorized.html')

    def test_basic_structure(self):
        """
        Just to make sure that the required hidden fields are all
        present.

        All the management fields from inlineformsets used in this
        form should be part of this test.
        """
        perm = _makePermission(perm='change_user',
                               model='user', app_label='auth')
        self.user.user_permissions.add(perm)
        page = self.app.get(reverse('user.add'), user=self.user)
        self.assertTemplateUsed(page, 'journalmanager/add_user.html')
        # Management fields required by the ``usercollections`` formset.
        page.mustcontain('user-form',
                         'csrfmiddlewaretoken',
                         'usercollections-TOTAL_FORMS',
                         'usercollections-INITIAL_FORMS',
                         'usercollections-MAX_NUM_FORMS',
                         )

    def test_POST_workflow_with_valid_formdata(self):
        """
        When a valid form is submitted, the user is redirected to
        the user's list and the new user must be part
        of the list.

        An email must be sent to the new user.

        In order to take this action, the user needs the following
        permissions: ``journalmanager.change_user``.
        """
        perm = _makePermission(perm='change_user',
                               model='user', app_label='auth')
        self.user.user_permissions.add(perm)
        form = self.app.get(reverse('user.add'),
                            user=self.user).forms['user-form']
        form['user-username'] = 'bazz'
        form['user-first_name'] = 'foo'
        form['user-last_name'] = 'bar'
        form['userprofile-0-email'] = 'bazz@spam.org'
        # form.set('asmSelect0', '1')  # groups
        form.set('usercollections-0-collection', self.collection.pk)
        response = form.submit().follow()
        self.assertTemplateUsed(response, 'journalmanager/user_list.html')
        response.mustcontain('bazz', 'bazz@spam.org')
        # check if basic state has been set
        self.assertTrue(response.context['user'].user_collection.get(
            pk=self.collection.pk))

    def test_new_users_must_receive_an_email_to_define_their_password(self):
        """A freshly created user gets exactly one notification email."""
        perm = _makePermission(perm='change_user',
                               model='user', app_label='auth')
        self.user.user_permissions.add(perm)
        form = self.app.get(reverse('user.add'),
                            user=self.user).forms['user-form']
        form['user-username'] = 'bazz'
        form['user-first_name'] = 'foo'
        form['user-last_name'] = 'bar'
        form['userprofile-0-email'] = 'bazz@spam.org'
        form.set('usercollections-0-collection', self.collection.pk)
        # The response itself is not inspected; only the mail outbox matters.
        response = form.submit().follow()
        # check if an email has been sent to the new user
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn('bazz@spam.org', mail.outbox[0].recipients())

    def test_emails_are_not_sent_when_users_data_are_modified(self):
        """Editing an existing user must not trigger notification emails."""
        perm = _makePermission(perm='change_user',
                               model='user', app_label='auth')
        self.user.user_permissions.add(perm)
        form = self.app.get(reverse('user.edit', args=[self.user.pk]),
                            user=self.user).forms['user-form']
        form['user-username'] = 'bazz'
        form['user-first_name'] = 'foo'
        form['user-last_name'] = 'bar'
        form['userprofile-0-email'] = 'bazz@spam.org'
        form.set('usercollections-0-collection', self.collection.pk)
        # The response itself is not inspected; only the mail outbox matters.
        response = form.submit().follow()
        # check if the outbox is empty
        self.assertEqual(len(mail.outbox), 0)

    def test_POST_workflow_with_invalid_formdata(self):
        """
        When an invalid form is submitted, no action is taken, the
        form is rendered again and an alert is shown with the message
        ``There are some errors or missing data``.
        """
        perm = _makePermission(perm='change_user',
                               model='user', app_label='auth')
        self.user.user_permissions.add(perm)
        # Submitted completely empty: invalid on purpose.
        form = self.app.get(reverse('user.add'),
                            user=self.user).forms['user-form']
        response = form.submit()
        response.mustcontain('There are some errors or missing data')

    def test_POST_workflow_with_invalid_formdata_without_collection_add_form(self):
        """
        In order to take this action, the user needs the following
        permissions: ``journalmanager.change_user``.

        The collection is mandatory on user add form.
        """
        perm = _makePermission(perm='change_user',
                               model='user', app_label='auth')
        self.user.user_permissions.add(perm)
        form = self.app.get(reverse('user.add'),
                            user=self.user).forms['user-form']
        form['user-username'] = 'bazz'
        form['user-first_name'] = 'foo'
        form['user-last_name'] = 'bar'
        form['userprofile-0-email'] = 'bazz@spam.org'
        # The usercollections formset is intentionally left blank.
        response = form.submit()
        self.assertTemplateUsed(response, 'journalmanager/add_user.html')
        response.mustcontain('Please fill in at least one form')

    def test_POST_workflow_with_invalid_formdata_without_collection_edit_form(self):
        """
        In order to take this action, the user needs the following
        permissions: ``journalmanager.change_user``.

        The collection is mandatory on user edit form.
        """
        perm = _makePermission(perm='change_user',
                               model='user', app_label='auth')
        self.user.user_permissions.add(perm)
        form = self.app.get(reverse('user.edit', args=[self.user.pk]),
                            user=self.user).forms['user-form']
        form['user-username'] = 'bazz'
        form['user-first_name'] = 'foo'
        form['user-last_name'] = 'bar'
        form['userprofile-0-email'] = 'bazz@spam.org'
        #Remove the collection
        form.set('usercollections-0-collection', '')
        response = form.submit()
        self.assertTemplateUsed(response, 'journalmanager/add_user.html')
        response.mustcontain('Please fill in at least one form')

    def test_form_enctype_must_be_urlencoded(self):
        """
        Asserts that the enctype attribute of the user form is
        ``application/x-www-form-urlencoded``
        """
        perm = _makePermission(perm='change_user',
                               model='user', app_label='auth')
        self.user.user_permissions.add(perm)
        form = self.app.get(reverse('user.add'),
                            user=self.user).forms['user-form']
        self.assertEqual(form.enctype, 'application/x-www-form-urlencoded')

    def test_form_action_must_be_empty(self):
        """
        Asserts that the action attribute of the user form is
        empty. This is needed because the same form is used to add
        a new or edit an existing entry.
        """
        perm = _makePermission(perm='change_user',
                               model='user', app_label='auth')
        self.user.user_permissions.add(perm)
        form = self.app.get(reverse('user.add'),
                            user=self.user).forms['user-form']
        self.assertEqual(form.action, '')

    def test_form_method_must_be_post(self):
        """
        Asserts that the method attribute of the user form is
        ``POST``.
        """
        perm = _makePermission(perm='change_user',
                               model='user', app_label='auth')
        self.user.user_permissions.add(perm)
        form = self.app.get(reverse('user.add'),
                            user=self.user).forms['user-form']
        self.assertEqual(form.method.lower(), 'post')

    def test_add_users_only_to_managed_collections(self):
        """
        A user can only add users to collections which he is manager.

        In order to take this action, the user needs the following
        permissions: ``journalmanager.change_user``.
        """
        perm = _makePermission(perm='change_user',
                               model='user', app_label='auth')
        self.user.user_permissions.add(perm)
        # presumably ``add_user`` without ``is_manager`` leaves the user as a
        # plain member — TODO confirm against the Collection model.
        other_collection = modelfactories.CollectionFactory.create()
        other_collection.add_user(self.user)
        form = self.app.get(reverse('user.add'),
                            user=self.user).forms['user-form']
        # The unmanaged collection must not be an option in the <select>.
        self.assertRaises(ValueError, lambda: form.set('usercollections-0-collection', other_collection.pk))
class JournalFormTests(WebTest):
    """Functional tests for the journal add/edit form."""

    def setUp(self):
        # Each test runs as an active user who manages ``self.collection``.
        self.user = modelfactories.UserFactory(is_active=True)
        self.collection = modelfactories.CollectionFactory.create()
        self.collection.add_user(self.user, is_manager=True)
        _makeUseLicense()

    def test_access_without_permission(self):
        """
        Asserts that authenticated users without the required permissions
        are unable to access the form. They must be redirected to a page
        with information about their lack of permissions.
        """
        response = self.app.get(reverse('journal.add'),
                                user=self.user).follow()
        response.mustcontain('not authorized to access')
        self.assertTemplateUsed(response, 'accounts/unauthorized.html')

    def test_basic_structure(self):
        """
        Just to make sure that the required hidden fields are all
        present.

        All the management fields from inlineformsets used in this
        form should be part of this test.
        """
        perm = _makePermission(perm='change_journal',
                               model='journal',
                               app_label='journalmanager')
        self.user.user_permissions.add(perm)
        response = self.app.get(reverse('journal.add'), user=self.user)
        self.assertTemplateUsed(response, 'journalmanager/add_journal.html')
        # Management fields for the ``title`` and ``mission`` formsets.
        response.mustcontain('journal-form',
                             'csrfmiddlewaretoken',
                             'title-TOTAL_FORMS',
                             'title-INITIAL_FORMS',
                             'title-MAX_NUM_FORMS',
                             'mission-TOTAL_FORMS',
                             'mission-INITIAL_FORMS',
                             'mission-MAX_NUM_FORMS',
                             )

    def test_POST_workflow_with_invalid_formdata(self):
        """
        When an invalid form is submitted, no action is taken, the
        form is rendered again and an alert is shown with the message
        ``There are some errors or missing data``.
        """
        perm = _makePermission(perm='change_journal',
                               model='journal', app_label='journalmanager')
        self.user.user_permissions.add(perm)
        sponsor = modelfactories.SponsorFactory.create()
        form = self.app.get(reverse('journal.add'), user=self.user).forms['journal-form']
        form['journal-sponsor'] = [sponsor.pk]
        form['journal-ctrl_vocabulary'] = 'decs'
        form['journal-frequency'] = 'Q'
        form['journal-final_num'] = ''
        form['journal-eletronic_issn'] = '0102-6720'
        form['journal-init_vol'] = '1'
        form['journal-title'] = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
        form['journal-title_iso'] = u'ABCD. Arquivos B. de C. D. (São Paulo)'
        form['journal-short_title'] = u'ABCD.(São Paulo)'
        form['journal-editorial_standard'] = 'vancouv'
        form['journal-scielo_issn'] = 'print'
        form['journal-init_year'] = '1986'
        form['journal-acronym'] = 'ABCD'
        form['journal-pub_level'] = 'CT'
        form['journal-init_num'] = '1'
        form['journal-final_vol'] = ''
        form['journal-subject_descriptors'] = 'MEDICINA, CIRURGIA, GASTROENTEROLOGIA, GASTROENTEROLOGIA'
        form['journal-print_issn'] = '0102-6720'
        form['journal-copyrighter'] = 'Texto do copyrighter'
        form['journal-publisher_name'] = 'Colégio Brasileiro de Cirurgia Digestiva'
        form['journal-publisher_country'] = 'BR'
        form['journal-publisher_state'] = 'SP'
        form['journal-publication_city'] = 'São Paulo'
        form['journal-editor_address'] = 'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 \
São Paulo/SP Brasil, Tel.: (11) 3288-8174/3289-0741'
        form['journal-editor_email'] = 'cbcd@cbcd.org.br'
        response = form.submit()
        # BUGFIX: the original used ``assertTrue('alert alert-error',
        # response.body)``, which always passes because the two-argument form
        # treats the second argument as a failure message and a non-empty
        # string is truthy. ``assertIn`` performs the intended check.
        self.assertIn('alert alert-error', response.body)
        self.assertIn('There are some errors or missing data', response.body)
        self.assertTemplateUsed(response, 'journalmanager/add_journal.html')

    @unittest.skip('datamodel-ovehaul-v2')
    def test_user_add_journal_with_valid_formdata(self):
        """
        When a valid form is submitted, the user is redirected to
        the journal's list and the new user must be part
        of the list.

        In order to take this action, the user needs the following
        permissions: ``journalmanager.change_journal`` and
        ``journalmanager.list_journal``.
        """
        perm_journal_change = _makePermission(perm='change_journal',
                                              model='journal', app_label='journalmanager')
        perm_journal_list = _makePermission(perm='list_journal',
                                            model='journal', app_label='journalmanager')
        self.user.user_permissions.add(perm_journal_change)
        self.user.user_permissions.add(perm_journal_list)
        sponsor = modelfactories.SponsorFactory.create()
        use_license = modelfactories.UseLicenseFactory.create()
        language = modelfactories.LanguageFactory.create()
        subject_category = modelfactories.SubjectCategoryFactory.create()
        study_area = modelfactories.StudyAreaFactory.create()
        # Consistency: address the form by id (like the other tests) instead
        # of by position (``forms[1]``).
        form = self.app.get(reverse('journal.add'), user=self.user).forms['journal-form']
        form['journal-sponsor'] = [sponsor.pk]
        form['journal-study_areas'] = [study_area.pk]
        form['journal-ctrl_vocabulary'] = 'decs'
        form['journal-frequency'] = 'Q'
        form['journal-final_num'] = ''
        form['journal-eletronic_issn'] = '0102-6720'
        form['journal-init_vol'] = '1'
        form['journal-title'] = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
        form['journal-title_iso'] = u'ABCD. Arquivos B. de C. D. (São Paulo)'
        form['journal-short_title'] = u'ABCD.(São Paulo)'
        form['journal-editorial_standard'] = 'vancouv'
        form['journal-scielo_issn'] = 'print'
        form['journal-init_year'] = '1986'
        form['journal-acronym'] = 'ABCD'
        form['journal-pub_level'] = 'CT'
        form['journal-init_num'] = '1'
        form['journal-final_vol'] = ''
        form['journal-subject_descriptors'] = 'MEDICINA, CIRURGIA, GASTROENTEROLOGIA, GASTROENTEROLOGIA'
        form['journal-print_issn'] = '0102-6720'
        form['journal-copyrighter'] = 'Texto do copyrighter'
        form['journal-publisher_name'] = 'Colégio Brasileiro de Cirurgia Digestiva'
        form['journal-publisher_country'] = 'BR'
        form['journal-publisher_state'] = 'SP'
        form['journal-publication_city'] = 'São Paulo'
        form['journal-editor_name'] = 'Colégio Brasileiro de Cirurgia Digestiva'
        form['journal-editor_address'] = 'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11'
        form['journal-editor_address_city'] = 'São Paulo'
        form['journal-editor_address_state'] = 'SP'
        form['journal-editor_address_zip'] = '01318-901'
        form['journal-editor_address_country'] = 'BR'
        form['journal-editor_phone1'] = '(11) 3288-8174'
        form['journal-editor_phone2'] = '(11) 3289-0741'
        form['journal-editor_email'] = 'cbcd@cbcd.org.br'
        form['journal-use_license'] = use_license.pk
        form.set('journal-collections', str(self.collection.pk))
        form['journal-languages'] = [language.pk]
        form['journal-abstract_keyword_languages'] = [language.pk]
        form.set('journal-subject_categories', str(subject_category.pk))
        form['journal-is_indexed_scie'] = True
        form['journal-is_indexed_ssci'] = False
        form['journal-is_indexed_aehci'] = True
        upload_cover_name = os.path.dirname(__file__) + '/image_test/cover.gif'
        # BUGFIX: read the fixture with a context manager so the file handle
        # is closed (the original leaked it).
        with open(upload_cover_name, "rb") as cover_file:
            uploaded_cover_contents = cover_file.read()
        form.set('journal-cover', (upload_cover_name, uploaded_cover_contents))
        response = form.submit().follow()
        self.assertIn('Saved.', response.body)
        self.assertIn('ABCD.(São Paulo)',
                      response.body)
        self.assertTemplateUsed(response, 'journalmanager/journal_dash.html')

    def test_form_enctype_must_be_multipart_formdata(self):
        """
        Asserts that the enctype attribute of the user form is
        ``multipart/form-data``
        """
        perm_journal_change = _makePermission(perm='change_journal',
                                              model='journal', app_label='journalmanager')
        perm_journal_list = _makePermission(perm='list_journal',
                                            model='journal', app_label='journalmanager')
        self.user.user_permissions.add(perm_journal_change)
        self.user.user_permissions.add(perm_journal_list)
        # Consistency: address the form by id instead of by position.
        form = self.app.get(reverse('journal.add'), user=self.user).forms['journal-form']
        self.assertEqual(form.enctype, 'multipart/form-data')

    def test_form_action_must_be_empty(self):
        """
        Asserts that the action attribute of the journal form is
        empty. This is needed because the same form is used to add
        a new or edit an existing entry.
        """
        perm_journal_change = _makePermission(perm='change_journal',
                                              model='journal', app_label='journalmanager')
        perm_journal_list = _makePermission(perm='list_journal',
                                            model='journal', app_label='journalmanager')
        self.user.user_permissions.add(perm_journal_change)
        self.user.user_permissions.add(perm_journal_list)
        form = self.app.get(reverse('journal.add'), user=self.user).forms['journal-form']
        self.assertEqual(form.action, '')

    def test_form_method_must_be_post(self):
        """
        Asserts that the method attribute of the journal form is
        ``POST``.
        """
        perm_journal_change = _makePermission(perm='change_journal',
                                              model='journal', app_label='journalmanager')
        perm_journal_list = _makePermission(perm='list_journal',
                                            model='journal', app_label='journalmanager')
        self.user.user_permissions.add(perm_journal_change)
        self.user.user_permissions.add(perm_journal_list)
        form = self.app.get(reverse('journal.add'), user=self.user).forms['journal-form']
        self.assertEqual(form.method.lower(), 'post')
class SponsorFormTests(WebTest):
def setUp(self):
self.user = modelfactories.UserFactory(is_active=True)
self.collection = modelfactories.CollectionFactory.create()
self.collection.add_user(self.user, is_manager=True)
def test_basic_structure(self):
"""
Just to make sure that the required hidden fields are all
present.
All the management fields from inlineformsets used in this
form should be part of this test.
"""
perm = _makePermission(perm='add_sponsor',
model='sponsor', app_label='journalmanager')
self.user.user_permissions.add(perm)
page = self.app.get(reverse('sponsor.add'), user=self.user)
page.mustcontain('sponsor-name', 'sponsor-collections')
self.assertTemplateUsed(page, 'journalmanager/add_sponsor.html')
def test_access_without_permission(self):
"""
Asserts that authenticated users without the required permissions
are unable to access the form. They must be redirected to a page
with informations about their lack of permissions.
"""
page = self.app.get(reverse('sponsor.add'), user=self.user).follow()
self.assertTemplateUsed(page, 'accounts/unauthorized.html')
page.mustcontain('not authorized to access')
def test_POST_workflow_with_valid_formdata(self):
"""
When a valid form is submited, the user is redirected to
the sponsor's list and the new sponsor must be part
of the list.
In order to take this action, the user needs the following
permissions: ``journalmanager.add_sponsor`` and
``journalmanager.list_sponsor``.
"""
perm_sponsor_change = _makePermission(perm='add_sponsor',
model='sponsor', app_label='journalmanager')
perm_sponsor_list = _makePermission(perm='list_sponsor',
model='sponsor', app_label='journalmanager')
self.user.user_permissions.add(perm_sponsor_change)
self.user.user_permissions.add(perm_sponsor_list)
form = self.app.get(reverse('sponsor.add'), user=self.user).forms['sponsor-form']
form['sponsor-name'] = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'
form['sponsor-address'] = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \
Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'
form['sponsor-email'] = 'fapesp@scielo.org'
form['sponsor-complement'] = ''
form['sponsor-collections'] = [self.collection.pk]
response = form.submit().follow()
self.assertTemplateUsed(response,
'journalmanager/sponsor_list.html')
self.assertIn('Saved.', response.body)
self.assertIn('Funda\xc3\xa7\xc3\xa3o de Amparo a Pesquisa do Estado de S\xc3\xa3o Paulo', response.body)
def test_POST_workflow_with_invalid_formdata(self):
"""
When an invalid form is submited, no action is taken, the
form is rendered again and an alert is shown with the message
``There are some errors or missing data``.
"""
perm_sponsor_change = _makePermission(perm='add_sponsor',
model='sponsor', app_label='journalmanager')
perm_sponsor_list = _makePermission(perm='list_sponsor',
model='sponsor', app_label='journalmanager')
self.user.user_permissions.add(perm_sponsor_change)
self.user.user_permissions.add(perm_sponsor_list)
form = self.app.get(reverse('sponsor.add'), user=self.user).forms['sponsor-form']
form['sponsor-address'] = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \
Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'
form['sponsor-email'] = 'fapesp@scielo.org'
form['sponsor-complement'] = ''
form['sponsor-collections'] = [self.collection.pk]
response = form.submit()
self.assertTrue('alert alert-error' in response.body)
self.assertIn('There are some errors or missing data', response.body)
self.assertTemplateUsed(response, 'journalmanager/add_sponsor.html')
def test_form_enctype_must_be_urlencoded(self):
    """
    The sponsor form must be submitted with the enctype
    ``application/x-www-form-urlencoded``.
    """
    # Grant the two permissions needed to reach the add-sponsor page.
    for perm_name in ('add_sponsor', 'list_sponsor'):
        permission = _makePermission(perm=perm_name, model='sponsor',
                                     app_label='journalmanager')
        self.user.user_permissions.add(permission)

    page = self.app.get(reverse('sponsor.add'), user=self.user)
    sponsor_form = page.forms['sponsor-form']

    self.assertEqual(sponsor_form.enctype, 'application/x-www-form-urlencoded')
def test_form_action_must_be_empty(self):
    """
    The action attribute of the sponsor form must be empty, because
    the very same form is reused both to create and to edit entries.
    """
    # Grant the two permissions needed to reach the add-sponsor page.
    for perm_name in ('add_sponsor', 'list_sponsor'):
        permission = _makePermission(perm=perm_name, model='sponsor',
                                     app_label='journalmanager')
        self.user.user_permissions.add(permission)

    page = self.app.get(reverse('sponsor.add'), user=self.user)
    sponsor_form = page.forms['sponsor-form']

    self.assertEqual(sponsor_form.action, '')
def test_form_method_must_be_post(self):
    """
    The sponsor form must be submitted via HTTP ``POST``.
    """
    # Grant the two permissions needed to reach the add-sponsor page.
    for perm_name in ('add_sponsor', 'list_sponsor'):
        permission = _makePermission(perm=perm_name, model='sponsor',
                                     app_label='journalmanager')
        self.user.user_permissions.add(permission)

    page = self.app.get(reverse('sponsor.add'), user=self.user)
    sponsor_form = page.forms['sponsor-form']

    self.assertEqual(sponsor_form.method.lower(), 'post')
def test_collections_field_must_only_display_collections_the_user_is_bound(self):
    """
    A user must not be able to add a sponsor to a collection he is
    not related to: picking a foreign collection raises ValueError.
    """
    # Grant the two permissions needed to reach the add-sponsor page.
    for perm_name in ('add_sponsor', 'list_sponsor'):
        permission = _makePermission(perm=perm_name, model='sponsor',
                                     app_label='journalmanager')
        self.user.user_permissions.add(permission)

    # A collection the user has no relation with.
    other_collection = modelfactories.CollectionFactory.create()

    page = self.app.get(reverse('sponsor.add'), user=self.user)
    sponsor_form = page.forms['sponsor-form']

    # The foreign collection is not among the form's choices.
    self.assertRaises(
        ValueError,
        lambda: sponsor_form.set('sponsor-collections', [other_collection.pk]))
class IssueBaseFormClassTests(unittest.TestCase):
    """Unit tests for ``forms.IssueBaseForm`` structure and save semantics.

    The save tests bind ``forms.RegularIssueForm`` — a concrete
    subclass — because the base form is not constructed with a journal
    on its own.
    """

    def _make_bound_form(self):
        """Return ``(form, journal, section, use_license)`` with a valid payload.

        Extracted to remove the POST-dict duplication the two save
        tests previously carried.
        """
        from journalmanager import models
        journal = modelfactories.JournalFactory()
        section = modelfactories.SectionFactory(journal=journal)
        use_license = modelfactories.UseLicenseFactory()

        POST = {
            'section': [section.pk],
            'volume': '1',
            'publication_start_month': '1',
            'publication_end_month': '2',
            'publication_year': '2014',
            'is_marked_up': True,
            'use_license': use_license.pk,
            'total_documents': '10',
            'ctrl_vocabulary': 'nd',
            'editorial_standard': 'iso690',
            'cover': '',
        }

        issue_form = forms.RegularIssueForm(
            POST,
            params={'journal': journal},
            querysets={
                'section': journal.section_set.all(),
                'use_license': models.UseLicense.objects.all(),
            })
        return issue_form, journal, section, use_license

    def test_basic_structure(self):
        """The form exposes exactly the expected fields with the right types."""
        issue_form = forms.IssueBaseForm()
        from django import forms as dj_forms
        expected = {'section': dj_forms.ModelMultipleChoiceField,
                    'volume': dj_forms.CharField,
                    'publication_start_month': dj_forms.ChoiceField,
                    'publication_end_month': dj_forms.ChoiceField,
                    'publication_year': dj_forms.IntegerField,
                    'is_marked_up': dj_forms.BooleanField,
                    'use_license': dj_forms.ModelChoiceField,
                    'total_documents': dj_forms.IntegerField,
                    'ctrl_vocabulary': dj_forms.ChoiceField,
                    'editorial_standard': dj_forms.ChoiceField,
                    'cover': dj_forms.ImageField,
                    }

        # Same field names, and each field of the expected concrete type.
        self.assertEqual(len(expected.keys()), len(issue_form.fields.keys()))
        self.assertEqual(sorted(expected.keys()), sorted(issue_form.fields.keys()))
        self.assertEqual(
            expected,
            {fname: type(field) for fname, field in issue_form.fields.items()}
        )

    def test_save_commit_eq_False(self):
        """``save(commit=False)`` returns an unsaved Issue; m2m saved afterwards."""
        from journalmanager import models
        issue_form, journal, section, use_license = self._make_bound_form()

        # The standard commit=False dance: attach the journal manually,
        # persist, then flush the many-to-many data.
        issue_model = issue_form.save(commit=False)
        issue_model.journal = journal
        issue_model.save()
        issue_form.save_m2m()

        self.assertIsInstance(issue_model, models.Issue)
        self.assertTrue(section in issue_model.section.all())
        self.assertEqual(issue_model.volume, u'1')
        self.assertEqual(issue_model.publication_start_month, u'1')
        self.assertEqual(issue_model.publication_end_month, u'2')
        self.assertEqual(issue_model.publication_year, 2014)
        self.assertEqual(issue_model.is_marked_up, True)
        self.assertEqual(issue_model.use_license, use_license)
        self.assertEqual(issue_model.total_documents, 10)
        self.assertEqual(issue_model.ctrl_vocabulary, u'nd')
        self.assertEqual(issue_model.editorial_standard, u'iso690')
        self.assertEqual(issue_model.cover, None)

    def test_save_m2m_while_commit_eq_False(self):
        """After ``save(commit=False)`` the form must grow a ``save_m2m`` hook."""
        issue_form, _journal, _section, _use_license = self._make_bound_form()
        issue_form.save(commit=False)
        self.assertTrue(hasattr(issue_form, 'save_m2m'))
class RegularIssueFormClassTests(unittest.TestCase):
    """Unit tests for ``forms.RegularIssueForm`` validation rules."""

    def _make_form(self, journal, instance=None, **field_overrides):
        """Build a bound ``RegularIssueForm`` for ``journal``.

        A Section and a UseLicense are created on the fly; any POST
        field may be overridden via keyword arguments. Extracted to
        remove the POST-dict duplication every test previously carried.
        """
        section = modelfactories.SectionFactory(journal=journal)
        use_license = modelfactories.UseLicenseFactory()

        POST = {
            'section': [section.pk],
            'volume': '1',
            'number': '2',
            'publication_start_month': '1',
            'publication_end_month': '2',
            'publication_year': '2014',
            'is_marked_up': True,
            'use_license': use_license.pk,
            'total_documents': '10',
            'ctrl_vocabulary': 'nd',
            'editorial_standard': 'iso690',
            'cover': '',
        }
        POST.update(field_overrides)

        return forms.RegularIssueForm(
            POST,
            instance=instance,
            params={'journal': journal},
            querysets={
                'section': journal.section_set.all(),
                'use_license': models.UseLicense.objects.all(),
            })

    def test_journal_kwargs_is_required(self):
        self.assertRaises(TypeError, lambda: forms.RegularIssueForm())

    def test_inheritance(self):
        # By checking the inheritance, we assume that all base fields are present.
        self.assertTrue(issubclass(forms.RegularIssueForm, forms.IssueBaseForm))

    def test_basic_structure(self):
        from django import forms as dj_forms
        journal = modelfactories.JournalFactory()
        issue_form = forms.RegularIssueForm(params={'journal': journal})
        self.assertEqual(dj_forms.CharField, type(issue_form.fields['number']))

    def test_clean(self):
        # Both volume and number filled: valid.
        journal = modelfactories.JournalFactory()
        self.assertTrue(self._make_form(journal).is_valid())

    def test_clean_volume_only(self):
        journal = modelfactories.JournalFactory()
        self.assertTrue(self._make_form(journal, volume='1', number='').is_valid())

    def test_clean_number_only(self):
        journal = modelfactories.JournalFactory()
        self.assertTrue(self._make_form(journal, volume='', number='1').is_valid())

    def test_clean_fails_if_missing_volume_and_number(self):
        journal = modelfactories.JournalFactory()
        self.assertFalse(self._make_form(journal, volume='', number='').is_valid())

    def test_clean_fails_if_issue_is_duplicated(self):
        # Submitting the key fields of an already-persisted issue fails.
        issue = modelfactories.IssueFactory(type='regular')
        form = self._make_form(issue.journal,
                               volume=issue.volume,
                               number=issue.number,
                               publication_year=issue.publication_year)
        self.assertFalse(form.is_valid())

    def test_clean_fails_if_duplicated_issue(self):
        # Even with the duplication already present twice in the DB,
        # posting the same key fields again must fail.
        journal = modelfactories.JournalFactory()
        issue = modelfactories.IssueFactory(type='regular', volume='1',
                                            number='2', publication_year=2013,
                                            journal=journal)
        modelfactories.IssueFactory(type='regular', volume='1',
                                    number='2', publication_year=2013,
                                    journal=journal)
        form = self._make_form(journal,
                               volume=issue.volume,
                               number=issue.number,
                               publication_year=issue.publication_year)
        self.assertFalse(form.is_valid())

    def test_clean_on_edit(self):
        # Re-submitting an existing issue's own data while editing it
        # must not trip the duplication check.
        journal = modelfactories.JournalFactory()
        issue = modelfactories.IssueFactory(type='regular', volume='1',
                                            number='2', publication_year=2013,
                                            journal=journal)
        form = self._make_form(journal,
                               instance=issue,
                               volume=issue.volume,
                               number=issue.number,
                               publication_start_month='2',
                               publication_year=issue.publication_year)
        self.assertTrue(form.is_valid())
class SupplementIssueFormClassTests(unittest.TestCase):
    """Unit tests for ``forms.SupplementIssueForm`` validation rules.

    A supplement is bound either to a volume (``suppl_type='volume'``,
    volume filled, number empty) or to a number (the inverse); any
    other combination must fail validation.
    """

    # Arbitrary supplement description shared by the validation payloads.
    SUPPL_TEXT = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod'

    def _make_form(self, journal, instance=None, **field_overrides):
        """Build a bound ``SupplementIssueForm`` for ``journal``.

        A Section and a UseLicense are created on the fly; any POST
        field may be overridden via keyword arguments. Extracted to
        remove the POST-dict duplication every test previously carried.
        """
        section = modelfactories.SectionFactory(journal=journal)
        use_license = modelfactories.UseLicenseFactory()

        POST = {
            'section': [section.pk],
            'suppl_text': self.SUPPL_TEXT,
            'suppl_type': 'volume',
            'volume': '1',
            'number': '',
            'publication_start_month': '1',
            'publication_end_month': '2',
            'publication_year': '2014',
            'is_marked_up': True,
            'use_license': use_license.pk,
            'total_documents': '10',
            'ctrl_vocabulary': 'nd',
            'editorial_standard': 'iso690',
            'cover': '',
        }
        POST.update(field_overrides)

        return forms.SupplementIssueForm(
            POST,
            instance=instance,
            params={'journal': journal},
            querysets={
                'section': journal.section_set.all(),
                'use_license': models.UseLicense.objects.all(),
            })

    def _make_suppl_issue(self, journal, **kwargs):
        # Persisted supplement issue used by duplication/edit tests.
        defaults = dict(suppl_text='1', publication_year=2013,
                        journal=journal, type='supplement')
        defaults.update(kwargs)
        return modelfactories.IssueFactory(**defaults)

    def test_journal_kwargs_is_required(self):
        self.assertRaises(TypeError, lambda: forms.SupplementIssueForm())

    def test_inheritance(self):
        # By checking the inheritance, we assume that all base fields are present.
        self.assertTrue(issubclass(forms.SupplementIssueForm, forms.IssueBaseForm))

    def test_basic_structure(self):
        from django import forms as dj_forms
        journal = modelfactories.JournalFactory()
        issue_form = forms.SupplementIssueForm(params={'journal': journal})
        self.assertEqual(dj_forms.CharField, type(issue_form.fields['number']))
        self.assertEqual(dj_forms.ChoiceField, type(issue_form.fields['suppl_type']))

    def test_clean_for_volume_type(self):
        journal = modelfactories.JournalFactory()
        form = self._make_form(journal, suppl_type='volume', volume='1', number='')
        self.assertTrue(form.is_valid())

    def test_clean_for_type_number(self):
        journal = modelfactories.JournalFactory()
        form = self._make_form(journal, suppl_type='number', volume='', number='1')
        self.assertTrue(form.is_valid())

    def test_clean_fail_for_type_number_with_both_volume_and_number(self):
        journal = modelfactories.JournalFactory()
        form = self._make_form(journal, suppl_type='number', volume='1', number='1')
        self.assertFalse(form.is_valid())

    def test_clean_fail_for_type_volume_with_both_volume_and_number(self):
        journal = modelfactories.JournalFactory()
        form = self._make_form(journal, suppl_type='volume', volume='1', number='1')
        self.assertFalse(form.is_valid())

    def test_clean_fail_for_type_number_without_number(self):
        journal = modelfactories.JournalFactory()
        form = self._make_form(journal, suppl_type='number', volume='1', number='')
        self.assertFalse(form.is_valid())

    def test_clean_fail_for_type_volume_without_volume(self):
        # NOTE(review): the original test posted suppl_type='number' with
        # a filled volume — a copy-paste duplicate of the previous test
        # that never exercised its named scenario. Fixed to submit a
        # 'volume' supplement with an empty volume field.
        journal = modelfactories.JournalFactory()
        form = self._make_form(journal, suppl_type='volume', volume='', number='1')
        self.assertFalse(form.is_valid())

    def test_clean_fail_for_type_number_without_number_and_without_volume(self):
        journal = modelfactories.JournalFactory()
        form = self._make_form(journal, suppl_type='number', volume='', number='')
        self.assertFalse(form.is_valid())

    def test_clean_fail_for_type_volume_without_number_and_without_volume(self):
        journal = modelfactories.JournalFactory()
        form = self._make_form(journal, suppl_type='volume', volume='', number='')
        self.assertFalse(form.is_valid())

    def test_clean_fails_for_type_number_if_duplicated_issue(self):
        # Two identical persisted supplements; posting the same key
        # fields again must fail.
        journal = modelfactories.JournalFactory()
        issue = self._make_suppl_issue(journal, volume='', number='1')
        self._make_suppl_issue(journal, volume='', number='1')
        form = self._make_form(journal,
                               suppl_type='number',
                               suppl_text=issue.suppl_text,
                               volume=issue.volume,
                               number=issue.number,
                               publication_year=issue.publication_year)
        self.assertFalse(form.is_valid())

    def test_clean_fails_for_type_volume_if_duplicated_issue(self):
        journal = modelfactories.JournalFactory()
        issue = self._make_suppl_issue(journal, volume='1', number='')
        self._make_suppl_issue(journal, volume='1', number='')
        form = self._make_form(journal,
                               suppl_type='volume',
                               suppl_text=issue.suppl_text,
                               volume=issue.volume,
                               number=issue.number,
                               publication_year=issue.publication_year)
        self.assertFalse(form.is_valid())

    def test_clean_fails_for_type_number_if_issue_already_exist(self):
        issue = modelfactories.IssueFactory(number='1', volume='', type='supplement')
        form = self._make_form(issue.journal,
                               suppl_type=issue.suppl_type,
                               suppl_text=issue.suppl_text,
                               volume=issue.volume,
                               number=issue.number,
                               publication_year=issue.publication_year)
        self.assertFalse(form.is_valid())

    def test_clean_fails_for_type_volume_if_issue_already_exist(self):
        issue = modelfactories.IssueFactory(number='', volume='1', type='supplement')
        form = self._make_form(issue.journal,
                               suppl_type=issue.suppl_type,
                               suppl_text=issue.suppl_text,
                               volume=issue.volume,
                               number=issue.number,
                               publication_year=issue.publication_year)
        self.assertFalse(form.is_valid())

    def test_clean_for_type_number_on_edit(self):
        # Re-submitting an existing supplement's own data while editing
        # it must not trip the duplication check.
        journal = modelfactories.JournalFactory()
        issue = self._make_suppl_issue(journal, volume='', number='2')
        form = self._make_form(journal,
                               instance=issue,
                               suppl_type=issue.suppl_type,
                               suppl_text=issue.suppl_text,
                               volume=issue.volume,
                               number=issue.number,
                               publication_start_month='2',
                               publication_year=issue.publication_year)
        self.assertTrue(form.is_valid())

    def test_clean_for_type_volume_on_edit(self):
        journal = modelfactories.JournalFactory()
        issue = self._make_suppl_issue(journal, volume='2', number='')
        form = self._make_form(journal,
                               instance=issue,
                               suppl_type=issue.suppl_type,
                               suppl_text=issue.suppl_text,
                               volume=issue.volume,
                               number=issue.number,
                               publication_start_month='2',
                               publication_year=issue.publication_year)
        self.assertTrue(form.is_valid())
class SpecialIssueFormClassTests(unittest.TestCase):
    """Unit tests for ``forms.SpecialIssueForm``.

    Special issues carry a fixed marker value in the ``number`` field
    (``SPECIAL_ISSUE_FORM_FIELD_NUMBER``), enforced by the form.
    """

    def _make_post(self, section, use_license, number):
        """Valid special-issue payload; ``volume`` is always empty."""
        return {
            'section': [section.pk],
            'volume': '',
            'number': number,
            'publication_start_month': '1',
            'publication_end_month': '2',
            'publication_year': '2014',
            'is_marked_up': True,
            'use_license': use_license.pk,
            'total_documents': '10',
            'ctrl_vocabulary': 'nd',
            'editorial_standard': 'iso690',
            'cover': '',
        }

    def _querysets(self, journal):
        # Querysets both issue-form flavours are bound with.
        return {
            'section': journal.section_set.all(),
            'use_license': models.UseLicense.objects.all(),
        }

    def test_journal_kwargs_is_required(self):
        self.assertRaises(TypeError, lambda: forms.SpecialIssueForm())

    def test_inheritance(self):
        # By checking the inheritance, we assume that all base fields are present.
        self.assertTrue(issubclass(forms.SpecialIssueForm, forms.RegularIssueForm))

    def test_basic_structure(self):
        from django import forms as dj_forms
        journal = modelfactories.JournalFactory()
        issue_form = forms.SpecialIssueForm(params={'journal': journal})
        self.assertEqual(dj_forms.CharField, type(issue_form.fields['number']))

    def test_mandatory_number_value(self):
        # An unbound special form pre-fills 'number' with the marker.
        from journalmanager.forms import SPECIAL_ISSUE_FORM_FIELD_NUMBER
        journal = modelfactories.JournalFactory()
        issue_form = forms.SpecialIssueForm(params={'journal': journal})
        self.assertEqual(issue_form['number'].value(), SPECIAL_ISSUE_FORM_FIELD_NUMBER)

    def test_clean(self):
        from journalmanager.forms import SPECIAL_ISSUE_FORM_FIELD_NUMBER
        journal = modelfactories.JournalFactory()
        section = modelfactories.SectionFactory(journal=journal)
        use_license = modelfactories.UseLicenseFactory()

        POST = self._make_post(section, use_license, SPECIAL_ISSUE_FORM_FIELD_NUMBER)

        # The marker payload must be valid for both form flavours.
        issue_regular_form = forms.RegularIssueForm(
            POST, params={'journal': journal}, querysets=self._querysets(journal))
        self.assertTrue(issue_regular_form.is_valid())

        issue_form = forms.SpecialIssueForm(
            POST, params={'journal': journal}, querysets=self._querysets(journal))
        self.assertTrue(issue_form.is_valid())

    def test_clean_with_any_number_value(self):
        from journalmanager.forms import SPECIAL_ISSUE_FORM_FIELD_NUMBER
        journal = modelfactories.JournalFactory()
        section = modelfactories.SectionFactory(journal=journal)
        use_license = modelfactories.UseLicenseFactory()

        POST = self._make_post(section, use_license, '1')

        issue_form = forms.SpecialIssueForm(
            POST, params={'journal': journal}, querysets=self._querysets(journal))
        self.assertTrue(issue_form.is_valid())
        # Whatever 'number' was posted, cleaning normalizes it to the marker.
        self.assertEqual(issue_form.cleaned_data['number'], SPECIAL_ISSUE_FORM_FIELD_NUMBER)
####
# Integration tests on forms
####
class IssueFormTests(WebTest):
@_patch_userrequestcontextfinder_settings_setup
def setUp(self):
    # Fixture: an active user who manages a default collection,
    # and a journal joined to that collection.
    self.user = modelfactories.UserFactory(is_active=True)

    self.collection = modelfactories.CollectionFactory.create()
    self.collection.add_user(self.user, is_manager=True)
    self.collection.make_default_to_user(self.user)

    self.journal = modelfactories.JournalFactory.create()
    self.journal.join(self.collection, self.user)
@_patch_userrequestcontextfinder_settings_teardown
def tearDown(self):
    # All cleanup is handled by the decorator; nothing else to undo.
    pass
def test_basic_struture(self):
    """
    Just to make sure that the required hidden fields are all
    present.

    All the management fields from inlineformsets used in this
    form should be part of this test.
    """
    perm = _makePermission(perm='add_issue',
                           model='issue', app_label='journalmanager')
    self.user.user_permissions.add(perm)

    # Markers every add-issue page must render, whatever the type.
    expected_markers = ('number', 'cover',
                        'title-0-title',
                        'title-0-language',
                        'title-TOTAL_FORMS',
                        'title-INITIAL_FORMS',
                        'title-MAX_NUM_FORMS')

    for issue_type in ('regular', 'supplement', 'special'):
        url = reverse('issue.add_%s' % issue_type, args=[self.journal.pk])
        page = self.app.get(url, user=self.user)

        page.mustcontain(*expected_markers)
        self.assertTemplateUsed(page, 'journalmanager/add_issue_%s.html' % issue_type)
def test_access_without_permission(self):
    """
    Authenticated users lacking the required permissions cannot reach
    the form: they are redirected to a page explaining the refusal.
    """
    for issue_type in ('regular', 'supplement', 'special'):
        url = reverse('issue.add_%s' % issue_type, args=[self.journal.pk])
        page = self.app.get(url, user=self.user).follow()

        self.assertTemplateUsed(page, 'accounts/unauthorized.html')
        page.mustcontain('not authorized to access')
def test_POST_workflow_with_valid_formdata(self):
    """
    Submitting a valid issue form redirects to the issue list, for
    every issue type.

    Requires the ``journalmanager.add_issue`` and
    ``journalmanager.list_issue`` permissions.
    """
    for perm_name in ('add_issue', 'list_issue'):
        self.user.user_permissions.add(
            _makePermission(perm=perm_name, model='issue',
                            app_label='journalmanager'))

    for issue_type in ('regular', 'supplement', 'special'):
        url = reverse('issue.add_%s' % issue_type, args=[self.journal.pk])
        form = self.app.get(url, user=self.user).forms['issue-form']

        # Volume/number expectations differ per issue type.
        if issue_type == 'supplement':
            form['number'] = ''
            form['volume'] = '29'
            form['suppl_type'] = 'volume'
            form['suppl_text'] = 'suppl.X'
        elif issue_type == 'special':
            form['number'] = '3'
        else:  # regular
            form['number'] = '3'
            form['volume'] = '29'

        form['total_documents'] = '16'
        form.set('ctrl_vocabulary', 'decs')
        form['publication_start_month'] = '9'
        form['publication_end_month'] = '11'
        form['publication_year'] = '2012'
        form['is_marked_up'] = False
        form['editorial_standard'] = 'other'

        response = form.submit().follow()

        self.assertIn('Saved.', response.body)
        self.assertTemplateUsed(response, 'journalmanager/issue_list.html')
def test_POST_workflow_without_volume_and_number_formdata(self):
    """
    A submitted issue must fill in at least one of the Volume or
    Number fields (except for special issues, whose number is fixed).
    """
    for perm_name in ('add_issue', 'list_issue'):
        self.user.user_permissions.add(
            _makePermission(perm=perm_name, model='issue',
                            app_label='journalmanager'))

    for issue_type in ('regular', 'supplement', 'special'):
        url = reverse('issue.add_%s' % issue_type, args=[self.journal.pk])
        form = self.app.get(url, user=self.user).forms['issue-form']

        form['total_documents'] = '16'
        form.set('ctrl_vocabulary', 'decs')
        form['number'] = ''
        form['volume'] = ''
        form['publication_start_month'] = '9'
        form['publication_end_month'] = '11'
        form['publication_year'] = '2012'
        form['is_marked_up'] = False
        form['editorial_standard'] = 'other'

        response = form.submit()

        if issue_type == 'supplement':
            self.assertIn('There are some errors or missing data.', response.body)
        elif issue_type == 'special':
            # The 'number' field is overwritten with the 'spe' marker,
            # so this submission does not fail for special issues.
            pass
        else:  # regular
            self.assertIn('You must complete at least one of two fields volume or number.', response.body)
            self.assertTemplateUsed(response, 'journalmanager/add_issue_%s.html' % issue_type)
def test_POST_workflow_with_invalid_formdata(self):
    """
    An invalid submission must not be saved: the form is rendered
    again and the alert ``There are some errors or missing data``
    is shown.
    """
    for perm_name in ('add_issue', 'list_issue'):
        self.user.user_permissions.add(
            _makePermission(perm=perm_name, model='issue',
                            app_label='journalmanager'))

    for issue_type in ('regular', 'supplement', 'special'):
        page = self.app.get(
            reverse('issue.add_%s' % issue_type, args=[self.journal.pk]),
            user=self.user)
        form = page.forms['issue-form']
        form['total_documents'] = '16'
        form.set('ctrl_vocabulary', 'decs')
        form['number'] = '3'
        form['volume'] = ''
        # publication dates are deliberately left out to make the
        # submission invalid
        form['is_marked_up'] = False
        form['editorial_standard'] = 'other'
        response = form.submit()

        self.assertIn('There are some errors or missing data.', response.body)
        self.assertTemplateUsed(response, 'journalmanager/add_issue_%s.html' % issue_type)
def test_POST_workflow_with_exist_year_number_volume_on_the_same_journal(self):
    """
    Submitting an issue that duplicates the Year, Number and Volume of
    an existing issue of the same journal must display an error message.
    """
    for perm_name in ('add_issue', 'list_issue'):
        self.user.user_permissions.add(
            _makePermission(perm=perm_name, model='issue',
                            app_label='journalmanager'))

    for issue_type in ('regular', 'special'):
        existing = modelfactories.IssueFactory(journal=self.journal,
                                               suppl_text='', type=issue_type)
        page = self.app.get(
            reverse('issue.add_%s' % issue_type, args=[self.journal.pk]),
            user=self.user)
        form = page.forms['issue-form']
        form['total_documents'] = '16'
        form.set('ctrl_vocabulary', 'decs')
        form['number'] = str(existing.number)
        form['volume'] = str(existing.volume)
        form['publication_start_month'] = '9'
        form['publication_end_month'] = '11'
        form['publication_year'] = str(existing.publication_year)
        form['is_marked_up'] = False
        form['editorial_standard'] = 'other'
        response = form.submit()

        if issue_type == 'regular':
            # duplicate year/volume/number must raise a validation error
            self.assertIn('There are some errors or missing data.', response.body)
            self.assertIn('Issue with this Year and (Volume or Number) already exists for this Journal', response.body)
            self.assertTemplateUsed(response, 'journalmanager/add_issue_%s.html' % issue_type)
        else:
            # for 'special' the number field is overwritten in the form's
            # clean_number method, so the save succeeds and the view
            # answers with a redirect (HTTP 302)
            self.assertEqual(302, response.status_code)
            self.assertIn(reverse('issue.index', args=[existing.journal.pk]), response.location)
            self.assertEqual('', response.body)
def test_POST_workflow_with_exist_year_number_volume_suppl_text_on_the_same_journal(self):
    """
    Submitting a supplement that duplicates the Year, Number, Volume and
    supplement text of an existing issue of the same journal must
    display an error message.
    """
    for perm_name in ('add_issue', 'list_issue'):
        self.user.user_permissions.add(
            _makePermission(perm=perm_name, model='issue',
                            app_label='journalmanager'))

    existing = modelfactories.IssueFactory(journal=self.journal,
                                           suppl_text='1', volume='1',
                                           number='', type='supplement')

    page = self.app.get(
        reverse('issue.add_supplement', args=[self.journal.pk]),
        user=self.user)
    form = page.forms['issue-form']
    form['total_documents'] = '16'
    form.set('ctrl_vocabulary', 'decs')
    form['number'] = str(existing.number)
    form['volume'] = str(existing.volume)
    form['suppl_text'] = existing.suppl_text
    form['publication_start_month'] = '9'
    form['publication_end_month'] = '11'
    form['publication_year'] = str(existing.publication_year)
    form['is_marked_up'] = False
    form['editorial_standard'] = 'other'
    response = form.submit()

    self.assertIn('There are some errors or missing data.', response.body)
    self.assertIn('Issue with this Year and (Volume or Number) already exists for this Journal', response.body)
    self.assertTemplateUsed(response, 'journalmanager/add_issue_supplement.html')
def test_issues_can_be_edited(self):
    """
    Editing an existing issue of any type through the edit form must
    succeed and redirect to the issue list.
    """
    for perm_name in ('add_issue', 'list_issue'):
        self.user.user_permissions.add(
            _makePermission(perm=perm_name, model='issue',
                            app_label='journalmanager'))

    for issue_type in ('regular', 'supplement', 'special'):
        issue = modelfactories.IssueFactory(journal=self.journal,
                                            suppl_text='', type=issue_type)
        page = self.app.get(
            reverse('issue.edit', args=[self.journal.pk, issue.pk]),
            user=self.user)
        form = page.forms['issue-form']
        form['total_documents'] = '99'
        if issue_type == 'supplement':
            # supplements need the supplement-specific fields filled in
            form['suppl_type'] = 'volume'
            form['suppl_text'] = 'suppl.XX'
            form['volume'] = '99'
            form['number'] = ''
        response = form.submit().follow()

        self.assertIn('Saved.', response.body)
        self.assertTemplateUsed(response, 'journalmanager/issue_list.html')
def test_form_enctype_must_be_multipart_formdata(self):
    """
    The enctype attribute of every issue form must be
    ``multipart/form-data``.
    """
    for perm_name in ('add_issue', 'list_issue'):
        self.user.user_permissions.add(
            _makePermission(perm=perm_name, model='issue',
                            app_label='journalmanager'))

    for issue_type in ('regular', 'supplement', 'special'):
        form = self.app.get(
            reverse('issue.add_%s' % issue_type, args=[self.journal.pk]),
            user=self.user).forms['issue-form']
        self.assertEqual(form.enctype, 'multipart/form-data')
def test_form_action_must_be_empty(self):
    """
    The action attribute of every issue form must be empty: the same
    form is used both to add a new entry and to edit an existing one.
    """
    for perm_name in ('add_issue', 'list_issue'):
        self.user.user_permissions.add(
            _makePermission(perm=perm_name, model='issue',
                            app_label='journalmanager'))

    for issue_type in ('regular', 'supplement', 'special'):
        form = self.app.get(
            reverse('issue.add_%s' % issue_type, args=[self.journal.pk]),
            user=self.user).forms['issue-form']
        self.assertEqual(form.action, '')
def test_form_method_must_be_post(self):
    """
    The method attribute of every issue form must be ``POST``.
    """
    for perm_name in ('add_issue', 'list_issue'):
        self.user.user_permissions.add(
            _makePermission(perm=perm_name, model='issue',
                            app_label='journalmanager'))

    for issue_type in ('regular', 'supplement', 'special'):
        form = self.app.get(
            reverse('issue.add_%s' % issue_type, args=[self.journal.pk]),
            user=self.user).forms['issue-form']
        self.assertEqual(form.method.lower(), 'post')
def test_sections_must_not_be_trashed(self):
    """
    Only valid (non trashed) sections may be offered for binding to an
    issue; selecting a trashed one must be impossible.
    """
    for perm_name in ('add_issue', 'list_issue'):
        self.user.user_permissions.add(
            _makePermission(perm=perm_name, model='issue',
                            app_label='journalmanager'))

    trashed_section = modelfactories.SectionFactory.create(
        journal=self.journal, is_trashed=True)

    for issue_type in ('regular', 'supplement', 'special'):
        form = self.app.get(
            reverse('issue.add_%s' % issue_type, args=[self.journal.pk]),
            user=self.user).forms['issue-form']
        # the trashed section must not be an available choice
        self.assertRaises(ValueError,
                          form.set, 'section', str(trashed_section.pk))
class SearchFormTests(WebTest):
    """Tests for the global search form and the letter filters."""

    def setUp(self):
        self.user = modelfactories.UserFactory(is_active=True)
        self.user.user_permissions.add(
            _makePermission(perm='list_journal', model='journal'))
        self.collection = modelfactories.CollectionFactory.create()
        self.collection.add_user(self.user, is_manager=True)

    def _get_search_form(self):
        # helper: fetch the search form from the index page
        return self.app.get(reverse('index'),
                            user=self.user).forms['search-form']

    def test_basic_struture(self):
        """
        The home page must expose the required hidden search fields.

        All the management fields from inlineformsets used in this
        form should be part of this test.
        """
        page = self.app.get(reverse('index'), user=self.user)
        page.mustcontain('list_model', 'q')
        self.assertTemplateUsed(page, 'journalmanager/home_journal.html')

    def test_form_enctype_must_be_urlencoded(self):
        """
        The enctype attribute of the search form must be
        ``application/x-www-form-urlencoded``.
        """
        self.assertEqual(self._get_search_form().enctype,
                         'application/x-www-form-urlencoded')

    def test_form_action_must_be_empty(self):
        """
        The action attribute of the search form must be empty, so the
        form posts back to the journal home.
        """
        self.assertEqual(self._get_search_form().action, '')

    def test_form_method_must_be_get(self):
        """
        The method attribute of the search form must be ``GET``.
        """
        self.assertEqual(self._get_search_form().method.lower(), 'get')

    def test_GET_search_journal(self):
        """
        Searching for journals must return the matching journal list.
        """
        journal = modelfactories.JournalFactory.create()
        journal.join(self.collection, self.user)

        page = self.app.get(reverse('journal.index') + '?q=Arquivos',
                            user=self.user)
        self.assertIn('ABCD. Arquivos Brasileiros de Cirurgia Digestiva (S\xc3\xa3o Paulo)',
                      page.body)

    def test_GET_search_sponsor(self):
        """
        Searching for sponsors must return the matching sponsor list.
        """
        self.user.user_permissions.add(
            _makePermission(perm='list_sponsor', model='sponsor',
                            app_label='journalmanager'))
        sponsor = modelfactories.SponsorFactory.create()
        sponsor.collections.add(self.collection)

        page = self.app.get(reverse('sponsor.index') + '?q=Amparo',
                            user=self.user)
        self.assertIn('Funda\xc3\xa7\xc3\xa3o de Amparo a Pesquisa do Estado de S\xc3\xa3o Paulo',
                      page.body)

    def test_GET_journal_filter_by_letter(self):
        """
        Filtering journals by initial letter must return the matching list.
        """
        self.user.user_permissions.add(
            _makePermission(perm='list_journal', model='journal',
                            app_label='journalmanager'))
        journal = modelfactories.JournalFactory.create()
        journal.join(self.collection, self.user)

        page = self.app.get(reverse('journal.index') + '?letter=A', user=self.user)
        self.assertIn('ABCD. Arquivos Brasileiros de Cirurgia Digestiva (S\xc3\xa3o Paulo)',
                      page.body)

    def test_GET_sponsor_filter_by_letter(self):
        """
        Filtering sponsors by initial letter must return the matching list.
        """
        self.user.user_permissions.add(
            _makePermission(perm='list_sponsor', model='sponsor',
                            app_label='journalmanager'))
        sponsor = modelfactories.SponsorFactory.create()
        sponsor.collections.add(self.collection)

        page = self.app.get(reverse('sponsor.index') + '?letter=F', user=self.user)
        self.assertIn('Funda\xc3\xa7\xc3\xa3o de Amparo a Pesquisa do Estado de S\xc3\xa3o Paulo',
                      page.body)
class SectionTitleFormValidationTests(TestCase):
    """Validation rules for section titles across languages."""

    def test_same_titles_in_different_languages_must_be_valid(self):
        """
        The same section title may be registered once per language
        without triggering a uniqueness error.
        """
        user = modelfactories.UserFactory(is_active=True)
        collection = modelfactories.CollectionFactory.create()
        collection.add_user(user, is_manager=True)

        journal = modelfactories.JournalFactory.create()
        journal.join(collection, user)

        english = modelfactories.LanguageFactory.create(iso_code='en',
                                                        name='english')
        portuguese = modelfactories.LanguageFactory.create(iso_code='pt',
                                                           name='portuguese')
        journal.languages.add(english)
        journal.languages.add(portuguese)

        # the title already exists in english; re-adding it in
        # portuguese must be accepted
        section = modelfactories.SectionFactory(journal=journal)
        section.add_title('Original Article', language=english)

        post_dict = {
            u'titles-INITIAL_FORMS': 0,
            u'titles-TOTAL_FORMS': 1,
            u'legacy_code': u'',
            u'titles-0-language': unicode(portuguese.pk),
            u'titles-0-title': u'Original Article',
        }

        section_forms = forms.get_all_section_forms(post_dict,
                                                    journal=journal,
                                                    section=section)

        self.assertTrue(section_forms['section_form'].is_valid())
        self.assertTrue(section_forms['section_title_formset'].is_valid())
class JournalEditorsTests(WebTest):
    """Tests for the add-editor form on the journal editors screen."""

    def setUp(self):
        self.user = modelfactories.UserFactory(is_active=True)
        self.collection = modelfactories.CollectionFactory.create()
        self.collection.add_user(self.user, is_manager=True)
        self.journal = modelfactories.JournalFactory.create()
        self.journal.join(self.collection, self.user)
        self.user.user_permissions.add(
            _makePermission(perm='list_journal', model='journal',
                            app_label='journalmanager'))

    def _get_add_editor_form(self):
        # the editors screen is gated behind the ``editor_manager``
        # waffle flag; enable it for everyone before requesting the page
        from waffle import Flag
        Flag.objects.create(name='editor_manager', everyone=True)
        return self.app.get(
            reverse('journal_editors.index', args=[self.journal.pk]),
            user=self.user).forms['add-editor']

    def test_form_ectype_must_be_urlencoded(self):
        """
        The enctype attribute of the add-editor form must be
        ``application/x-www-form-urlencoded``.
        """
        self.assertEqual(self._get_add_editor_form().enctype,
                         'application/x-www-form-urlencoded')

    def test_form_method_must_be_post(self):
        """
        The method attribute of the add-editor form must be ``POST``.
        """
        self.assertEqual(self._get_add_editor_form().method.lower(), 'post')

    def test_form_action_must_not_be_empty(self):
        """
        The action attribute must point at the journal_editors.add view.
        """
        expected = reverse('journal_editors.add', args=[self.journal.pk])
        self.assertEqual(self._get_add_editor_form().action, expected)

    def test_form_adding_an_editor_with_a_valid_username(self):
        """
        Submitting an existing username must register it as an editor.
        """
        self.user.user_permissions.add(
            _makePermission(perm='change_journal', model='journal',
                            app_label='journalmanager'))
        form = self._get_add_editor_form()
        form['query'] = self.user.username
        response = form.submit()

        self.assertIn('Now, %s is an editor of this journal.' % self.user.username, response.body)

    def test_form_adding_an_editor_with_a_invalid_username(self):
        """
        Submitting an unknown username must report the failure.
        """
        self.user.user_permissions.add(
            _makePermission(perm='change_journal', model='journal',
                            app_label='journalmanager'))
        form = self._get_add_editor_form()
        form['query'] = 'fakeuser'
        response = form.submit()

        self.assertIn('User fakeuser does not exists', response.body)
class AheadFormTests(WebTest):
    """Tests for the ahead-of-print form on the issue index page."""

    def setUp(self):
        self.user = modelfactories.UserFactory(is_active=True)
        self.collection = modelfactories.CollectionFactory.create()
        self.collection.add_user(self.user, is_manager=True)
        self.journal = modelfactories.JournalFactory.create()
        self.journal.join(self.collection, self.user)

    def _get_ahead_form(self):
        # grant the issue permissions the view requires, then fetch
        # the ahead form from the issue index page
        for perm_name in ('list_issue', 'change_issue'):
            self.user.user_permissions.add(
                _makePermission(perm=perm_name, model='issue',
                                app_label='journalmanager'))
        return self.app.get(reverse('issue.index', args=[self.journal.pk]),
                            user=self.user).forms['ahead-form']

    def test_form_enctype_must_be_urlencoded(self):
        """
        The enctype attribute of the ahead form must be
        ``application/x-www-form-urlencoded``.
        """
        self.assertEqual(self._get_ahead_form().enctype,
                         'application/x-www-form-urlencoded')

    def test_form_action_must_be_empty(self):
        """
        The action attribute of the ahead form must be empty.
        """
        self.assertEqual(self._get_ahead_form().action, '')

    def test_form_method_must_be_post(self):
        """
        The method attribute of the ahead form must be ``POST``.
        """
        self.assertEqual(self._get_ahead_form().method.lower(), 'post')

    def test_basic_structure(self):
        """
        The ahead form must carry the CSRF protection token.
        """
        self.assertIn('csrfmiddlewaretoken', self._get_ahead_form().fields)
class PressReleaseFormTests(WebTest):
    """Tests for the regular (issue-bound) press release form."""

    def setUp(self):
        self.user = modelfactories.UserFactory(is_active=True)
        self.collection = modelfactories.CollectionFactory.create()
        self.collection.add_user(self.user, is_manager=True)
        self.journal = modelfactories.JournalFactory.create()
        self.journal.join(self.collection, self.user)

    def _grant_prelease_perms(self):
        # grant the list/add pressrelease permissions the views require
        for perm_name in ('list_pressrelease', 'add_pressrelease'):
            self.user.user_permissions.add(
                _makePermission(perm=perm_name, model='pressrelease',
                                app_label='journalmanager'))

    def _get_prelease_form(self):
        # fetch the press release form from the add view
        return self.app.get(reverse('prelease.add', args=[self.journal.pk]),
                            user=self.user).forms['prelease-form']

    def test_form_enctype_must_be_urlencoded(self):
        """
        The enctype attribute of the press release form must be
        ``application/x-www-form-urlencoded``.
        """
        self._grant_prelease_perms()
        self.assertEqual(self._get_prelease_form().enctype,
                         'application/x-www-form-urlencoded')

    def test_form_action_must_be_empty(self):
        """
        The action attribute of the press release form must be empty.
        """
        self._grant_prelease_perms()
        self.assertEqual(self._get_prelease_form().action, '')

    def test_form_method_must_be_post(self):
        """
        The method attribute of the press release form must be ``POST``.
        """
        self._grant_prelease_perms()
        self.assertEqual(self._get_prelease_form().method.lower(), 'post')

    def test_basic_structure(self):
        """
        The press release form must carry the CSRF protection token.
        """
        self._grant_prelease_perms()
        self.assertIn('csrfmiddlewaretoken', self._get_prelease_form().fields)

    def test_POST_pressrelease_with_valid_data(self):
        """
        A fully filled press release form must be saved.
        """
        self._grant_prelease_perms()
        issue = modelfactories.IssueFactory(journal=self.journal)
        english = modelfactories.LanguageFactory(iso_code='en',
                                                 name='english')
        self.journal.languages.add(english)

        form = self._get_prelease_form()
        form.set('issue', issue.pk)
        form['doi'] = "http://dx.doi.org/10.1590/S0102-86502013001300002"
        form['article-0-article_pid'] = 'S0102-86502013001300002'
        form.set('translation-0-language', english.pk)
        form['translation-0-title'] = "Press Relasea MFP"
        form['translation-0-content'] = "<p>Body of some HTML</p>"

        response = form.submit().follow()
        self.assertIn('Saved.', response.body)

    def test_POST_pressrelease_with_invalid_data(self):
        """
        A submission without a bound issue must be rejected with an alert.
        """
        self._grant_prelease_perms()
        english = modelfactories.LanguageFactory(iso_code='en',
                                                 name='english')
        self.journal.languages.add(english)

        # note: the 'issue' field is deliberately left unset
        form = self._get_prelease_form()
        form['doi'] = "http://dx.doi.org/10.1590/S0102-86502013001300002"
        form['article-0-article_pid'] = 'S0102-86502013001300002'
        form.set('translation-0-language', english.pk)
        form['translation-0-title'] = "Press Relasea MFP"
        form['translation-0-content'] = "<p>Body of some HTML</p>"

        response = form.submit()
        self.assertIn('There are some errors or missing data.', response.body)
        self.assertTemplateUsed(response,
                                'journalmanager/add_pressrelease.html')

    def test_pressrelease_if_on_edit_form_it_has_article_pid(self):
        """
        The edit form of an ahead press release must pre-fill the
        related article PID.
        """
        self.user.user_permissions.add(
            _makePermission(perm='add_pressrelease', model='pressrelease',
                            app_label='journalmanager'))

        ahead_prelease = modelfactories.AheadPressReleaseFactory()
        modelfactories.PressReleaseArticleFactory(
            press_release=ahead_prelease,
            article_pid="S0102-311X2013000300001")

        edit_form = self.app.get(
            reverse('aprelease.edit',
                    args=[self.journal.pk, ahead_prelease.pk]),
            user=self.user).forms['prelease-form']

        self.assertEqual(edit_form['article-0-article_pid'].value, "S0102-311X2013000300001")

    def test_POST_pressrelease_must_contain_at_least_one_press_release_translation(self):
        """
        A press release without any translation must be rejected.
        """
        self._grant_prelease_perms()
        issue = modelfactories.IssueFactory(journal=self.journal)
        english = modelfactories.LanguageFactory(iso_code='en',
                                                 name='english')
        self.journal.languages.add(english)

        form = self._get_prelease_form()
        form.set('issue', issue.pk)
        form['doi'] = "http://dx.doi.org/10.1590/S0102-86502013001300002"
        form['article-0-article_pid'] = 'S0102-86502013001300002'

        response = form.submit()
        self.assertIn('There are some errors or missing data.', response.body)
        self.assertIn('Please fill in at least one form', response.body)
        self.assertTemplateUsed(response,
                                'journalmanager/add_pressrelease.html')

    def test_pressrelease_translations_language_filtering(self):
        """
        The translation language choices must be restricted to the
        journal's languages.
        """
        english = modelfactories.LanguageFactory.create(iso_code='en',
                                                        name='english')
        modelfactories.LanguageFactory.create(iso_code='pt',
                                              name='portuguese')
        journal = modelfactories.JournalFactory.create()
        journal.languages.add(english)

        translation_form = forms.PressReleaseTranslationForm(journal=journal)
        available = translation_form['language'].field.queryset
        self.assertEqual(len(available), 1)
        self.assertEqual(available[0], english)

    def test_pressrelease_translations_raises_TypeError_while_missing_journal(self):
        """
        The translation form requires a journal argument.
        """
        self.assertRaises(TypeError, forms.PressReleaseTranslationForm)

    def test_get_all_pressrelease_forms(self):
        """
        get_all_pressrelease_forms must expose the three expected forms.
        """
        english = modelfactories.LanguageFactory.create(iso_code='en',
                                                        name='english')
        journal = modelfactories.JournalFactory.create()
        journal.languages.add(english)

        pr_forms = forms.get_all_pressrelease_forms(
            {}, journal, models.PressRelease())
        self.assertEqual(
            sorted(pr_forms.keys()),
            sorted([
                'pressrelease_form',
                'translation_formset',
                'article_formset',
            ])
        )

    def test_get_all_pressrelease_language_filtering(self):
        """
        The translation formset must filter languages by the journal.
        """
        english = modelfactories.LanguageFactory.create(iso_code='en',
                                                        name='english')
        journal = modelfactories.JournalFactory.create()
        journal.languages.add(english)

        pr_forms = forms.get_all_pressrelease_forms(
            {}, journal, models.PressRelease())
        available = pr_forms['translation_formset'][0].fields['language'].queryset
        self.assertEqual(len(available), 1)
        self.assertEqual(available[0], english)

    def test_issues_must_not_be_trashed(self):
        """
        Only valid (non trashed) issues may be offered for binding to a
        press release; selecting a trashed one must be impossible.
        """
        self._grant_prelease_perms()
        trashed_issue = modelfactories.IssueFactory.create(
            journal=self.journal, is_trashed=True)
        english = modelfactories.LanguageFactory(iso_code='en',
                                                 name='english')
        self.journal.languages.add(english)

        form = self._get_prelease_form()
        self.assertRaises(ValueError,
                          form.set, 'issue', str(trashed_issue.pk))
class AheadPressReleaseFormTests(WebTest):
def setUp(self):
self.user = modelfactories.UserFactory(is_active=True)
self.collection = modelfactories.CollectionFactory.create()
self.collection.add_user(self.user, is_manager=True)
self.journal = modelfactories.JournalFactory()
self.journal.join(self.collection, self.user)
def test_form_enctype_must_be_urlencoded(self):
"""
Asserts that the enctype attribute of the pressrelease form is
``application/x-www-form-urlencoded``
"""
perm_prelease_list = _makePermission(perm='list_pressrelease',
model='pressrelease',
app_label='journalmanager')
perm_prelease_add = _makePermission(perm='add_pressrelease',
model='pressrelease',
app_label='journalmanager')
self.user.user_permissions.add(perm_prelease_add)
self.user.user_permissions.add(perm_prelease_list)
form = self.app.get(reverse('aprelease.add',
args=[self.journal.pk]),
user=self.user).forms['prelease-form']
self.assertEqual(form.enctype, 'application/x-www-form-urlencoded')
def test_form_action_must_be_empty(self):
"""
Asserts that the action attribute of the press release form is
empty.
"""
perm_prelease_list = _makePermission(perm='list_pressrelease',
model='pressrelease',
app_label='journalmanager')
perm_prelease_add = _makePermission(perm='add_pressrelease',
model='pressrelease',
app_label='journalmanager')
self.user.user_permissions.add(perm_prelease_list)
self.user.user_permissions.add(perm_prelease_add)
form = self.app.get(reverse('aprelease.add', args=[self.journal.pk]),
user=self.user).forms['prelease-form']
self.assertEqual(form.action, '')
def test_form_method_must_be_post(self):
"""
Asserts that the method attribute of the press release form is
``POST``.
"""
perm_prelease_list = _makePermission(perm='list_pressrelease',
model='pressrelease',
app_label='journalmanager')
perm_prelease_add = _makePermission(perm='add_pressrelease',
model='pressrelease',
app_label='journalmanager')
self.user.user_permissions.add(perm_prelease_list)
self.user.user_permissions.add(perm_prelease_add)
form = self.app.get(reverse('aprelease.add',
args=[self.journal.pk]),
user=self.user).forms['prelease-form']
self.assertEqual(form.method.lower(), 'post')
def test_basic_structure(self):
perm_prelease_list = _makePermission(perm='list_pressrelease',
model='pressrelease',
app_label='journalmanager')
perm_prelease_add = _makePermission(perm='add_pressrelease',
model='pressrelease',
app_label='journalmanager')
self.user.user_permissions.add(perm_prelease_add)
self.user.user_permissions.add(perm_prelease_list)
form = self.app.get(reverse('aprelease.add',
args=[self.journal.pk]),
user=self.user).forms['prelease-form']
self.assertIn('csrfmiddlewaretoken', form.fields)
def test_POST_pressrelease_with_valid_data(self):
perm_prelease_list = _makePermission(perm='list_pressrelease',
model='pressrelease',
app_label='journalmanager')
perm_prelease_add = _makePermission(perm='add_pressrelease',
model='pressrelease',
app_label='journalmanager')
self.user.user_permissions.add(perm_prelease_add)
self.user.user_permissions.add(perm_prelease_list)
language = modelfactories.LanguageFactory(iso_code='en',
name='english')
self.journal.languages.add(language)
form = self.app.get(reverse('aprelease.add',
args=[self.journal.pk]),
user=self.user).forms['prelease-form']
form['doi'] = "http://dx.doi.org/10.1590/S0102-86502013001300002"
form['article-0-article_pid'] = 'S0102-86502013001300002'
form.set('translation-0-language', language.pk)
form['translation-0-title'] = "Press Relasea MFP"
form['translation-0-content'] = "<p>Body of some HTML</p>"
response = form.submit().follow()
self.assertIn('Saved.', response.body)
def test_POST_pressrelease_with_invalid_data(self):
perm_prelease_list = _makePermission(perm='list_pressrelease',
model='pressrelease',
app_label='journalmanager')
perm_prelease_add = _makePermission(perm='add_pressrelease',
model='pressrelease',
app_label='journalmanager')
self.user.user_permissions.add(perm_prelease_add)
self.user.user_permissions.add(perm_prelease_list)
language = modelfactories.LanguageFactory(iso_code='en',
name='english')
self.journal.languages.add(language)
form = self.app.get(reverse('aprelease.add',
args=[self.journal.pk]),
user=self.user).forms['prelease-form']
form['doi'] = "http://dx.doi.org/10.1590/S0102-86502013001300002"
form['article-0-article_pid'] = 'S0102-86502013001300002'
# missing translation language
form['translation-0-title'] = "Press Relasea MFP"
form['translation-0-content'] = "<p>Body of some HTML</p>"
response = form.submit()
self.assertIn('There are some errors or missing data.', response.body)
self.assertTemplateUsed(response,
'journalmanager/add_pressrelease.html')
def test_POST_pressrelease_must_contain_at_least_one_press_release_translation(self):
perm_prelease_list = _makePermission(perm='list_pressrelease',
model='pressrelease',
app_label='journalmanager')
perm_prelease_add = _makePermission(perm='add_pressrelease',
model='pressrelease',
app_label='journalmanager')
self.user.user_permissions.add(perm_prelease_add)
self.user.user_permissions.add(perm_prelease_list)
language = modelfactories.LanguageFactory(iso_code='en',
name='english')
self.journal.languages.add(language)
form = self.app.get(reverse('aprelease.add',
args=[self.journal.pk]),
user=self.user).forms['prelease-form']
form['doi'] = "http://dx.doi.org/10.1590/S0102-86502013001300002"
form['article-0-article_pid'] = 'S0102-86502013001300002'
response = form.submit()
self.assertIn('There are some errors or missing data.', response.body)
self.assertIn('Please fill in at least one form', response.body)
self.assertTemplateUsed(response,
'journalmanager/add_pressrelease.html')
def test_pressrelease_translations_language_filtering(self):
language1 = modelfactories.LanguageFactory.create(iso_code='en',
name='english')
language2 = modelfactories.LanguageFactory.create(iso_code='pt',
name='portuguese')
journal = modelfactories.JournalFactory.create()
journal.languages.add(language1)
testing_form = forms.PressReleaseTranslationForm(journal=journal)
res_qset = testing_form['language'].field.queryset
self.assertEqual(len(res_qset), 1)
self.assertEqual(res_qset[0], language1)
def test_pressrelease_translations_raises_TypeError_while_missing_journal(self):
    """Instantiating the form without the `journal` kwarg raises TypeError."""
    with self.assertRaises(TypeError):
        forms.PressReleaseTranslationForm()
def test_get_all_pressrelease_forms(self):
    """get_all_pressrelease_forms returns the three expected form entries."""
    language = modelfactories.LanguageFactory.create(iso_code='en',
                                                    name='english')
    journal = modelfactories.JournalFactory.create()
    journal.languages.add(language)

    pr_forms = forms.get_all_pressrelease_forms({}, journal,
                                                models.PressRelease())

    expected_keys = ['article_formset',
                     'pressrelease_form',
                     'translation_formset']
    self.assertEqual(sorted(pr_forms.keys()), sorted(expected_keys))
def test_get_all_ahead_pressrelease_language_filtering(self):
    """Ahead press-release translation formsets filter languages by journal."""
    english = modelfactories.LanguageFactory.create(iso_code='en',
                                                   name='english')
    journal = modelfactories.JournalFactory.create()
    journal.languages.add(english)

    pr_forms = forms.get_all_ahead_pressrelease_forms(
        {}, journal, models.AheadPressRelease())
    offered_languages = (
        pr_forms['translation_formset'][0].fields['language'].queryset)

    self.assertEqual(len(offered_languages), 1)
    self.assertEqual(offered_languages[0], english)
| 41.755089
| 124
| 0.5822
| 13,122
| 131,278
| 5.63062
| 0.04763
| 0.027935
| 0.01949
| 0.037355
| 0.893145
| 0.880165
| 0.871571
| 0.857197
| 0.846315
| 0.840143
| 0
| 0.011424
| 0.3099
| 131,278
| 3,143
| 125
| 41.768374
| 0.804106
| 0.076578
| 0
| 0.833952
| 0
| 0.002319
| 0.183307
| 0.037164
| 0
| 0
| 0
| 0
| 0.083488
| 1
| 0.068646
| false
| 0.001391
| 0.012523
| 0
| 0.089054
| 0.001855
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
63048439757c5f43d0baf623fa88ee2c29dae7cd
| 10,804
|
py
|
Python
|
test/robotProblemGenerator.py
|
stawo/ekabPlanner
|
63f78d4932daa1c17b1fdff30b074c8ad1a741d3
|
[
"MIT"
] | null | null | null |
test/robotProblemGenerator.py
|
stawo/ekabPlanner
|
63f78d4932daa1c17b1fdff30b074c8ad1a741d3
|
[
"MIT"
] | null | null | null |
test/robotProblemGenerator.py
|
stawo/ekabPlanner
|
63f78d4932daa1c17b1fdff30b074c8ad1a741d3
|
[
"MIT"
] | null | null | null |
#!/bin/python3
from itertools import product
def generate_planning_domain(columns, rows, filename):
    """Write an EKAB robot planning domain for a ``columns`` x ``rows`` grid.

    The domain is written to ``filename`` section by section (:predicates,
    :axioms, :rule, :action, closing paren), emitting a progress message
    after each section, exactly as the original incremental version did.

    The position encoding uses cumulative bounds: ``RightOf{c}`` / ``LeftOf{c}``
    bracket the column, ``AboveOf{r}`` / ``BelowOf{r}`` bracket the row, and
    the exact cell is pinned by conjunctions such as
    ``(and (RightOf{c} ?x) (LeftOf{c+1} ?x))``.

    :param columns: number of grid columns (Column0 .. Column{columns-1}).
    :param rows: number of grid rows (Row0 .. Row{rows-1}).
    :param filename: path of the PDDL file to create/overwrite.
    """

    def effect(condition, add, delete=None):
        # Render one conditional effect clause; the :delete line is omitted
        # when nothing is retracted (matching the original output).
        clause = ["\t\t(\n",
                  f"\t\t:condition (mko {condition})\n",
                  f"\t\t:add ({add})\n"]
        if delete is not None:
            clause.append(f"\t\t:delete ({delete})\n")
        clause.append("\t\t)\n")
        return "".join(clause)

    def write_section(text, mode="a"):
        # Context manager guarantees the handle is closed even if the write
        # fails (the original opened/closed by hand for every section).
        with open(filename, mode) as output_file:
            output_file.write(text)

    print("Inizio la generazione del planning domain.\n")

    # --- :predicates -------------------------------------------------------
    section = ["(define (domain robot)\n\t(:requirements :ekab)\n",
               "\t(:predicates\n",
               "\t\t(Columns ?x)\n",
               "\t\t(Rows ?x)\n"]
    section += [f"\t\t(Column{c} ?x)\n" for c in range(columns)]
    section += [f"\t\t(RightOf{c} ?x)\n" for c in range(columns)]
    section += [f"\t\t(LeftOf{c + 1} ?x)\n" for c in range(columns)]
    section += [f"\t\t(Row{r} ?x)\n" for r in range(rows)]
    section += [f"\t\t(AboveOf{r} ?x)\n" for r in range(rows)]
    section += [f"\t\t(BelowOf{r + 1} ?x)\n" for r in range(rows)]
    section.append("\t)\n")
    write_section("".join(section), mode="w")  # first section truncates the file
    print("Finito la sezione :predicates\n")

    # --- :axioms -----------------------------------------------------------
    # Bound hierarchies (RightOf{c+1} implies RightOf{c}, etc.) plus the
    # disjointness axioms (LeftOf{c} excludes RightOf{c}).
    section = ["\t(:axioms\n", "\t\t(isA RightOf0 Columns)\n"]
    section += [f"\t\t(isA RightOf{c + 1} RightOf{c})\n"
                for c in range(columns - 1)]
    section.append(f"\t\t(isA LeftOf{columns} Columns)\n")
    section += [f"\t\t(isA LeftOf{columns - c - 1} LeftOf{columns - c})\n"
                for c in range(columns - 1)]
    section.append("\t\t(isA AboveOf0 Rows)\n")
    section += [f"\t\t(isA AboveOf{r + 1} AboveOf{r})\n"
                for r in range(rows - 1)]
    section.append(f"\t\t(isA BelowOf{rows} Rows)\n")
    section += [f"\t\t(isA BelowOf{rows - r - 1} BelowOf{rows - r})\n"
                for r in range(rows - 1)]
    section += [f"\t\t(isA LeftOf{c} (not RightOf{c}))\n"
                for c in range(1, columns)]
    section += [f"\t\t(isA AboveOf{r} (not BelowOf{r}))\n"
                for r in range(1, rows)]
    section.append("\t)\n")
    write_section("".join(section))
    print("Finito la sezione :axioms\n")

    # --- :rule -------------------------------------------------------------
    # One rule per move action, each guarded only by membership in the
    # relevant axis concept.
    section = []
    for rule, concept, action in (("ruleRight", "Columns", "moveRight"),
                                  ("ruleLeft", "Columns", "moveLeft"),
                                  ("ruleUp", "Rows", "moveUp"),
                                  ("ruleDown", "Rows", "moveDown")):
        section.append(f"\t(:rule {rule}\n"
                       f"\t\t:condition (mko({concept} ?x))\n"
                       f"\t\t:action {action}\n"
                       "\t)\n")
    write_section("".join(section))
    print("Finito la sezione :rule\n")

    # --- :action -----------------------------------------------------------
    # Each action shifts the cumulative bounds and the exact Column/Row by
    # one cell; the final loop of each action derives the new exact position
    # from the conjunction of the two bounds that pin the current cell.
    section = ["\t(:action moveRight\n", "\t\t:parameters (?x)\n",
               "\t\t:effects \n"]
    section += [effect(f"(RightOf{c} ?x)", f"(RightOf{c + 1} ?x)")
                for c in range(columns - 1)]
    section += [effect(f"(LeftOf{c} ?x)", f"(LeftOf{c + 1} ?x)",
                       f"(LeftOf{c} ?x)")
                for c in range(1, columns)]
    section += [effect(f"(Column{c} ?x)", f"(Column{c + 1} ?x)",
                       f"(Column{c} ?x)")
                for c in range(columns - 1)]
    section += [effect(f"(and (RightOf{c} ?x) (LeftOf{c + 1} ?x))",
                       f"(Column{c + 1} ?x)")
                for c in range(columns - 1)]
    section.append("\t)\n")

    section += ["\t(:action moveLeft\n", "\t\t:parameters (?x)\n",
                "\t\t:effects \n"]
    section += [effect(f"(LeftOf{c} ?x)", f"(LeftOf{c - 1} ?x)")
                for c in range(2, columns + 1)]
    section += [effect(f"(RightOf{c} ?x)", f"(RightOf{c - 1} ?x)",
                       f"(RightOf{c} ?x)")
                for c in range(1, columns)]
    section += [effect(f"(Column{c} ?x)", f"(Column{c - 1} ?x)",
                       f"(Column{c} ?x)")
                for c in range(1, columns)]
    section += [effect(f"(and (RightOf{c} ?x) (LeftOf{c + 1} ?x))",
                       f"(Column{c - 1} ?x)")
                for c in range(1, columns)]
    section.append("\t)\n")

    section += ["\t(:action moveUp\n", "\t\t:parameters (?x)\n",
                "\t\t:effects \n"]
    section += [effect(f"(AboveOf{r} ?x)", f"(AboveOf{r + 1} ?x)")
                for r in range(rows - 1)]
    section += [effect(f"(BelowOf{r} ?x)", f"(BelowOf{r + 1} ?x)",
                       f"(BelowOf{r} ?x)")
                for r in range(1, rows)]
    section += [effect(f"(Row{r} ?x)", f"(Row{r + 1} ?x)", f"(Row{r} ?x)")
                for r in range(rows - 1)]
    section += [effect(f"(and (AboveOf{r} ?x) (BelowOf{r + 1} ?x))",
                       f"(Row{r + 1} ?x)")
                for r in range(rows - 1)]
    section.append("\t)\n")

    section += ["\t(:action moveDown\n", "\t\t:parameters (?x)\n",
                "\t\t:effects \n"]
    section += [effect(f"(BelowOf{r} ?x)", f"(BelowOf{r - 1} ?x)")
                for r in range(2, rows + 1)]
    section += [effect(f"(AboveOf{r} ?x)", f"(AboveOf{r - 1} ?x)",
                       f"(AboveOf{r} ?x)")
                for r in range(1, rows)]
    section += [effect(f"(Row{r} ?x)", f"(Row{r - 1} ?x)", f"(Row{r} ?x)")
                for r in range(1, rows)]
    section += [effect(f"(and (AboveOf{r} ?x) (BelowOf{r + 1} ?x))",
                       f"(Row{r - 1} ?x)")
                for r in range(1, rows)]
    section.append("\t)\n")
    write_section("".join(section))
    print("Finito la sezione :action\n")

    # Close the outer (define ...) s-expression.
    write_section("\n)")
    print("Finito di scrivere il dominio!\n")
def generate_planning_problem(rightOf, leftOf, aboveOf, belowOf, column, row, filename):
    """Write a robot planning problem instance to ``filename``.

    The initial state brackets the robot's position with the four cumulative
    bounds; the goal asks for an exact (column, row) cell.

    :param rightOf: initial column lower bound (``RightOf{rightOf}``).
    :param leftOf: initial column upper bound (``LeftOf{leftOf}``).
    :param aboveOf: initial row lower bound (``AboveOf{aboveOf}``).
    :param belowOf: initial row upper bound (``BelowOf{belowOf}``).
    :param column: goal column index (``Column{column}``).
    :param row: goal row index (``Row{row}``).
    :param filename: path of the PDDL file to create/overwrite.
    """
    planning_problem = (
        "(define (problem robotProblem)\n"
        "\t(:domain robot)\n"
        "\t(:objects robot)\n"
        "\t(:init\n"
        f"\t\t(RightOf{rightOf} robot)\n"
        f"\t\t(LeftOf{leftOf} robot)\n"
        f"\t\t(AboveOf{aboveOf} robot)\n"
        f"\t\t(BelowOf{belowOf} robot)\n"
        "\t)\n"
        f"\t(:goal (mko (and (Column{column} robot) (Row{row} robot))))\n"
        "\n)"
    )
    # Context manager closes the file even if the write raises (the original
    # leaked the handle on error).
    with open(filename, "w") as output_file:
        output_file.write(planning_problem)
if __name__ == '__main__':
    # Demo entry point: a fixed 7x7 grid, one domain file and one problem
    # instance written to the current directory.
    grid_columns = 7
    grid_rows = 7
    generate_planning_domain(columns=grid_columns,
                             rows=grid_rows,
                             filename="robotDomain.pddl")
    generate_planning_problem(rightOf=2,
                              leftOf=grid_columns - 1,
                              aboveOf=0,
                              belowOf=grid_rows - 1,
                              column=2,
                              row=1,
                              filename="robotDomain-problem.pddl")
| 35.539474
| 150
| 0.643188
| 1,669
| 10,804
| 4.046135
| 0.065309
| 0.34207
| 0.315415
| 0.279579
| 0.817859
| 0.789427
| 0.757145
| 0.733304
| 0.714497
| 0.694506
| 0
| 0.00658
| 0.141892
| 10,804
| 303
| 151
| 35.656766
| 0.721821
| 0.123565
| 0
| 0.651961
| 1
| 0
| 0.313507
| 0.002545
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009804
| false
| 0
| 0.004902
| 0
| 0.014706
| 0.029412
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
2d77ca0a3a7e59cce31fcb1f5f89466edae11215
| 22,994
|
py
|
Python
|
sdk/python/pulumi_f5bigip/ltm/profile_http_compress.py
|
pulumi/pulumi-f5bigip
|
4bce074f8bd7cb42f359ef4814ca5b437230fd1c
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2018-12-21T23:30:33.000Z
|
2021-10-12T16:38:27.000Z
|
sdk/python/pulumi_f5bigip/ltm/profile_http_compress.py
|
pulumi/pulumi-f5bigip
|
4bce074f8bd7cb42f359ef4814ca5b437230fd1c
|
[
"ECL-2.0",
"Apache-2.0"
] | 61
|
2019-01-09T01:50:19.000Z
|
2022-03-31T15:27:17.000Z
|
sdk/python/pulumi_f5bigip/ltm/profile_http_compress.py
|
pulumi/pulumi-f5bigip
|
4bce074f8bd7cb42f359ef4814ca5b437230fd1c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-10-05T10:36:30.000Z
|
2019-10-05T10:36:30.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ProfileHttpCompressArgs', 'ProfileHttpCompress']
@pulumi.input_type
class ProfileHttpCompressArgs:
    # Generated input-argument bag for the ProfileHttpCompress resource.
    # NOTE(review): the @pulumi.input_type decorator introspects the
    # property/setter pairs below, so their structure must stay as generated.
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 content_type_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 content_type_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 defaults_from: Optional[pulumi.Input[str]] = None,
                 uri_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 uri_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a ProfileHttpCompress resource.

        :param pulumi.Input[str] name: Name of the profile_httpcompress
        :param pulumi.Input[Sequence[pulumi.Input[str]]] content_type_excludes: Excludes a specified list of content types from compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] content_type_includes: Specifies a list of content types for compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
        :param pulumi.Input[str] defaults_from: Specifies the profile that you want to use as the parent profile. Your new profile inherits all settings and values from the parent profile specified.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] uri_excludes: Disables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you do not want to compress.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] uri_includes: Enables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you want to compress.
        """
        # `name` is the only required argument; optional values are stored
        # only when explicitly supplied, so unset inputs stay absent.
        pulumi.set(__self__, "name", name)
        if content_type_excludes is not None:
            pulumi.set(__self__, "content_type_excludes", content_type_excludes)
        if content_type_includes is not None:
            pulumi.set(__self__, "content_type_includes", content_type_includes)
        if defaults_from is not None:
            pulumi.set(__self__, "defaults_from", defaults_from)
        if uri_excludes is not None:
            pulumi.set(__self__, "uri_excludes", uri_excludes)
        if uri_includes is not None:
            pulumi.set(__self__, "uri_includes", uri_includes)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Name of the profile_httpcompress
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="contentTypeExcludes")
    def content_type_excludes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Excludes a specified list of content types from compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
        """
        return pulumi.get(self, "content_type_excludes")

    @content_type_excludes.setter
    def content_type_excludes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "content_type_excludes", value)

    @property
    @pulumi.getter(name="contentTypeIncludes")
    def content_type_includes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies a list of content types for compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
        """
        return pulumi.get(self, "content_type_includes")

    @content_type_includes.setter
    def content_type_includes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "content_type_includes", value)

    @property
    @pulumi.getter(name="defaultsFrom")
    def defaults_from(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the profile that you want to use as the parent profile. Your new profile inherits all settings and values from the parent profile specified.
        """
        return pulumi.get(self, "defaults_from")

    @defaults_from.setter
    def defaults_from(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "defaults_from", value)

    @property
    @pulumi.getter(name="uriExcludes")
    def uri_excludes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Disables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you do not want to compress.
        """
        return pulumi.get(self, "uri_excludes")

    @uri_excludes.setter
    def uri_excludes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "uri_excludes", value)

    @property
    @pulumi.getter(name="uriIncludes")
    def uri_includes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Enables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you want to compress.
        """
        return pulumi.get(self, "uri_includes")

    @uri_includes.setter
    def uri_includes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "uri_includes", value)
@pulumi.input_type
class _ProfileHttpCompressState:
    # Generated state bag used by ProfileHttpCompress.get(); every field is
    # optional because lookups may return partial state.
    # NOTE(review): structure is introspected by @pulumi.input_type — do not
    # restructure by hand.
    def __init__(__self__, *,
                 content_type_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 content_type_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 defaults_from: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 uri_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 uri_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering ProfileHttpCompress resources.

        :param pulumi.Input[Sequence[pulumi.Input[str]]] content_type_excludes: Excludes a specified list of content types from compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] content_type_includes: Specifies a list of content types for compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
        :param pulumi.Input[str] defaults_from: Specifies the profile that you want to use as the parent profile. Your new profile inherits all settings and values from the parent profile specified.
        :param pulumi.Input[str] name: Name of the profile_httpcompress
        :param pulumi.Input[Sequence[pulumi.Input[str]]] uri_excludes: Disables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you do not want to compress.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] uri_includes: Enables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you want to compress.
        """
        # Store only the values that were actually provided.
        if content_type_excludes is not None:
            pulumi.set(__self__, "content_type_excludes", content_type_excludes)
        if content_type_includes is not None:
            pulumi.set(__self__, "content_type_includes", content_type_includes)
        if defaults_from is not None:
            pulumi.set(__self__, "defaults_from", defaults_from)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if uri_excludes is not None:
            pulumi.set(__self__, "uri_excludes", uri_excludes)
        if uri_includes is not None:
            pulumi.set(__self__, "uri_includes", uri_includes)

    @property
    @pulumi.getter(name="contentTypeExcludes")
    def content_type_excludes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Excludes a specified list of content types from compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
        """
        return pulumi.get(self, "content_type_excludes")

    @content_type_excludes.setter
    def content_type_excludes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "content_type_excludes", value)

    @property
    @pulumi.getter(name="contentTypeIncludes")
    def content_type_includes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies a list of content types for compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
        """
        return pulumi.get(self, "content_type_includes")

    @content_type_includes.setter
    def content_type_includes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "content_type_includes", value)

    @property
    @pulumi.getter(name="defaultsFrom")
    def defaults_from(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the profile that you want to use as the parent profile. Your new profile inherits all settings and values from the parent profile specified.
        """
        return pulumi.get(self, "defaults_from")

    @defaults_from.setter
    def defaults_from(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "defaults_from", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the profile_httpcompress
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="uriExcludes")
    def uri_excludes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Disables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you do not want to compress.
        """
        return pulumi.get(self, "uri_excludes")

    @uri_excludes.setter
    def uri_excludes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "uri_excludes", value)

    @property
    @pulumi.getter(name="uriIncludes")
    def uri_includes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Enables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you want to compress.
        """
        return pulumi.get(self, "uri_includes")

    @uri_includes.setter
    def uri_includes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "uri_includes", value)
class ProfileHttpCompress(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
content_type_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
content_type_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
defaults_from: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
uri_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
uri_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
`ltm.ProfileHttpCompress` Virtual server HTTP compression profile configuration
Resources should be named with their "full path". The full path is the combination of the partition + name (example: /Common/my-pool ) or partition + directory + name of the resource (example: /Common/test/my-pool )
## Example Usage
```python
import pulumi
import pulumi_f5bigip as f5bigip
sjhttpcompression = f5bigip.ltm.ProfileHttpCompress("sjhttpcompression",
content_type_excludes=["nicecontentexclude.com"],
content_type_includes=["nicecontent.com"],
defaults_from="/Common/httpcompression",
name="/Common/sjhttpcompression2",
uri_excludes=[
"www.abc.f5.com",
"www.abc2.f5.com",
],
uri_includes=["www.xyzbc.cisco.com"])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] content_type_excludes: Excludes a specified list of content types from compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
:param pulumi.Input[Sequence[pulumi.Input[str]]] content_type_includes: Specifies a list of content types for compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
:param pulumi.Input[str] defaults_from: Specifies the profile that you want to use as the parent profile. Your new profile inherits all settings and values from the parent profile specified.
:param pulumi.Input[str] name: Name of the profile_httpcompress
:param pulumi.Input[Sequence[pulumi.Input[str]]] uri_excludes: Disables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you do not want to compress.
:param pulumi.Input[Sequence[pulumi.Input[str]]] uri_includes: Enables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you want to compress.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ProfileHttpCompressArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
`ltm.ProfileHttpCompress` Virtual server HTTP compression profile configuration
Resources should be named with their "full path". The full path is the combination of the partition + name (example: /Common/my-pool ) or partition + directory + name of the resource (example: /Common/test/my-pool )
## Example Usage
```python
import pulumi
import pulumi_f5bigip as f5bigip
sjhttpcompression = f5bigip.ltm.ProfileHttpCompress("sjhttpcompression",
content_type_excludes=["nicecontentexclude.com"],
content_type_includes=["nicecontent.com"],
defaults_from="/Common/httpcompression",
name="/Common/sjhttpcompression2",
uri_excludes=[
"www.abc.f5.com",
"www.abc2.f5.com",
],
uri_includes=["www.xyzbc.cisco.com"])
```
:param str resource_name: The name of the resource.
:param ProfileHttpCompressArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProfileHttpCompressArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
content_type_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
content_type_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
defaults_from: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
uri_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
uri_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProfileHttpCompressArgs.__new__(ProfileHttpCompressArgs)
__props__.__dict__["content_type_excludes"] = content_type_excludes
__props__.__dict__["content_type_includes"] = content_type_includes
__props__.__dict__["defaults_from"] = defaults_from
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
__props__.__dict__["uri_excludes"] = uri_excludes
__props__.__dict__["uri_includes"] = uri_includes
super(ProfileHttpCompress, __self__).__init__(
'f5bigip:ltm/profileHttpCompress:ProfileHttpCompress',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
content_type_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
content_type_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
defaults_from: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
uri_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
uri_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'ProfileHttpCompress':
"""
Get an existing ProfileHttpCompress resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] content_type_excludes: Excludes a specified list of content types from compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
:param pulumi.Input[Sequence[pulumi.Input[str]]] content_type_includes: Specifies a list of content types for compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
:param pulumi.Input[str] defaults_from: Specifies the profile that you want to use as the parent profile. Your new profile inherits all settings and values from the parent profile specified.
:param pulumi.Input[str] name: Name of the profile_httpcompress
:param pulumi.Input[Sequence[pulumi.Input[str]]] uri_excludes: Disables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you do not want to compress.
:param pulumi.Input[Sequence[pulumi.Input[str]]] uri_includes: Enables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you want to compress.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ProfileHttpCompressState.__new__(_ProfileHttpCompressState)
__props__.__dict__["content_type_excludes"] = content_type_excludes
__props__.__dict__["content_type_includes"] = content_type_includes
__props__.__dict__["defaults_from"] = defaults_from
__props__.__dict__["name"] = name
__props__.__dict__["uri_excludes"] = uri_excludes
__props__.__dict__["uri_includes"] = uri_includes
return ProfileHttpCompress(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="contentTypeExcludes")
def content_type_excludes(self) -> pulumi.Output[Sequence[str]]:
"""
Excludes a specified list of content types from compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
"""
return pulumi.get(self, "content_type_excludes")
    @property
    @pulumi.getter(name="contentTypeIncludes")
    def content_type_includes(self) -> pulumi.Output[Sequence[str]]:
        """
        Specifies a list of content types for compression of HTTP Content-Type responses. Use a string list to specify a list of content types you want to compress.
        """
        # Generated accessor: reads the "content_type_includes" output property
        # from this resource's Pulumi state.
        return pulumi.get(self, "content_type_includes")
    @property
    @pulumi.getter(name="defaultsFrom")
    def defaults_from(self) -> pulumi.Output[str]:
        """
        Specifies the profile that you want to use as the parent profile. Your new profile inherits all settings and values from the parent profile specified.
        """
        # Generated accessor: reads the "defaults_from" output property
        # from this resource's Pulumi state.
        return pulumi.get(self, "defaults_from")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the profile_httpcompress
        """
        # Generated accessor: property name matches the wire name, so the
        # @pulumi.getter decorator needs no explicit name= argument here.
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="uriExcludes")
    def uri_excludes(self) -> pulumi.Output[Sequence[str]]:
        """
        Disables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you do not want to compress.
        """
        # Generated accessor: reads the "uri_excludes" output property
        # from this resource's Pulumi state.
        return pulumi.get(self, "uri_excludes")
    @property
    @pulumi.getter(name="uriIncludes")
    def uri_includes(self) -> pulumi.Output[Sequence[str]]:
        """
        Enables compression on a specified list of HTTP Request-URI responses. Use a regular expression to specify a list of URIs you want to compress.
        """
        # Generated accessor: reads the "uri_includes" output property
        # from this resource's Pulumi state.
        return pulumi.get(self, "uri_includes")
| 54.231132
| 246
| 0.685527
| 2,871
| 22,994
| 5.306165
| 0.066527
| 0.096757
| 0.07352
| 0.085335
| 0.881843
| 0.871669
| 0.86156
| 0.852304
| 0.848956
| 0.837863
| 0
| 0.000895
| 0.222667
| 22,994
| 423
| 247
| 54.359338
| 0.851404
| 0.413891
| 0
| 0.767241
| 1
| 0
| 0.101739
| 0.036381
| 0
| 0
| 0
| 0
| 0
| 1
| 0.159483
| false
| 0.00431
| 0.021552
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2db1a08c759de59920dc84b0358b47df42806344
| 37
|
py
|
Python
|
project/7.26-7.27/c2.py
|
mintlov3r/oh-my-python
|
b99e65ebe31926d92d825d8ad3294e970d9dc722
|
[
"Apache-2.0"
] | null | null | null |
project/7.26-7.27/c2.py
|
mintlov3r/oh-my-python
|
b99e65ebe31926d92d825d8ad3294e970d9dc722
|
[
"Apache-2.0"
] | null | null | null |
project/7.26-7.27/c2.py
|
mintlov3r/oh-my-python
|
b99e65ebe31926d92d825d8ad3294e970d9dc722
|
[
"Apache-2.0"
] | null | null | null |
# Import the submodule t.c1 under the alias ``m`` and print its module-level
# values ``a``, ``b`` and ``c`` on one space-separated line.
# NOTE(review): importing t.c1 executes that module's top level; presumably
# a/b/c are defined there — verify against t/c1.py.
import t.c1 as m
print(m.a, m.b, m.c)
| 18.5
| 20
| 0.621622
| 12
| 37
| 1.916667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.162162
| 37
| 2
| 20
| 18.5
| 0.709677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
2dcf0e7a05d765c0698af8511e314d8e14200e60
| 138
|
py
|
Python
|
ctera_gateway_openapi/api/initialized.py
|
ctera/ctera-gateway-openapi
|
0b37af6cd4b53dfe0f66f4dc75dc131e99c63233
|
[
"Apache-2.0"
] | null | null | null |
ctera_gateway_openapi/api/initialized.py
|
ctera/ctera-gateway-openapi
|
0b37af6cd4b53dfe0f66f4dc75dc131e99c63233
|
[
"Apache-2.0"
] | null | null | null |
ctera_gateway_openapi/api/initialized.py
|
ctera/ctera-gateway-openapi
|
0b37af6cd4b53dfe0f66f4dc75dc131e99c63233
|
[
"Apache-2.0"
] | null | null | null |
from ctera_gateway_openapi.managers.login import LoginManager
def is_initialized(**_kwargs):
    """Report whether the gateway is initialized.

    Any keyword arguments passed by the API layer are accepted and ignored.
    The actual check is delegated to a freshly constructed LoginManager.
    """
    manager = LoginManager()
    return manager.is_initialized()
| 23
| 61
| 0.811594
| 16
| 138
| 6.6875
| 0.8125
| 0.242991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101449
| 138
| 5
| 62
| 27.6
| 0.862903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
934088784fe61f371e09649f6ca215bb834f2fb5
| 19,758
|
py
|
Python
|
test/programytest/rdf/test_vars_matching.py
|
cdoebler1/AIML2
|
ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a
|
[
"MIT"
] | 345
|
2016-11-23T22:37:04.000Z
|
2022-03-30T20:44:44.000Z
|
test/programytest/rdf/test_vars_matching.py
|
MikeyBeez/program-y
|
00d7a0c7d50062f18f0ab6f4a041068e119ef7f0
|
[
"MIT"
] | 275
|
2016-12-07T10:30:28.000Z
|
2022-02-08T21:28:33.000Z
|
test/programytest/rdf/test_vars_matching.py
|
VProgramMist/modified-program-y
|
f32efcafafd773683b3fe30054d5485fe9002b7d
|
[
"MIT"
] | 159
|
2016-11-28T18:59:30.000Z
|
2022-03-20T18:02:44.000Z
|
import unittest
from programy.rdf.collection import RDFCollection
class RDFCollectionVarsMatchingTests(unittest.TestCase):
    """Tests for RDFCollection variable matching.

    Every test seeds the same five ANIMALS triples via :meth:`add_data` and
    then exercises ``match_to_vars`` / ``not_match_to_vars`` /
    ``match_only_vars`` with subject/predicate/object given either as concrete
    values or as "?x"-style variables.  The test names state which positions
    are variables, so the individual methods carry no further documentation.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12) has
    been replaced with ``assertEqual`` throughout.
    """

    def add_data(self, collection):
        """Seed *collection* with the five triples shared by every test."""
        collection.add_entity("MONKEY", "LEGS", "2", "ANIMALS")
        collection.add_entity("MONKEY", "HASFUR", "true", "ANIMALS")
        collection.add_entity("ZEBRA", "LEGS", "4", "ANIMALS")
        collection.add_entity("BIRD", "LEGS", "2", "ANIMALS")
        collection.add_entity("ELEPHANT", "TRUNK", "true", "ANIMALS")

    def test_match_vars(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars()
        self.assertIsNotNone(matched)
        self.assertEqual(5, len(matched))
        self.assertTrue([['subj', 'MONKEY'], ['pred', 'LEGS'], ['obj', '2']] in matched)
        self.assertTrue([['subj', 'MONKEY'], ['pred', 'HASFUR'], ['obj', 'true']] in matched)
        self.assertTrue([['subj', 'ELEPHANT'], ['pred', 'TRUNK'], ['obj', 'true']] in matched)
        self.assertTrue([['subj', 'ZEBRA'], ['pred', 'LEGS'], ['obj', '4']] in matched)
        self.assertTrue([['subj', 'BIRD'], ['pred', 'LEGS'], ['obj', '2']] in matched)

    def test_not_match_vars(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.not_match_to_vars()
        self.assertIsNotNone(matched)
        self.assertEqual(0, len(matched))

    def test_match_vars_subject(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars("?x")
        self.assertIsNotNone(matched)
        self.assertEqual(5, len(matched))
        self.assertTrue([['?x', 'MONKEY'], ['pred', 'LEGS'], ['obj', '2']] in matched)
        self.assertTrue([['?x', 'MONKEY'], ['pred', 'HASFUR'], ['obj', 'true']] in matched)
        self.assertTrue([['?x', 'ELEPHANT'], ['pred', 'TRUNK'], ['obj', 'true']] in matched)
        self.assertTrue([['?x', 'ZEBRA'], ['pred', 'LEGS'], ['obj', '4']] in matched)
        self.assertTrue([['?x', 'BIRD'], ['pred', 'LEGS'], ['obj', '2']] in matched)

    def test_not_match_vars_subject(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        not_matched = collection.not_match_to_vars("?x")
        self.assertIsNotNone(not_matched)
        self.assertEqual(0, len(not_matched))

    def test_match_vars_subject_with_predicate_params(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars(subject="?x", predicate="LEGS")
        self.assertIsNotNone(matched)
        self.assertEqual(3, len(matched))
        self.assertTrue([['?x', 'MONKEY'], ['pred', 'LEGS'], ['obj', '2']] in matched)
        self.assertTrue([['?x', 'ZEBRA'], ['pred', 'LEGS'], ['obj', '4']] in matched)
        self.assertTrue([['?x', 'BIRD'], ['pred', 'LEGS'], ['obj', '2']] in matched)

    def test_not_match_vars_subject_with_predicate_params(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        not_matched = collection.not_match_to_vars(subject="?x", predicate="LEGS")
        self.assertIsNotNone(not_matched)
        self.assertEqual(1, len(not_matched))
        self.assertTrue([['?x', 'ELEPHANT'], ['pred', 'TRUNK'], ['obj', 'true']] in not_matched)

    def test_match_vars_subject_with_predicate_object_params(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars(subject="?x", predicate="LEGS", obj="2")
        self.assertIsNotNone(matched)
        self.assertEqual(2, len(matched))
        self.assertTrue([['?x', 'MONKEY'], ['pred', 'LEGS'], ['obj', '2']] in matched)
        self.assertTrue([['?x', 'BIRD'], ['pred', 'LEGS'], ['obj', '2']] in matched)

    def test_not_match_vars_subject_with_predicate_object_params(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.not_match_to_vars(subject="?x", predicate="LEGS", obj="2")
        self.assertIsNotNone(matched)
        self.assertEqual(2, len(matched))
        self.assertTrue([['?x', 'ZEBRA'], ['pred', 'LEGS'], ['obj', '4']] in matched)
        self.assertTrue([['?x', 'ELEPHANT'], ['pred', 'TRUNK'], ['obj', 'true']] in matched)

    def test_match_vars_subject_predicate(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars(subject="?x", predicate="?y")
        self.assertIsNotNone(matched)
        self.assertTrue([['?x', 'MONKEY'], ['?y', 'LEGS'], ['obj', '2']] in matched)
        self.assertTrue([['?x', 'MONKEY'], ['?y', 'HASFUR'], ['obj', 'true']] in matched)
        self.assertTrue([['?x', 'ELEPHANT'], ['?y', 'TRUNK'], ['obj', 'true']] in matched)
        self.assertTrue([['?x', 'ZEBRA'], ['?y', 'LEGS'], ['obj', '4']] in matched)
        self.assertTrue([['?x', 'BIRD'], ['?y', 'LEGS'], ['obj', '2']] in matched)

    def test_match_vars_subject_predicate_with_subject_params(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars(subject="MONKEY", predicate="?y")
        self.assertIsNotNone(matched)
        self.assertEqual(2, len(matched))
        self.assertTrue([['subj', 'MONKEY'], ['?y', 'LEGS'], ['obj', '2']] in matched)
        self.assertTrue([['subj', 'MONKEY'], ['?y', 'HASFUR'], ['obj', 'true']] in matched)

    def test_not_match_vars_subject_predicate_with_subject_params(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.not_match_to_vars(subject="MONKEY", predicate="?y")
        self.assertIsNotNone(matched)
        self.assertEqual(3, len(matched))
        self.assertTrue([['subj', 'ZEBRA'], ['?y', 'LEGS'], ['obj', '4']] in matched)
        self.assertTrue([['subj', 'BIRD'], ['?y', 'LEGS'], ['obj', '2']] in matched)
        self.assertTrue([['subj', 'ELEPHANT'], ['?y', 'TRUNK'], ['obj', 'true']] in matched)

    def test_match_vars_subject_predicate_with_subject_object_params(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars(subject="MONKEY", predicate="?y", obj="2")
        self.assertIsNotNone(matched)
        self.assertEqual(1, len(matched))
        self.assertTrue([['subj', 'MONKEY'], ['?y', 'LEGS'], ['obj', '2']] in matched)

    def test_not_match_vars_subject_predicate_with_subject_object_params(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.not_match_to_vars(subject="MONKEY", predicate="?y", obj="2")
        self.assertIsNotNone(matched)
        self.assertEqual(3, len(matched))
        self.assertTrue([['subj', 'ELEPHANT'], ['?y', 'TRUNK'], ['obj', 'true']] in matched)
        self.assertTrue([['subj', 'ZEBRA'], ['?y', 'LEGS'], ['obj', '4']] in matched)
        self.assertTrue([['subj', 'BIRD'], ['?y', 'LEGS'], ['obj', '2']] in matched)

    def test_match_vars_subject_predicate_object(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars("?x", "?y", "?z")
        self.assertIsNotNone(matched)
        self.assertEqual(5, len(matched))
        self.assertTrue([['?x', 'MONKEY'], ['?y', 'LEGS'], ['?z', '2']] in matched)
        self.assertTrue([['?x', 'MONKEY'], ['?y', 'HASFUR'], ['?z', 'true']] in matched)
        self.assertTrue([['?x', 'ELEPHANT'], ['?y', 'TRUNK'], ['?z', 'true']] in matched)
        self.assertTrue([['?x', 'ZEBRA'], ['?y', 'LEGS'], ['?z', '4']] in matched)
        self.assertTrue([['?x', 'BIRD'], ['?y', 'LEGS'], ['?z', '2']] in matched)

    def test_not_match_vars_subject_predicate_object(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.not_match_to_vars("?x", "?y", "?z")
        self.assertIsNotNone(matched)
        self.assertEqual(0, len(matched))

    def test_match_vars_subject_object(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars(subject="?x", obj="?z")
        self.assertIsNotNone(matched)
        self.assertEqual(5, len(matched))
        self.assertTrue([['?x', 'MONKEY'], ['pred', 'LEGS'], ['?z', '2']] in matched)
        self.assertTrue([['?x', 'MONKEY'], ['pred', 'HASFUR'], ['?z', 'true']] in matched)
        self.assertTrue([['?x', 'ELEPHANT'], ['pred', 'TRUNK'], ['?z', 'true']] in matched)
        self.assertTrue([['?x', 'ZEBRA'], ['pred', 'LEGS'], ['?z', '4']] in matched)
        self.assertTrue([['?x', 'BIRD'], ['pred', 'LEGS'], ['?z', '2']] in matched)

    def test_match_vars_predicate(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars(predicate="?x")
        self.assertIsNotNone(matched)
        self.assertTrue([['subj', 'MONKEY'], ['?x', 'LEGS'], ['obj', '2']] in matched)
        self.assertTrue([['subj', 'MONKEY'], ['?x', 'HASFUR'], ['obj', 'true']] in matched)
        self.assertTrue([['subj', 'ELEPHANT'], ['?x', 'TRUNK'], ['obj', 'true']] in matched)
        self.assertTrue([['subj', 'ZEBRA'], ['?x', 'LEGS'], ['obj', '4']] in matched)
        self.assertTrue([['subj', 'BIRD'], ['?x', 'LEGS'], ['obj', '2']] in matched)

    def test_match_vars_predicate_object(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars(predicate="?x", obj="?y")
        self.assertIsNotNone(matched)
        self.assertTrue([['subj', 'MONKEY'], ['?x', 'LEGS'], ['?y', '2']] in matched)
        self.assertTrue([['subj', 'MONKEY'], ['?x', 'HASFUR'], ['?y', 'true']] in matched)
        self.assertTrue([['subj', 'ELEPHANT'], ['?x', 'TRUNK'], ['?y', 'true']] in matched)
        self.assertTrue([['subj', 'ZEBRA'], ['?x', 'LEGS'], ['?y', '4']] in matched)
        self.assertTrue([['subj', 'BIRD'], ['?x', 'LEGS'], ['?y', '2']] in matched)

    def test_match_vars_object(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars(obj="?x")
        self.assertIsNotNone(matched)
        self.assertEqual(5, len(matched))
        self.assertTrue([['subj', 'MONKEY'], ['pred', 'LEGS'], ['?x', '2']] in matched)
        self.assertTrue([['subj', 'MONKEY'], ['pred', 'HASFUR'], ['?x', 'true']] in matched)
        self.assertTrue([['subj', 'ELEPHANT'], ['pred', 'TRUNK'], ['?x', 'true']] in matched)
        self.assertTrue([['subj', 'ZEBRA'], ['pred', 'LEGS'], ['?x', '4']] in matched)
        self.assertTrue([['subj', 'BIRD'], ['pred', 'LEGS'], ['?x', '2']] in matched)

    def test_match_vars_object_with_subject_params(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars(subject="MONKEY", obj="?x")
        self.assertIsNotNone(matched)
        self.assertEqual(2, len(matched))
        self.assertTrue([['subj', 'MONKEY'], ['pred', 'LEGS'], ['?x', '2']] in matched)
        self.assertTrue([['subj', 'MONKEY'], ['pred', 'HASFUR'], ['?x', 'true']] in matched)

    def test_not_match_vars_object_with_subject_params(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.not_match_to_vars(subject="MONKEY", obj="?x")
        self.assertIsNotNone(matched)
        self.assertEqual(3, len(matched))
        self.assertTrue([['subj', 'ELEPHANT'], ['pred', 'TRUNK'], ['?x', 'true']] in matched)
        self.assertTrue([['subj', 'ZEBRA'], ['pred', 'LEGS'], ['?x', '4']] in matched)
        self.assertTrue([['subj', 'BIRD'], ['pred', 'LEGS'], ['?x', '2']] in matched)

    def test_match_vars_object_with_subject_predicate_params(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_to_vars(subject="MONKEY", predicate="LEGS", obj="?x")
        self.assertIsNotNone(matched)
        self.assertEqual(1, len(matched))
        self.assertTrue([['subj', 'MONKEY'], ['pred', 'LEGS'], ['?x', '2']] in matched)

    def test_not_match_vars_object_with_subject_predicate_params(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.not_match_to_vars(subject="MONKEY", predicate="LEGS", obj="?x")
        self.assertIsNotNone(matched)
        self.assertEqual(3, len(matched))
        self.assertTrue([['subj', 'ELEPHANT'], ['pred', 'TRUNK'], ['?x', 'true']] in matched)
        self.assertTrue([['subj', 'ZEBRA'], ['pred', 'LEGS'], ['?x', '4']] in matched)
        self.assertTrue([['subj', 'BIRD'], ['pred', 'LEGS'], ['?x', '2']] in matched)

    def test_match_only_subject_only(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_only_vars(subject="?x")
        self.assertIsNotNone(matched)
        self.assertEqual([[['?x', 'MONKEY']], [['?x', 'MONKEY']], [['?x', 'ZEBRA']], [['?x', 'BIRD']], [['?x', 'ELEPHANT']]], matched)

    def test_match_only_predicate_only(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_only_vars(predicate="?x")
        self.assertIsNotNone(matched)
        self.assertEqual([[['?x', 'LEGS']], [['?x', 'HASFUR']], [['?x', 'LEGS']], [['?x', 'LEGS']], [['?x', 'TRUNK']]], matched)

    def test_match_only_object_only(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_only_vars(obj="?x")
        self.assertIsNotNone(matched)
        self.assertEqual([[['?x', '2']], [['?x', 'true']], [['?x', '4']], [['?x', '2']], [['?x', 'true']]], matched)

    def test_match_only_subject_and_predicate(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_only_vars(subject="?x", predicate="?y")
        self.assertIsNotNone(matched)
        self.assertEqual(
            [[['?x', 'MONKEY'], ['?y', 'LEGS']], [['?x', 'MONKEY'], ['?y', 'HASFUR']], [['?x', 'ZEBRA'], ['?y', 'LEGS']], [['?x', 'BIRD'], ['?y', 'LEGS']], [['?x', 'ELEPHANT'], ['?y', 'TRUNK']]],
            matched)

    def test_match_only_subject_and_object(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_only_vars(subject="?x", obj="?y")
        self.assertIsNotNone(matched)
        self.assertEqual(
            [[['?x', 'MONKEY'], ['?y', '2']], [['?x', 'MONKEY'], ['?y', 'true']], [['?x', 'ZEBRA'], ['?y', '4']], [['?x', 'BIRD'], ['?y', '2']], [['?x', 'ELEPHANT'], ['?y', 'true']]],
            matched)

    def test_match_only_predicate_and_object(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_only_vars(predicate="?x", obj="?y")
        self.assertIsNotNone(matched)
        self.assertEqual(
            [[['?x', 'LEGS'], ['?y', '2']], [['?x', 'HASFUR'], ['?y', 'true']], [['?x', 'LEGS'], ['?y', '4']], [['?x', 'LEGS'], ['?y', '2']], [['?x', 'TRUNK'], ['?y', 'true']]],
            matched)

    def test_match_only_vars_subject_objectvar(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_only_vars(subject="MONKEY", obj="?x")
        self.assertIsNotNone(matched)
        self.assertEqual(2, len(matched))
        self.assertTrue([['?x', '2']] in matched)
        self.assertTrue([['?x', 'true']] in matched)

    def test_match_only_vars_predicte_objectvar(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_only_vars(predicate="LEGS", obj="?x")
        self.assertIsNotNone(matched)
        self.assertEqual(3, len(matched))
        self.assertTrue([['?x', '2']] in matched)
        self.assertTrue([['?x', '4']] in matched)

    def test_match_only_vars_subjectvar_predicte_objectvar(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_only_vars(subject="?x", predicate="LEGS", obj="?y")
        self.assertIsNotNone(matched)
        self.assertEqual(3, len(matched))
        self.assertTrue([['?x', 'MONKEY'], ['?y', '2']] in matched)
        self.assertTrue([['?x', 'ZEBRA'], ['?y', '4']] in matched)
        self.assertTrue([['?x', 'BIRD'], ['?y', '2']] in matched)

    def test_match_only_vars_subjectvar_predictevar_objectvar(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_only_vars(subject="?x", predicate="?y", obj="?z")
        self.assertIsNotNone(matched)
        self.assertEqual(5, len(matched))
        self.assertTrue([['?x', 'MONKEY'], ['?y', 'LEGS'], ['?z', '2']] in matched)
        self.assertTrue([['?x', 'MONKEY'], ['?y', 'HASFUR'], ['?z', 'true']] in matched)
        self.assertTrue([['?x', 'ZEBRA'], ['?y', 'LEGS'], ['?z', '4']] in matched)
        self.assertTrue([['?x', 'BIRD'], ['?y', 'LEGS'], ['?z', '2']] in matched)
        self.assertTrue([['?x', 'ELEPHANT'], ['?y', 'TRUNK'], ['?z', 'true']] in matched)

    def test_match_only_vars_subject_predictevar_object(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_only_vars(subject="MONKEY", predicate="?x", obj="2")
        self.assertIsNotNone(matched)
        self.assertEqual(1, len(matched))
        self.assertEqual([[['?x', 'LEGS']]], matched)

    def test_match_only_vars_no_match(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        matched = collection.match_only_vars(subject="MONKEYX", predicate="?x", obj="2")
        self.assertIsNotNone(matched)
        self.assertEqual(0, len(matched))
        self.assertEqual([], matched)

    def test_chungyilinxrspace_issue_175(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        collection.add_entity("ACTOR", "ISA", "PERSON", "TEST")
        collection.add_entity("ACTOR", "ISA", "MAN", "TEST")
        set1 = collection.match_to_vars("ACTOR", "ISA", "?x")
        self.assertTrue([['subj', 'ACTOR'], ['pred', 'ISA'], ['?x', 'MAN']] in set1)
        self.assertTrue([['subj', 'ACTOR'], ['pred', 'ISA'], ['?x', 'PERSON']] in set1)
| 42.859002
| 195
| 0.595354
| 2,148
| 19,758
| 5.325885
| 0.032123
| 0.107692
| 0.143182
| 0.108566
| 0.946766
| 0.938899
| 0.911976
| 0.875962
| 0.856031
| 0.738899
| 0
| 0.006398
| 0.201083
| 19,758
| 460
| 196
| 42.952174
| 0.71834
| 0
| 0
| 0.628986
| 0
| 0
| 0.111707
| 0
| 0
| 0
| 0
| 0
| 0.536232
| 1
| 0.107246
| false
| 0
| 0.005797
| 0
| 0.115942
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9369a05f0b94ac1235ff338ff8e77a82e5fc385a
| 4,629
|
py
|
Python
|
tests/unit/conftest.py
|
EpicWink/floto
|
eb0d93d032b5e14e304e350cee28f27cfe735b73
|
[
"MIT"
] | 43
|
2016-02-29T17:44:57.000Z
|
2021-12-28T00:41:47.000Z
|
tests/unit/conftest.py
|
EpicWink/floto
|
eb0d93d032b5e14e304e350cee28f27cfe735b73
|
[
"MIT"
] | 9
|
2016-02-29T23:38:36.000Z
|
2016-09-02T21:48:00.000Z
|
tests/unit/conftest.py
|
EpicWink/floto
|
eb0d93d032b5e14e304e350cee28f27cfe735b73
|
[
"MIT"
] | 10
|
2016-02-29T16:53:09.000Z
|
2018-12-12T00:06:08.000Z
|
import pytest
import datetime as dt
@pytest.fixture
def init_response():
    """Decision-task response with a three-event history and no paging.

    NOTE(review): shaped like an AWS SWF PollForDecisionTask response —
    confirm against the consuming code.
    """
    ts1 = dt.datetime(2016, 1, 12, hour=1, tzinfo=dt.timezone.utc)
    ts2 = dt.datetime(2016, 1, 12, hour=2, tzinfo=dt.timezone.utc)
    ts3 = dt.datetime(2016, 1, 12, hour=3, tzinfo=dt.timezone.utc)
    events = [
        {
            'decisionTaskStartedEventAttributes': {'scheduledEventId': 2},
            'eventId': 3,
            'eventTimestamp': ts3,
            'eventType': 'DecisionTaskStarted',
        },
        {
            'decisionTaskScheduledEventAttributes': {
                'startToCloseTimeout': '21600',
                'taskList': {'name': 'tl'},
                'taskPriority': '0',
            },
            'eventId': 2,
            'eventTimestamp': ts2,
            'eventType': 'DecisionTaskScheduled',
        },
        {
            'eventId': 1,
            'eventTimestamp': ts1,
            'eventType': 'WorkflowExecutionStarted',
            'workflowExecutionStartedEventAttributes': {'input': 'workflow_input'},
        },
    ]
    return {
        'events': events,
        'previousStartedEventId': 0,
        'startedEventId': 3,
        'taskToken': 'val_task_token',
        'workflowExecution': {
            'runId': 'val_run_id',
            'workflowId': 'val_workflow_id',
        },
        'workflowType': {'name': 'my_workflow_type', 'version': 'v1'},
    }
@pytest.fixture
def empty_response():
    """Decision-task response carrying no 'events' key at all."""
    execution = {
        'runId': 'val_run_id',
        'workflowId': 'val_workflow_id',
    }
    return {
        'previousStartedEventId': 0,
        'startedEventId': 3,
        'taskToken': 'val_task_token',
        'workflowExecution': execution,
        'workflowType': {'name': 'my_workflow_type', 'version': 'v1'},
    }
@pytest.fixture
def page1_response():
    """First page of a paginated history: two events plus nextPageToken 'page2'."""
    ts2 = dt.datetime(2016, 1, 12, hour=2, tzinfo=dt.timezone.utc)
    ts3 = dt.datetime(2016, 1, 12, hour=3, tzinfo=dt.timezone.utc)
    events = [
        {
            'decisionTaskStartedEventAttributes': {'scheduledEventId': 2},
            'eventId': 3,
            'eventTimestamp': ts3,
            'eventType': 'DecisionTaskStarted',
        },
        {
            'eventId': 2,
            'eventTimestamp': ts2,
            'eventType': 'DecisionTaskCompleted',
        },
    ]
    return {
        'events': events,
        'nextPageToken': 'page2',
        'previousStartedEventId': 2,
        'startedEventId': 3,
        'taskToken': 'val_task_token',
        'workflowExecution': {
            'runId': 'val_run_id',
            'workflowId': 'val_workflow_id',
        },
        'workflowType': {'name': 'my_workflow_type', 'version': 'v1'},
    }
@pytest.fixture
def page2_response():
    """Second page of a paginated history: one start event, token for 'page3'."""
    ts1 = dt.datetime(2016, 1, 12, hour=1, tzinfo=dt.timezone.utc)
    start_event = {
        'eventId': 1,
        'eventTimestamp': ts1,
        'eventType': 'WorkflowExecutionStarted',
        'workflowExecutionStartedEventAttributes': {'input': 'workflow_input'},
    }
    return {
        'events': [start_event],
        'nextPageToken': 'page3',
        'previousStartedEventId': 0,
        'startedEventId': 3,
    }
@pytest.fixture
def page1_decision_response():
    """First page of a paginated decision-task history (one started event)."""
    ts2 = dt.datetime(2016, 1, 12, hour=2, tzinfo=dt.timezone.utc)
    started_event = {
        'decisionTaskStartedEventAttributes': {'scheduledEventId': 2},
        'eventId': 3,
        'eventTimestamp': ts2,
        'eventType': 'DecisionTaskStarted',
    }
    return {
        'events': [started_event],
        'nextPageToken': 'page2',
        'previousStartedEventId': 1,
        'startedEventId': 3,
        'taskToken': 'val_task_token',
        'workflowExecution': {
            'runId': 'val_run_id',
            'workflowId': 'val_workflow_id',
        },
        'workflowType': {'name': 'my_workflow_type', 'version': 'v1'},
    }
@pytest.fixture
def page2_decision_response():
    """Final page of a paginated decision-task history (no nextPageToken)."""
    ts1 = dt.datetime(2016, 1, 12, hour=1, tzinfo=dt.timezone.utc)
    started_event = {
        'decisionTaskStartedEventAttributes': {'scheduledEventId': 2},
        'eventId': 1,
        'eventTimestamp': ts1,
        'eventType': 'DecisionTaskStarted',
    }
    return {
        'events': [started_event],
        'previousStartedEventId': 1,
        'startedEventId': 3,
        'taskToken': 'val_task_token',
        'workflowExecution': {
            'runId': 'val_run_id',
            'workflowId': 'val_workflow_id',
        },
        'workflowType': {'name': 'my_workflow_type', 'version': 'v1'},
    }
| 46.29
| 98
| 0.513502
| 351
| 4,629
| 6.62963
| 0.185185
| 0.034379
| 0.048131
| 0.051569
| 0.825956
| 0.785131
| 0.785131
| 0.785131
| 0.785131
| 0.785131
| 0
| 0.040478
| 0.348887
| 4,629
| 99
| 99
| 46.757576
| 0.731586
| 0
| 0
| 0.755556
| 0
| 0
| 0.370274
| 0.101966
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.022222
| 0.011111
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
87d066ac9c674aa481a08950da91e49f088b23bd
| 5,319
|
py
|
Python
|
RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/freenect_camera/cfg/FreenectConfig.py
|
QianheYu/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | 1
|
2022-03-11T03:31:15.000Z
|
2022-03-11T03:31:15.000Z
|
RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/freenect_camera/cfg/FreenectConfig.py
|
bravetree/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | null | null | null |
RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/freenect_camera/cfg/FreenectConfig.py
|
bravetree/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | null | null | null |
## *********************************************************
##
## File autogenerated for the freenect_camera package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 245, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 290, 'description': 'Image output mode', 'max': 2, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'image_mode', 'edit_method': "{'enum_description': 'output mode', 'enum': [{'srcline': 8, 'description': '1280x1024', 'srcfile': '/home/xtark/ros_ws/src/third_packages/freenect_stack/freenect_camera/cfg/Freenect.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'SXGA'}, {'srcline': 9, 'description': '640x480', 'srcfile': '/home/xtark/ros_ws/src/third_packages/freenect_stack/freenect_camera/cfg/Freenect.cfg', 'cconsttype': 'const int', 'value': 2, 'ctype': 'int', 'type': 'int', 'name': 'VGA'}]}", 'default': 2, 'level': 0, 'min': 1, 'type': 'int'}, {'srcline': 290, 'description': 'Depth output mode', 'max': 2, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'depth_mode', 'edit_method': "{'enum_description': 'output mode', 'enum': [{'srcline': 8, 'description': '1280x1024', 'srcfile': '/home/xtark/ros_ws/src/third_packages/freenect_stack/freenect_camera/cfg/Freenect.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'SXGA'}, {'srcline': 9, 'description': '640x480', 'srcfile': '/home/xtark/ros_ws/src/third_packages/freenect_stack/freenect_camera/cfg/Freenect.cfg', 'cconsttype': 'const int', 'value': 2, 'ctype': 'int', 'type': 'int', 'name': 'VGA'}]}", 'default': 2, 'level': 0, 'min': 1, 'type': 'int'}, {'srcline': 
290, 'description': 'Depth data registration', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'depth_registration', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Skip N images for every image published (rgb/depth/depth_registered/ir)', 'max': 10, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'data_skip', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'depth image time offset in seconds', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'depth_time_offset', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 290, 'description': 'image time offset in seconds', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'image_time_offset', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 290, 'description': 'X offset between IR and depth images', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'depth_ir_offset_x', 'edit_method': '', 'default': 5.0, 'level': 0, 'min': -10.0, 'type': 'double'}, {'srcline': 290, 'description': 'Y offset between IR and depth images', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'depth_ir_offset_y', 
'edit_method': '', 'default': 4.0, 'level': 0, 'min': -10.0, 'type': 'double'}, {'srcline': 290, 'description': 'Z offset in mm', 'max': 50, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'z_offset_mm', 'edit_method': '', 'default': 0, 'level': 0, 'min': -50, 'type': 'int'}], 'type': '', 'id': 0}
# Per-parameter lookup tables, keyed by parameter name.
# NOTE(review): the names ``min``, ``max`` and ``type`` shadow Python
# builtins — tolerated only because this file is autogenerated by
# dynamic_reconfigure and must not be hand-edited.
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
#    params = []
#    params.extend(config['parameters'])
#    for group in config['groups']:
#        params.extend(extract_params(group))
#    return params
# Flatten config_description (extract_params recurses into nested groups, per
# the commented reference implementation above) and index each parameter's
# bounds, default, level and type by name; all_level ORs together every level.
for param in extract_params(config_description):
    min[param['name']] = param['min']
    max[param['name']] = param['max']
    defaults[param['name']] = param['default']
    level[param['name']] = param['level']
    type[param['name']] = param['type']
    all_level = all_level | param['level']
# Enum constants for the image_mode/depth_mode parameters (SXGA=1280x1024,
# VGA=640x480, per the edit_method enums in config_description).
Freenect_SXGA = 1
Freenect_VGA = 2
| 136.384615
| 4,405
| 0.660274
| 683
| 5,319
| 5.013177
| 0.174231
| 0.056951
| 0.037967
| 0.058411
| 0.71729
| 0.71729
| 0.707652
| 0.691881
| 0.691881
| 0.691881
| 0
| 0.029767
| 0.096823
| 5,319
| 38
| 4,406
| 139.973684
| 0.682973
| 0.074826
| 0
| 0
| 1
| 0.666667
| 0.695102
| 0.27
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
87dde7f39dcd34d9a67eba52f18a4eda0a352dde
| 2,179
|
py
|
Python
|
tests/data/power_op_spacing.py
|
henrikhorluck/black
|
5379d4f3f460ec9b7063dd1cc10f437b0edf9ae3
|
[
"MIT"
] | 2
|
2022-01-13T08:10:07.000Z
|
2022-01-13T08:35:37.000Z
|
tests/data/power_op_spacing.py
|
marnixah/black-but-usable
|
83b83d3066d1d857983bfa1a666a409e7255d79d
|
[
"MIT"
] | 12
|
2022-01-17T16:17:43.000Z
|
2022-03-28T16:38:39.000Z
|
tests/data/power_op_spacing.py
|
marnixah/black-but-usable
|
83b83d3066d1d857983bfa1a666a409e7255d79d
|
[
"MIT"
] | null | null | null |
def function(**kwargs):
t = a**2 + b**3
return t ** 2
def function_replace_spaces(**kwargs):
t = a **2 + b** 3 + c ** 4
def function_dont_replace_spaces():
{**a, **b, **c}
a = 5**~4
b = 5 ** f()
c = -(5**2)
d = 5 ** f["hi"]
e = lazy(lambda **kwargs: 5)
f = f() ** 5
g = a.b**c.d
h = 5 ** funcs.f()
i = funcs.f() ** 5
j = super().name ** 5
k = [(2**idx, value) for idx, value in pairs]
l = mod.weights_[0] == pytest.approx(0.95**100, abs=0.001)
m = [([2**63], [1, 2**63])]
n = count <= 10**5
o = settings(max_examples=10**6)
p = {(k, k**2): v**2 for k, v in pairs}
q = [10**i for i in range(6)]
r = x**y
a = 5.0**~4.0
b = 5.0 ** f()
c = -(5.0**2.0)
d = 5.0 ** f["hi"]
e = lazy(lambda **kwargs: 5)
f = f() ** 5.0
g = a.b**c.d
h = 5.0 ** funcs.f()
i = funcs.f() ** 5.0
j = super().name ** 5.0
k = [(2.0**idx, value) for idx, value in pairs]
l = mod.weights_[0] == pytest.approx(0.95**100, abs=0.001)
m = [([2.0**63.0], [1.0, 2**63.0])]
n = count <= 10**5.0
o = settings(max_examples=10**6.0)
p = {(k, k**2): v**2.0 for k, v in pairs}
q = [10.5**i for i in range(6)]
# output
def function(**kwargs):
t = a**2 + b**3
return t**2
def function_replace_spaces(**kwargs):
t = a**2 + b**3 + c**4
def function_dont_replace_spaces():
{**a, **b, **c}
a = 5**~4
b = 5 ** f()
c = -(5**2)
d = 5 ** f["hi"]
e = lazy(lambda **kwargs: 5)
f = f() ** 5
g = a.b**c.d
h = 5 ** funcs.f()
i = funcs.f() ** 5
j = super().name ** 5
k = [(2**idx, value) for idx, value in pairs]
l = mod.weights_[0] == pytest.approx(0.95**100, abs=0.001)
m = [([2**63], [1, 2**63])]
n = count <= 10**5
o = settings(max_examples=10**6)
p = {(k, k**2): v**2 for k, v in pairs}
q = [10**i for i in range(6)]
r = x**y
a = 5.0**~4.0
b = 5.0 ** f()
c = -(5.0**2.0)
d = 5.0 ** f["hi"]
e = lazy(lambda **kwargs: 5)
f = f() ** 5.0
g = a.b**c.d
h = 5.0 ** funcs.f()
i = funcs.f() ** 5.0
j = super().name ** 5.0
k = [(2.0**idx, value) for idx, value in pairs]
l = mod.weights_[0] == pytest.approx(0.95**100, abs=0.001)
m = [([2.0**63.0], [1.0, 2**63.0])]
n = count <= 10**5.0
o = settings(max_examples=10**6.0)
p = {(k, k**2): v**2.0 for k, v in pairs}
q = [10.5**i for i in range(6)]
| 20.951923
| 58
| 0.499312
| 483
| 2,179
| 2.215321
| 0.130435
| 0.033645
| 0.016822
| 0.033645
| 0.994393
| 0.994393
| 0.994393
| 0.994393
| 0.994393
| 0.994393
| 0
| 0.124927
| 0.21386
| 2,179
| 103
| 59
| 21.15534
| 0.499708
| 0.002754
| 0
| 1
| 0
| 0
| 0.003685
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3551164364810ddcf4d071b1a62257a5f0b65c53
| 2,784
|
py
|
Python
|
tests/path/apfs_container_path_spec.py
|
dfjxs/dfvfs
|
a4154b07bb08c3c86afa2847f3224189dd80c138
|
[
"Apache-2.0"
] | 176
|
2015-01-02T13:55:39.000Z
|
2022-03-12T11:44:37.000Z
|
tests/path/apfs_container_path_spec.py
|
dfjxs/dfvfs
|
a4154b07bb08c3c86afa2847f3224189dd80c138
|
[
"Apache-2.0"
] | 495
|
2015-01-13T06:47:06.000Z
|
2022-03-12T11:07:03.000Z
|
tests/path/apfs_container_path_spec.py
|
dfjxs/dfvfs
|
a4154b07bb08c3c86afa2847f3224189dd80c138
|
[
"Apache-2.0"
] | 62
|
2015-02-23T08:19:38.000Z
|
2022-03-18T06:01:22.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the APFS container path specification implementation."""
import unittest
from dfvfs.path import apfs_container_path_spec
from tests.path import test_lib
class APFSContainerPathSpecTest(test_lib.PathSpecTestCase):
"""Tests for the APFS container path specification implementation."""
def testInitialize(self):
"""Tests the path specification initialization."""
path_spec = apfs_container_path_spec.APFSContainerPathSpec(
parent=self._path_spec)
self.assertIsNotNone(path_spec)
path_spec = apfs_container_path_spec.APFSContainerPathSpec(
location='/apfs2', parent=self._path_spec)
self.assertIsNotNone(path_spec)
path_spec = apfs_container_path_spec.APFSContainerPathSpec(
volume_index=1, parent=self._path_spec)
self.assertIsNotNone(path_spec)
path_spec = apfs_container_path_spec.APFSContainerPathSpec(
location='/apfs2', volume_index=1, parent=self._path_spec)
self.assertIsNotNone(path_spec)
with self.assertRaises(ValueError):
apfs_container_path_spec.APFSContainerPathSpec(parent=None)
with self.assertRaises(ValueError):
apfs_container_path_spec.APFSContainerPathSpec(
parent=self._path_spec, bogus='BOGUS')
def testComparable(self):
"""Tests the path specification comparable property."""
path_spec = apfs_container_path_spec.APFSContainerPathSpec(
parent=self._path_spec)
self.assertIsNotNone(path_spec)
expected_comparable = '\n'.join([
'type: TEST',
'type: APFS_CONTAINER',
''])
self.assertEqual(path_spec.comparable, expected_comparable)
path_spec = apfs_container_path_spec.APFSContainerPathSpec(
location='/apfs2', parent=self._path_spec)
self.assertIsNotNone(path_spec)
expected_comparable = '\n'.join([
'type: TEST',
'type: APFS_CONTAINER, location: /apfs2',
''])
self.assertEqual(path_spec.comparable, expected_comparable)
path_spec = apfs_container_path_spec.APFSContainerPathSpec(
volume_index=1, parent=self._path_spec)
self.assertIsNotNone(path_spec)
expected_comparable = '\n'.join([
'type: TEST',
'type: APFS_CONTAINER, volume index: 1',
''])
self.assertEqual(path_spec.comparable, expected_comparable)
path_spec = apfs_container_path_spec.APFSContainerPathSpec(
location='/apfs2', volume_index=1, parent=self._path_spec)
self.assertIsNotNone(path_spec)
expected_comparable = '\n'.join([
'type: TEST',
'type: APFS_CONTAINER, location: /apfs2, volume index: 1',
''])
self.assertEqual(path_spec.comparable, expected_comparable)
if __name__ == '__main__':
unittest.main()
| 28.701031
| 71
| 0.718032
| 306
| 2,784
| 6.215686
| 0.173203
| 0.168244
| 0.116193
| 0.121451
| 0.863302
| 0.832808
| 0.832808
| 0.832808
| 0.774974
| 0.768665
| 0
| 0.005682
| 0.178161
| 2,784
| 96
| 72
| 29
| 0.825612
| 0.095187
| 0
| 0.736842
| 0
| 0
| 0.094113
| 0
| 0
| 0
| 0
| 0
| 0.245614
| 1
| 0.035088
| false
| 0
| 0.052632
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
358446bfd41fe18f8014725e7ca3e78a2c8c6044
| 3,430
|
py
|
Python
|
AutomationFramework/tests/interfaces/test_if_config.py
|
sbarguil/Testing-framework
|
f3ef69f1c4f0aeafd02e222d846162c711783b15
|
[
"Apache-2.0"
] | 1
|
2020-04-23T15:22:16.000Z
|
2020-04-23T15:22:16.000Z
|
AutomationFramework/tests/interfaces/test_if_config.py
|
sbarguil/Testing-framework
|
f3ef69f1c4f0aeafd02e222d846162c711783b15
|
[
"Apache-2.0"
] | 44
|
2020-08-13T19:35:41.000Z
|
2021-03-01T09:08:00.000Z
|
AutomationFramework/tests/interfaces/test_if_config.py
|
sbarguil/Testing-framework
|
f3ef69f1c4f0aeafd02e222d846162c711783b15
|
[
"Apache-2.0"
] | 6
|
2020-04-23T15:29:38.000Z
|
2022-03-03T14:23:38.000Z
|
import pytest
from AutomationFramework.page_objects.interfaces.interfaces import Interfaces
from AutomationFramework.tests.base_test import BaseTest
class TestInterfacesConfig(BaseTest):
test_case_file = 'if_config.yml'
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_config_description',
'page_object_class': Interfaces}])
def test_if_config_description(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_config_enabled',
'page_object_class': Interfaces}])
def test_if_config_enabled(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_config_loopback_mode',
'page_object_class': Interfaces}])
def test_if_config_loopback_mode(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_config_mtu',
'page_object_class': Interfaces}])
def test_if_config_mtu(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_config_tpid',
'page_object_class': Interfaces}])
def test_if_config_tpid(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_config_type',
'page_object_class': Interfaces}])
def test_if_config_type(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
| 68.6
| 117
| 0.654519
| 376
| 3,430
| 5.361702
| 0.106383
| 0.146825
| 0.238095
| 0.095238
| 0.866071
| 0.866071
| 0.866071
| 0.866071
| 0.747024
| 0.747024
| 0
| 0
| 0.273761
| 3,430
| 49
| 118
| 70
| 0.809314
| 0
| 0
| 0.585366
| 0
| 0
| 0.150729
| 0.051312
| 0
| 0
| 0
| 0
| 0.146341
| 1
| 0.146341
| false
| 0
| 0.073171
| 0
| 0.268293
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
358eef2ea355c5628e3812428915b00551cd145d
| 15
|
py
|
Python
|
notebooks/python_recap/_solutions/05-numpy77.py
|
rprops/Python_DS-WS
|
b2fc449a74be0c82863e5fcf1ddbe7d64976d530
|
[
"BSD-3-Clause"
] | 65
|
2017-03-21T09:15:40.000Z
|
2022-02-01T23:43:08.000Z
|
notebooks/python_recap/_solutions/05-numpy77.py
|
rprops/Python_DS-WS
|
b2fc449a74be0c82863e5fcf1ddbe7d64976d530
|
[
"BSD-3-Clause"
] | 100
|
2016-12-15T03:44:06.000Z
|
2022-03-07T08:14:07.000Z
|
notebooks/python_recap/_solutions/05-numpy77.py
|
rprops/Python_DS-WS
|
b2fc449a74be0c82863e5fcf1ddbe7d64976d530
|
[
"BSD-3-Clause"
] | 52
|
2016-12-19T07:48:52.000Z
|
2022-02-19T17:53:48.000Z
|
AR[1::2] = 0
AR
| 7.5
| 12
| 0.466667
| 5
| 15
| 1.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.2
| 15
| 2
| 13
| 7.5
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
35c9f52aac4b2089809ffc0eab77d3124dd4567d
| 35,697
|
py
|
Python
|
plugins/easyvista/icon_easyvista/actions/search_tickets/schema.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/easyvista/icon_easyvista/actions/search_tickets/schema.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/easyvista/icon_easyvista/actions/search_tickets/schema.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Search for EasyVista tickets"
class Input:
QUERY = "query"
class Output:
RESULTS = "results"
class SearchTicketsInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"query": {
"type": "string",
"title": "Query",
"description": "Search query. Returns all tickets if left empty",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class SearchTicketsOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"results": {
"$ref": "#/definitions/search_ticket_results",
"title": "Results",
"description": "Search results for the given query",
"order": 1
}
},
"required": [
"results"
],
"definitions": {
"catalog_request": {
"type": "object",
"title": "catalog_request",
"properties": {
"CATALOG_REQUEST_PATH": {
"type": "string",
"title": "Catalog Request Path",
"description": "Catalog request path",
"order": 2
},
"CODE": {
"type": "string",
"title": "Code",
"description": "Code",
"order": 1
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 3
},
"SD_CATALOG_ID": {
"type": "string",
"title": "SD Catalog ID",
"description": "SD catalog ID",
"order": 4
},
"TITLE_EN": {
"type": "string",
"title": "Title EN",
"description": "Title EN",
"order": 5
}
}
},
"comment": {
"type": "object",
"title": "comment",
"properties": {
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 1
}
}
},
"department": {
"type": "object",
"title": "department",
"properties": {
"DEPARTMENT_CODE": {
"type": "string",
"title": "Department Code",
"description": "Department code",
"order": 1
},
"DEPARTMENT_EN": {
"type": "string",
"title": "Department EN",
"description": "Department EN",
"order": 2
},
"DEPARTMENT_ID": {
"type": "string",
"title": "Department ID",
"description": "Department ID",
"order": 5
},
"DEPARTMENT_LABEL": {
"type": "string",
"title": "Department Label",
"description": "Department label",
"order": 6
},
"DEPARTMENT_PATH": {
"type": "string",
"title": "Department Path",
"description": "Department path",
"order": 3
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 4
}
}
},
"employee": {
"type": "object",
"title": "employee",
"properties": {
"BEGIN_OF_CONTRACT": {
"type": "string",
"title": "Begin of Contract",
"description": "Begin of contract",
"order": 1
},
"CELLULAR_NUMBER": {
"type": "string",
"title": "Cellular Number",
"description": "Cellular number",
"order": 2
},
"DEPARTMENT_PATH": {
"type": "string",
"title": "Department Path",
"description": "Department path",
"order": 3
},
"EMPLOYEE_ID": {
"type": "string",
"title": "Employee ID",
"description": "Employee ID",
"order": 5
},
"E_MAIL": {
"type": "string",
"title": "Email",
"description": "Email",
"order": 4
},
"LAST_NAME": {
"type": "string",
"title": "Last Name",
"description": "Last name",
"order": 6
},
"LOCATION_PATH": {
"type": "string",
"title": "Location Path",
"description": "Location path",
"order": 7
},
"PHONE_NUMBER": {
"type": "string",
"title": "Phone Number",
"description": "Phone number",
"order": 8
}
}
},
"known_error": {
"type": "object",
"title": "known_error",
"properties": {
"KNOWNERROR_PATH": {
"type": "string",
"title": "Known Error Path",
"description": "Known error path",
"order": 1
},
"KNOWN_PROBLEMS_ID": {
"type": "string",
"title": "Known Problems ID",
"description": "Known problems ID",
"order": 2
},
"KP_NUMBER": {
"type": "string",
"title": "KP Number",
"description": "KP number",
"order": 3
},
"QUESTION_EN": {
"type": "string",
"title": "Question EN",
"description": "Question EN",
"order": 4
}
}
},
"location": {
"type": "object",
"title": "location",
"properties": {
"CITY": {
"type": "string",
"title": "City",
"description": "City",
"order": 1
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 5
},
"LOCATION_CODE": {
"type": "string",
"title": "Location Code",
"description": "Location code",
"order": 2
},
"LOCATION_EN": {
"type": "string",
"title": "Location EN",
"description": "Location EN",
"order": 3
},
"LOCATION_ID": {
"type": "string",
"title": "Location ID",
"description": "Location ID",
"order": 6
},
"LOCATION_PATH": {
"type": "string",
"title": "Location Path",
"description": "Location path",
"order": 4
}
}
},
"record": {
"type": "object",
"title": "record",
"properties": {
"CATALOG_REQUEST": {
"$ref": "#/definitions/catalog_request",
"title": "Catalog Request",
"description": "Catalog request",
"order": 7
},
"COMMENT": {
"$ref": "#/definitions/comment",
"title": "Comment",
"description": "Comment",
"order": 2
},
"DEPARTMENT": {
"$ref": "#/definitions/department",
"title": "Department",
"description": "Department",
"order": 12
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 1
},
"KNOWNERROR": {
"$ref": "#/definitions/known_error",
"title": "Known Error",
"description": "Known error",
"order": 13
},
"LOCATION": {
"$ref": "#/definitions/location",
"title": "Location",
"description": "Location",
"order": 11
},
"MAX_RESOLUTION_DATE_UT": {
"type": "string",
"title": "Max Resolution Date",
"description": "Max resolution date",
"order": 3
},
"RECIPIENT": {
"$ref": "#/definitions/employee",
"title": "Recipient",
"description": "Recipient",
"order": 9
},
"REQUESTOR": {
"$ref": "#/definitions/employee",
"title": "Requestor",
"description": "Requestor",
"order": 10
},
"REQUEST_ID": {
"type": "string",
"title": "Request ID",
"description": "Request ID",
"order": 4
},
"RFC_NUMBER": {
"type": "string",
"title": "RFC Number",
"description": "RFC number",
"order": 5
},
"STATUS": {
"$ref": "#/definitions/status",
"title": "Status",
"description": "Status",
"order": 8
},
"SUBMIT_DATE_UT": {
"type": "string",
"title": "Submit Date",
"description": "Submit date",
"order": 6
}
},
"definitions": {
"catalog_request": {
"type": "object",
"title": "catalog_request",
"properties": {
"CATALOG_REQUEST_PATH": {
"type": "string",
"title": "Catalog Request Path",
"description": "Catalog request path",
"order": 2
},
"CODE": {
"type": "string",
"title": "Code",
"description": "Code",
"order": 1
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 3
},
"SD_CATALOG_ID": {
"type": "string",
"title": "SD Catalog ID",
"description": "SD catalog ID",
"order": 4
},
"TITLE_EN": {
"type": "string",
"title": "Title EN",
"description": "Title EN",
"order": 5
}
}
},
"comment": {
"type": "object",
"title": "comment",
"properties": {
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 1
}
}
},
"department": {
"type": "object",
"title": "department",
"properties": {
"DEPARTMENT_CODE": {
"type": "string",
"title": "Department Code",
"description": "Department code",
"order": 1
},
"DEPARTMENT_EN": {
"type": "string",
"title": "Department EN",
"description": "Department EN",
"order": 2
},
"DEPARTMENT_ID": {
"type": "string",
"title": "Department ID",
"description": "Department ID",
"order": 5
},
"DEPARTMENT_LABEL": {
"type": "string",
"title": "Department Label",
"description": "Department label",
"order": 6
},
"DEPARTMENT_PATH": {
"type": "string",
"title": "Department Path",
"description": "Department path",
"order": 3
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 4
}
}
},
"employee": {
"type": "object",
"title": "employee",
"properties": {
"BEGIN_OF_CONTRACT": {
"type": "string",
"title": "Begin of Contract",
"description": "Begin of contract",
"order": 1
},
"CELLULAR_NUMBER": {
"type": "string",
"title": "Cellular Number",
"description": "Cellular number",
"order": 2
},
"DEPARTMENT_PATH": {
"type": "string",
"title": "Department Path",
"description": "Department path",
"order": 3
},
"EMPLOYEE_ID": {
"type": "string",
"title": "Employee ID",
"description": "Employee ID",
"order": 5
},
"E_MAIL": {
"type": "string",
"title": "Email",
"description": "Email",
"order": 4
},
"LAST_NAME": {
"type": "string",
"title": "Last Name",
"description": "Last name",
"order": 6
},
"LOCATION_PATH": {
"type": "string",
"title": "Location Path",
"description": "Location path",
"order": 7
},
"PHONE_NUMBER": {
"type": "string",
"title": "Phone Number",
"description": "Phone number",
"order": 8
}
}
},
"known_error": {
"type": "object",
"title": "known_error",
"properties": {
"KNOWNERROR_PATH": {
"type": "string",
"title": "Known Error Path",
"description": "Known error path",
"order": 1
},
"KNOWN_PROBLEMS_ID": {
"type": "string",
"title": "Known Problems ID",
"description": "Known problems ID",
"order": 2
},
"KP_NUMBER": {
"type": "string",
"title": "KP Number",
"description": "KP number",
"order": 3
},
"QUESTION_EN": {
"type": "string",
"title": "Question EN",
"description": "Question EN",
"order": 4
}
}
},
"location": {
"type": "object",
"title": "location",
"properties": {
"CITY": {
"type": "string",
"title": "City",
"description": "City",
"order": 1
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 5
},
"LOCATION_CODE": {
"type": "string",
"title": "Location Code",
"description": "Location code",
"order": 2
},
"LOCATION_EN": {
"type": "string",
"title": "Location EN",
"description": "Location EN",
"order": 3
},
"LOCATION_ID": {
"type": "string",
"title": "Location ID",
"description": "Location ID",
"order": 6
},
"LOCATION_PATH": {
"type": "string",
"title": "Location Path",
"description": "Location path",
"order": 4
}
}
},
"status": {
"type": "object",
"title": "status",
"properties": {
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 3
},
"STATUS_EN": {
"type": "string",
"title": "Status EN",
"description": "Status EN",
"order": 1
},
"STATUS_GUID": {
"type": "string",
"title": "Status GUID",
"description": "Status GUID",
"order": 2
},
"STATUS_ID": {
"type": "string",
"title": "Status ID",
"description": "Status ID",
"order": 4
}
}
}
}
},
"search_ticket_results": {
"type": "object",
"title": "search_ticket_results",
"properties": {
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 1
},
"record_count": {
"type": "string",
"title": "Record Count",
"description": "Record count",
"order": 2
},
"records": {
"type": "array",
"title": "Records",
"description": "Records",
"items": {
"$ref": "#/definitions/record"
},
"order": 4
},
"total_record_count": {
"type": "string",
"title": "Total Record Count",
"description": "Total record count",
"order": 3
}
},
"definitions": {
"catalog_request": {
"type": "object",
"title": "catalog_request",
"properties": {
"CATALOG_REQUEST_PATH": {
"type": "string",
"title": "Catalog Request Path",
"description": "Catalog request path",
"order": 2
},
"CODE": {
"type": "string",
"title": "Code",
"description": "Code",
"order": 1
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 3
},
"SD_CATALOG_ID": {
"type": "string",
"title": "SD Catalog ID",
"description": "SD catalog ID",
"order": 4
},
"TITLE_EN": {
"type": "string",
"title": "Title EN",
"description": "Title EN",
"order": 5
}
}
},
"comment": {
"type": "object",
"title": "comment",
"properties": {
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 1
}
}
},
"department": {
"type": "object",
"title": "department",
"properties": {
"DEPARTMENT_CODE": {
"type": "string",
"title": "Department Code",
"description": "Department code",
"order": 1
},
"DEPARTMENT_EN": {
"type": "string",
"title": "Department EN",
"description": "Department EN",
"order": 2
},
"DEPARTMENT_ID": {
"type": "string",
"title": "Department ID",
"description": "Department ID",
"order": 5
},
"DEPARTMENT_LABEL": {
"type": "string",
"title": "Department Label",
"description": "Department label",
"order": 6
},
"DEPARTMENT_PATH": {
"type": "string",
"title": "Department Path",
"description": "Department path",
"order": 3
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 4
}
}
},
"employee": {
"type": "object",
"title": "employee",
"properties": {
"BEGIN_OF_CONTRACT": {
"type": "string",
"title": "Begin of Contract",
"description": "Begin of contract",
"order": 1
},
"CELLULAR_NUMBER": {
"type": "string",
"title": "Cellular Number",
"description": "Cellular number",
"order": 2
},
"DEPARTMENT_PATH": {
"type": "string",
"title": "Department Path",
"description": "Department path",
"order": 3
},
"EMPLOYEE_ID": {
"type": "string",
"title": "Employee ID",
"description": "Employee ID",
"order": 5
},
"E_MAIL": {
"type": "string",
"title": "Email",
"description": "Email",
"order": 4
},
"LAST_NAME": {
"type": "string",
"title": "Last Name",
"description": "Last name",
"order": 6
},
"LOCATION_PATH": {
"type": "string",
"title": "Location Path",
"description": "Location path",
"order": 7
},
"PHONE_NUMBER": {
"type": "string",
"title": "Phone Number",
"description": "Phone number",
"order": 8
}
}
},
"known_error": {
"type": "object",
"title": "known_error",
"properties": {
"KNOWNERROR_PATH": {
"type": "string",
"title": "Known Error Path",
"description": "Known error path",
"order": 1
},
"KNOWN_PROBLEMS_ID": {
"type": "string",
"title": "Known Problems ID",
"description": "Known problems ID",
"order": 2
},
"KP_NUMBER": {
"type": "string",
"title": "KP Number",
"description": "KP number",
"order": 3
},
"QUESTION_EN": {
"type": "string",
"title": "Question EN",
"description": "Question EN",
"order": 4
}
}
},
"location": {
"type": "object",
"title": "location",
"properties": {
"CITY": {
"type": "string",
"title": "City",
"description": "City",
"order": 1
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 5
},
"LOCATION_CODE": {
"type": "string",
"title": "Location Code",
"description": "Location code",
"order": 2
},
"LOCATION_EN": {
"type": "string",
"title": "Location EN",
"description": "Location EN",
"order": 3
},
"LOCATION_ID": {
"type": "string",
"title": "Location ID",
"description": "Location ID",
"order": 6
},
"LOCATION_PATH": {
"type": "string",
"title": "Location Path",
"description": "Location path",
"order": 4
}
}
},
"record": {
"type": "object",
"title": "record",
"properties": {
"CATALOG_REQUEST": {
"$ref": "#/definitions/catalog_request",
"title": "Catalog Request",
"description": "Catalog request",
"order": 7
},
"COMMENT": {
"$ref": "#/definitions/comment",
"title": "Comment",
"description": "Comment",
"order": 2
},
"DEPARTMENT": {
"$ref": "#/definitions/department",
"title": "Department",
"description": "Department",
"order": 12
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 1
},
"KNOWNERROR": {
"$ref": "#/definitions/known_error",
"title": "Known Error",
"description": "Known error",
"order": 13
},
"LOCATION": {
"$ref": "#/definitions/location",
"title": "Location",
"description": "Location",
"order": 11
},
"MAX_RESOLUTION_DATE_UT": {
"type": "string",
"title": "Max Resolution Date",
"description": "Max resolution date",
"order": 3
},
"RECIPIENT": {
"$ref": "#/definitions/employee",
"title": "Recipient",
"description": "Recipient",
"order": 9
},
"REQUESTOR": {
"$ref": "#/definitions/employee",
"title": "Requestor",
"description": "Requestor",
"order": 10
},
"REQUEST_ID": {
"type": "string",
"title": "Request ID",
"description": "Request ID",
"order": 4
},
"RFC_NUMBER": {
"type": "string",
"title": "RFC Number",
"description": "RFC number",
"order": 5
},
"STATUS": {
"$ref": "#/definitions/status",
"title": "Status",
"description": "Status",
"order": 8
},
"SUBMIT_DATE_UT": {
"type": "string",
"title": "Submit Date",
"description": "Submit date",
"order": 6
}
},
"definitions": {
"catalog_request": {
"type": "object",
"title": "catalog_request",
"properties": {
"CATALOG_REQUEST_PATH": {
"type": "string",
"title": "Catalog Request Path",
"description": "Catalog request path",
"order": 2
},
"CODE": {
"type": "string",
"title": "Code",
"description": "Code",
"order": 1
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 3
},
"SD_CATALOG_ID": {
"type": "string",
"title": "SD Catalog ID",
"description": "SD catalog ID",
"order": 4
},
"TITLE_EN": {
"type": "string",
"title": "Title EN",
"description": "Title EN",
"order": 5
}
}
},
"comment": {
"type": "object",
"title": "comment",
"properties": {
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 1
}
}
},
"department": {
"type": "object",
"title": "department",
"properties": {
"DEPARTMENT_CODE": {
"type": "string",
"title": "Department Code",
"description": "Department code",
"order": 1
},
"DEPARTMENT_EN": {
"type": "string",
"title": "Department EN",
"description": "Department EN",
"order": 2
},
"DEPARTMENT_ID": {
"type": "string",
"title": "Department ID",
"description": "Department ID",
"order": 5
},
"DEPARTMENT_LABEL": {
"type": "string",
"title": "Department Label",
"description": "Department label",
"order": 6
},
"DEPARTMENT_PATH": {
"type": "string",
"title": "Department Path",
"description": "Department path",
"order": 3
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 4
}
}
},
"employee": {
"type": "object",
"title": "employee",
"properties": {
"BEGIN_OF_CONTRACT": {
"type": "string",
"title": "Begin of Contract",
"description": "Begin of contract",
"order": 1
},
"CELLULAR_NUMBER": {
"type": "string",
"title": "Cellular Number",
"description": "Cellular number",
"order": 2
},
"DEPARTMENT_PATH": {
"type": "string",
"title": "Department Path",
"description": "Department path",
"order": 3
},
"EMPLOYEE_ID": {
"type": "string",
"title": "Employee ID",
"description": "Employee ID",
"order": 5
},
"E_MAIL": {
"type": "string",
"title": "Email",
"description": "Email",
"order": 4
},
"LAST_NAME": {
"type": "string",
"title": "Last Name",
"description": "Last name",
"order": 6
},
"LOCATION_PATH": {
"type": "string",
"title": "Location Path",
"description": "Location path",
"order": 7
},
"PHONE_NUMBER": {
"type": "string",
"title": "Phone Number",
"description": "Phone number",
"order": 8
}
}
},
"known_error": {
"type": "object",
"title": "known_error",
"properties": {
"KNOWNERROR_PATH": {
"type": "string",
"title": "Known Error Path",
"description": "Known error path",
"order": 1
},
"KNOWN_PROBLEMS_ID": {
"type": "string",
"title": "Known Problems ID",
"description": "Known problems ID",
"order": 2
},
"KP_NUMBER": {
"type": "string",
"title": "KP Number",
"description": "KP number",
"order": 3
},
"QUESTION_EN": {
"type": "string",
"title": "Question EN",
"description": "Question EN",
"order": 4
}
}
},
"location": {
"type": "object",
"title": "location",
"properties": {
"CITY": {
"type": "string",
"title": "City",
"description": "City",
"order": 1
},
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 5
},
"LOCATION_CODE": {
"type": "string",
"title": "Location Code",
"description": "Location code",
"order": 2
},
"LOCATION_EN": {
"type": "string",
"title": "Location EN",
"description": "Location EN",
"order": 3
},
"LOCATION_ID": {
"type": "string",
"title": "Location ID",
"description": "Location ID",
"order": 6
},
"LOCATION_PATH": {
"type": "string",
"title": "Location Path",
"description": "Location path",
"order": 4
}
}
},
"status": {
"type": "object",
"title": "status",
"properties": {
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 3
},
"STATUS_EN": {
"type": "string",
"title": "Status EN",
"description": "Status EN",
"order": 1
},
"STATUS_GUID": {
"type": "string",
"title": "Status GUID",
"description": "Status GUID",
"order": 2
},
"STATUS_ID": {
"type": "string",
"title": "Status ID",
"description": "Status ID",
"order": 4
}
}
}
}
},
"status": {
"type": "object",
"title": "status",
"properties": {
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 3
},
"STATUS_EN": {
"type": "string",
"title": "Status EN",
"description": "Status EN",
"order": 1
},
"STATUS_GUID": {
"type": "string",
"title": "Status GUID",
"description": "Status GUID",
"order": 2
},
"STATUS_ID": {
"type": "string",
"title": "Status ID",
"description": "Status ID",
"order": 4
}
}
}
}
},
"status": {
"type": "object",
"title": "status",
"properties": {
"HREF": {
"type": "string",
"title": "HREF",
"description": "HREF hyperlink",
"order": 3
},
"STATUS_EN": {
"type": "string",
"title": "Status EN",
"description": "Status EN",
"order": 1
},
"STATUS_GUID": {
"type": "string",
"title": "Status GUID",
"description": "Status GUID",
"order": 2
},
"STATUS_ID": {
"type": "string",
"title": "Status ID",
"description": "Status ID",
"order": 4
}
}
}
}
}
""")
def __init__(self):
    """Initialise the instance by passing the class-level JSON schema to the parent."""
    # NOTE(review): super(self.__class__, self) recurses infinitely if this
    # class is ever subclassed (the subclass instance resolves
    # self.__class__ to itself). Prefer naming the class explicitly once
    # its name is confirmed.
    super(self.__class__, self).__init__(self.schema)
| 28.511981
| 71
| 0.361263
| 2,287
| 35,697
| 5.551815
| 0.048098
| 0.118138
| 0.177207
| 0.034811
| 0.945184
| 0.941088
| 0.941088
| 0.941088
| 0.93337
| 0.93337
| 0
| 0.009604
| 0.486624
| 35,697
| 1,251
| 72
| 28.534772
| 0.683237
| 0.001037
| 0
| 0.777868
| 1
| 0
| 0.985333
| 0.019322
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001616
| false
| 0
| 0.001616
| 0
| 0.011309
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ea60b22c68896af6447ae0785e71821db80d4713
| 167
|
py
|
Python
|
tests/models.py
|
webu/dalec
|
ddc4f3c4627c84c5a70e9052d28f77d6ff8755a7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/models.py
|
webu/dalec
|
ddc4f3c4627c84c5a70e9052d28f77d6ff8755a7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/models.py
|
webu/dalec
|
ddc4f3c4627c84c5a70e9052d28f77d6ff8755a7
|
[
"BSD-3-Clause"
] | null | null | null |
from dalec.models import ContentBase
from dalec.models import FetchHistoryBase
class Content(ContentBase):
    """Minimal concrete subclass of ContentBase."""
class FetchHistory(FetchHistoryBase):
    """Minimal concrete subclass of FetchHistoryBase."""
| 15.181818
| 41
| 0.796407
| 18
| 167
| 7.388889
| 0.555556
| 0.135338
| 0.225564
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155689
| 167
| 10
| 42
| 16.7
| 0.943262
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
576b4dbf2b254279957ec910b83710450d26a152
| 5,785
|
py
|
Python
|
ctre/__init__.py
|
TheTripleV/robotpy-ctre
|
2b27ec8b0b9eb0885d57acb5e7ade5d97c32194b
|
[
"Apache-2.0"
] | null | null | null |
ctre/__init__.py
|
TheTripleV/robotpy-ctre
|
2b27ec8b0b9eb0885d57acb5e7ade5d97c32194b
|
[
"Apache-2.0"
] | null | null | null |
ctre/__init__.py
|
TheTripleV/robotpy-ctre
|
2b27ec8b0b9eb0885d57acb5e7ade5d97c32194b
|
[
"Apache-2.0"
] | null | null | null |
from . import _init_ctre
# autogenerated by 'robotpy-build create-imports ctre ctre._ctre'
from ._ctre import (
AbsoluteSensorRange,
AbsoluteSensorRangeRoutines,
BaseMotorController,
BaseMotorControllerConfiguration,
BaseMotorControllerUtil,
BasePIDSetConfiguration,
BaseTalon,
BaseTalonConfigUtil,
BaseTalonConfiguration,
BaseTalonPIDSetConfigUtil,
BaseTalonPIDSetConfiguration,
BufferedTrajectoryPointStream,
CANBusAddressable,
CANCoder,
CANCoderConfigUtils,
CANCoderConfiguration,
CANCoderFaults,
CANCoderStatusFrame,
CANCoderStickyFaults,
CANifier,
CANifierConfigUtils,
CANifierConfiguration,
CANifierControlFrame,
CANifierFaults,
CANifierStatusFrame,
CANifierStickyFaults,
CANifierVelocityMeasPeriod,
CANifierVelocityMeasPeriodRoutines,
ControlFrame,
ControlFrameEnhanced,
ControlFrameRoutines,
ControlMode,
CustomParamConfigUtil,
CustomParamConfiguration,
DemandType,
ErrorCode,
Faults,
FeedbackDevice,
FeedbackDeviceRoutines,
FilterConfigUtil,
FilterConfiguration,
FollowerType,
IFollower,
IMotorController,
IMotorControllerEnhanced,
InvertType,
LimitSwitchNormal,
LimitSwitchRoutines,
LimitSwitchSource,
MagnetFieldStrength,
MotionProfileStatus,
MotorCommutation,
NeutralMode,
Orchestra,
ParamEnum,
PigeonIMU,
PigeonIMUConfigUtils,
PigeonIMUConfiguration,
PigeonIMU_ControlFrame,
PigeonIMU_Faults,
PigeonIMU_StatusFrame,
PigeonIMU_StickyFaults,
RemoteFeedbackDevice,
RemoteLimitSwitchSource,
RemoteSensorSource,
RemoteSensorSourceRoutines,
SensorCollection,
SensorInitializationStrategy,
SensorInitializationStrategyRoutines,
SensorTerm,
SensorTermRoutines,
SensorTimeBase,
SensorTimeBaseRoutines,
SensorVelocityMeasPeriod,
SensorVelocityMeasPeriodRoutines,
SetValueMotionProfile,
SlotConfigUtil,
SlotConfiguration,
StatorCurrentLimitConfiguration,
StatusFrame,
StatusFrameEnhanced,
StatusFrameRoutines,
StickyFaults,
SupplyCurrentLimitConfiguration,
TalonFX,
TalonFXConfigUtil,
TalonFXConfiguration,
TalonFXControlMode,
TalonFXFeedbackDevice,
TalonFXInvertType,
TalonFXPIDSetConfiguration,
TalonFXSensorCollection,
TalonSRX,
TalonSRXConfigUtil,
TalonSRXConfiguration,
TalonSRXFeedbackDevice,
TalonSRXPIDSetConfiguration,
TrajectoryPoint,
VelocityMeasPeriod,
VelocityMeasPeriodRoutines,
VictorConfigUtil,
VictorSPX,
VictorSPXConfiguration,
VictorSPXPIDSetConfigUtil,
VictorSPXPIDSetConfiguration,
WPI_BaseMotorController,
WPI_TalonFX,
WPI_TalonSRX,
WPI_VictorSPX,
)
# Public API: mirrors the autogenerated `from ._ctre import (...)` list above
# (produced by 'robotpy-build create-imports'); keep the two lists in sync.
__all__ = [
    "AbsoluteSensorRange",
    "AbsoluteSensorRangeRoutines",
    "BaseMotorController",
    "BaseMotorControllerConfiguration",
    "BaseMotorControllerUtil",
    "BasePIDSetConfiguration",
    "BaseTalon",
    "BaseTalonConfigUtil",
    "BaseTalonConfiguration",
    "BaseTalonPIDSetConfigUtil",
    "BaseTalonPIDSetConfiguration",
    "BufferedTrajectoryPointStream",
    "CANBusAddressable",
    "CANCoder",
    "CANCoderConfigUtils",
    "CANCoderConfiguration",
    "CANCoderFaults",
    "CANCoderStatusFrame",
    "CANCoderStickyFaults",
    "CANifier",
    "CANifierConfigUtils",
    "CANifierConfiguration",
    "CANifierControlFrame",
    "CANifierFaults",
    "CANifierStatusFrame",
    "CANifierStickyFaults",
    "CANifierVelocityMeasPeriod",
    "CANifierVelocityMeasPeriodRoutines",
    "ControlFrame",
    "ControlFrameEnhanced",
    "ControlFrameRoutines",
    "ControlMode",
    "CustomParamConfigUtil",
    "CustomParamConfiguration",
    "DemandType",
    "ErrorCode",
    "Faults",
    "FeedbackDevice",
    "FeedbackDeviceRoutines",
    "FilterConfigUtil",
    "FilterConfiguration",
    "FollowerType",
    "IFollower",
    "IMotorController",
    "IMotorControllerEnhanced",
    "InvertType",
    "LimitSwitchNormal",
    "LimitSwitchRoutines",
    "LimitSwitchSource",
    "MagnetFieldStrength",
    "MotionProfileStatus",
    "MotorCommutation",
    "NeutralMode",
    "Orchestra",
    "ParamEnum",
    "PigeonIMU",
    "PigeonIMUConfigUtils",
    "PigeonIMUConfiguration",
    "PigeonIMU_ControlFrame",
    "PigeonIMU_Faults",
    "PigeonIMU_StatusFrame",
    "PigeonIMU_StickyFaults",
    "RemoteFeedbackDevice",
    "RemoteLimitSwitchSource",
    "RemoteSensorSource",
    "RemoteSensorSourceRoutines",
    "SensorCollection",
    "SensorInitializationStrategy",
    "SensorInitializationStrategyRoutines",
    "SensorTerm",
    "SensorTermRoutines",
    "SensorTimeBase",
    "SensorTimeBaseRoutines",
    "SensorVelocityMeasPeriod",
    "SensorVelocityMeasPeriodRoutines",
    "SetValueMotionProfile",
    "SlotConfigUtil",
    "SlotConfiguration",
    "StatorCurrentLimitConfiguration",
    "StatusFrame",
    "StatusFrameEnhanced",
    "StatusFrameRoutines",
    "StickyFaults",
    "SupplyCurrentLimitConfiguration",
    "TalonFX",
    "TalonFXConfigUtil",
    "TalonFXConfiguration",
    "TalonFXControlMode",
    "TalonFXFeedbackDevice",
    "TalonFXInvertType",
    "TalonFXPIDSetConfiguration",
    "TalonFXSensorCollection",
    "TalonSRX",
    "TalonSRXConfigUtil",
    "TalonSRXConfiguration",
    "TalonSRXFeedbackDevice",
    "TalonSRXPIDSetConfiguration",
    "TrajectoryPoint",
    "VelocityMeasPeriod",
    "VelocityMeasPeriodRoutines",
    "VictorConfigUtil",
    "VictorSPX",
    "VictorSPXConfiguration",
    "VictorSPXPIDSetConfigUtil",
    "VictorSPXPIDSetConfiguration",
    "WPI_BaseMotorController",
    "WPI_TalonFX",
    "WPI_TalonSRX",
    "WPI_VictorSPX",
]
from .version import version as __version__
| 25.262009
| 65
| 0.728781
| 257
| 5,785
| 16.29572
| 0.447471
| 0.00382
| 0.031041
| 0.046323
| 0.971347
| 0.971347
| 0.971347
| 0.971347
| 0.971347
| 0.971347
| 0
| 0
| 0.193431
| 5,785
| 228
| 66
| 25.372807
| 0.897557
| 0.01089
| 0
| 0
| 1
| 0
| 0.356993
| 0.179545
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013393
| 0
| 0.013393
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5774bbb2703cca5fce37dac812351d303e37f096
| 45,438
|
py
|
Python
|
openregistry/assets/bounce/tests/blanks/asset.py
|
EBRD-ProzorroSale/openregistry.assets.bounce
|
b3ce1720b62de78f4c08c2d4d88e6b056c8cdbb5
|
[
"Apache-2.0"
] | null | null | null |
openregistry/assets/bounce/tests/blanks/asset.py
|
EBRD-ProzorroSale/openregistry.assets.bounce
|
b3ce1720b62de78f4c08c2d4d88e6b056c8cdbb5
|
[
"Apache-2.0"
] | 44
|
2018-04-20T16:06:22.000Z
|
2022-03-21T22:16:35.000Z
|
openregistry/assets/bounce/tests/blanks/asset.py
|
EBRD-ProzorroSale/openregistry.assets.bounce
|
b3ce1720b62de78f4c08c2d4d88e6b056c8cdbb5
|
[
"Apache-2.0"
] | 8
|
2018-04-17T09:12:27.000Z
|
2019-03-26T13:58:59.000Z
|
# -*- coding: utf-8 -*-
from copy import deepcopy
from datetime import timedelta
from uuid import uuid4
from openregistry.assets.core.tests.base import create_blacklist
from openregistry.assets.core.tests.blanks.json_data import test_loki_item_data
from openregistry.assets.core.constants import STATUS_CHANGES, ASSET_STATUSES
from openregistry.assets.core.models import (
Period
)
from openregistry.assets.bounce.tests.base import (
check_patch_status_200,
check_patch_status_403
)
from openregistry.assets.core.utils import (
get_now,
calculate_business_date
)
def post_related_process(self, asset_id, related_process_id=None):
    """POST a related process of type 'lot' to the asset and expect 201.

    :param asset_id: id of the asset to attach the related process to.
    :param related_process_id: optional explicit process id; a fresh
        uuid4 hex is generated per call when omitted.
    :returns: the webtest response object.

    Bug fix: the previous default ``related_process_id=uuid4().hex`` was
    evaluated once at function definition time, so every call relying on
    the default silently reused the same UUID. A ``None`` sentinel now
    yields a fresh id per call.
    """
    if related_process_id is None:
        related_process_id = uuid4().hex
    return self.app.post_json(
        '/{0}/related_processes'.format(asset_id),
        {
            'data': {
                'relatedProcessID': related_process_id,
                'type': 'lot',
            }
        },
        status=201
    )
# AssetResourceTest
def add_cancellationDetails_document(self, asset):
    """Attach a 'cancellationDetails' document to *asset* and verify that
    both the API response and the stored document carry a signed,
    non-expiring doc-service URL.
    """
    # Add cancellationDetails document
    test_document_data = {
        # 'url': self.generate_docservice_url(),
        'title': u'укр.doc',
        'hash': 'md5:' + '0' * 32,
        'format': 'application/msword',
        'documentType': 'cancellationDetails'
    }
    test_document_data['url'] = self.generate_docservice_url()
    response = self.app.post_json('/{}/documents'.format(asset['id']),
                                  headers=self.access_header,
                                  params={'data': test_document_data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    self.assertIn(doc_id, response.headers['Location'])
    self.assertEqual(u'укр.doc', response.json["data"]["title"])
    # The returned URL must be signed and must not expire.
    self.assertIn('Signature=', response.json["data"]["url"])
    self.assertIn('KeyID=', response.json["data"]["url"])
    self.assertNotIn('Expires=', response.json["data"]["url"])
    # Cross-check against the document stored in the DB: the doc-service
    # key is the last path segment of the URL, before the query string.
    key = response.json["data"]["url"].split('/')[-1].split('?')[0]
    tender = self.db.get(self.resource_id)
    self.assertIn(key, tender['documents'][-1]["url"])
    self.assertIn('Signature=', tender['documents'][-1]["url"])
    self.assertIn('KeyID=', tender['documents'][-1]["url"])
    self.assertNotIn('Expires=', tender['documents'][-1]["url"])
def patch_asset(self):
    """Walk the asset status graph for the owner (broker):

    * draft   -> active/deleted/complete : forbidden
    * draft   -> pending                 : allowed
    * pending -> draft/active/complete   : forbidden
    * pending -> deleted                 : forbidden until a
      'cancellationDetails' document is attached, then allowed
    * deleted -> anything                : forbidden (terminal)

    Also verifies that a successful patch bumps ``dateModified``.

    The repeated PATCH/assert boilerplate of the original is factored
    into the local ``patch_status`` helper; every transition and every
    expected error message is preserved verbatim.
    """
    def patch_status(new_status, forbidden_reason=None):
        # PATCH only the status. When `forbidden_reason` is given, expect
        # HTTP 403 with that error description; otherwise expect success.
        kwargs = dict(headers=self.access_header,
                      params={'data': {'status': new_status}})
        if forbidden_reason is not None:
            kwargs['status'] = 403
        resp = self.app.patch_json('/{}'.format(asset['id']), **kwargs)
        self.assertEqual(resp.content_type, 'application/json')
        if forbidden_reason is None:
            self.assertEqual(resp.status, '200 OK')
            self.assertEqual(resp.json['data']['status'], new_status)
        else:
            self.assertEqual(resp.status, '403 Forbidden')
            self.assertEqual(resp.json['errors'][0]['name'], u'data')
            self.assertEqual(resp.json['errors'][0]['location'], u'body')
            self.assertEqual(resp.json['errors'][0]['description'],
                             forbidden_reason)

    response = self.app.get('/')
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(len(response.json['data']), 0)

    # Any successful patch must advance dateModified.
    asset = self.create_resource()
    dateModified = asset.pop('dateModified')
    response = self.app.patch_json('/{}'.format(asset['id']),
                                   headers=self.access_header,
                                   params={'data': {'title': ' PATCHED'}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertNotEqual(response.json['data']['dateModified'], dateModified)

    asset = self.create_resource()
    self.set_status('draft')
    cant_update = u"Can't update asset in current ({0}) status"

    # Move status from Draft to Active / Deleted / Complete: forbidden.
    for status in ('active', 'deleted', 'complete'):
        patch_status(status, cant_update.format('draft'))
    # Move status from Draft to Pending: allowed.
    patch_status('pending')
    # Move status from Pending to Draft: dedicated error message.
    patch_status('draft', u"Can't switch asset to draft status")
    # Move status from Pending to Active / Complete: forbidden.
    for status in ('active', 'complete'):
        patch_status(status, cant_update.format('pending'))
    # Move status from Pending to Deleted: forbidden until the asset has a
    # 'cancellationDetails' document (note the different error payload:
    # top-level 'status' key, no name/location fields checked).
    response = self.app.patch_json('/{}'.format(asset['id']),
                                   headers=self.access_header,
                                   params={'data': {'status': 'deleted'}},
                                   status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'][0]['description'],
                     u"You can set deleted status "
                     u"only when asset have at least one document with 'cancellationDetails' documentType")
    add_cancellationDetails_document(self, asset)
    # Move status from Pending to Deleted: now allowed.
    patch_status('deleted')
    # Deleted is terminal: every further transition is forbidden.
    for status in ('draft', 'pending', 'active', 'complete'):
        patch_status(status, cant_update.format('deleted'))
def simple_add_asset(self):
    """Round-trip a bare asset model through the database layer."""
    instance = self.asset_model(self.initial_data)
    instance.assetID = "UA-X"
    # Unsaved model has neither an id nor a revision yet.
    assert instance.id is None
    assert instance.rev is None
    instance.store(self.db)
    # Storing assigns both.
    assert instance.id is not None
    assert instance.rev is not None
    stored = self.db.get(instance.id)
    assert stored['assetID'] == instance.assetID
    assert instance.doc_type == "Asset"
    instance.delete_instance(self.db)
# Asset workflow test
# Roles exercised by the workflow tests below. STATUS_BLACKLIST is used as
# STATUS_BLACKLIST[<status>][<role>] -> iterable of forbidden target
# statuses (see change_pending_asset); derivation lives in
# openregistry.assets.core.tests.base.create_blacklist.
ROLES = ['asset_owner', 'Administrator', 'concierge', 'convoy']
STATUS_BLACKLIST = create_blacklist(STATUS_CHANGES, ASSET_STATUSES, ROLES)
def create_asset_with_items(self):
    """An asset may be created with items; 'unit' is mandatory on each item."""
    payload = deepcopy(self.initial_data)
    payload['items'] = [deepcopy(test_loki_item_data)]
    response = self.app.post_json('/', params={'data': payload})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    created_item = response.json['data']['items'][0]
    self.assertIn('id', created_item)
    # Every submitted item field must round-trip unchanged.
    for field in ('unit', 'classification', 'address', 'quantity',
                  'additionalClassifications'):
        self.assertEqual(created_item[field], payload['items'][0][field])
    # Dropping 'unit' must be rejected with a validation error.
    del payload['items'][0]['unit']
    response = self.app.post_json('/', params={'data': payload}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]['description'][0]['unit'],
                     ['This field is required.'])
def dateModified_resource(self):
    """dateModified must change on every modifying request and stay stable
    across plain reads; GET must reflect exactly what PATCH returned."""
    response = self.app.get('/')
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(len(response.json['data']), 0)
    response = self.app.post_json('/', {'data': self.initial_data})
    self.assertEqual(response.status, '201 Created')
    resource = response.json['data']
    token = str(response.json['access']['token'])
    dateModified = resource['dateModified']
    # A plain GET must not touch dateModified.
    response = self.app.get('/{}'.format(resource['id']))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['dateModified'], dateModified)
    # Add decision
    response = self.app.get('/{}'.format(resource['id']))
    old_decs_count = len(response.json['data'].get('decisions', []))
    decision_data = {
        'decisionDate': get_now().isoformat(),
        'decisionID': 'decisionLotID'
    }
    response = self.app.post_json(
        '/{}/decisions'.format(resource['id']),
        {"data": decision_data},
        headers={'X-Access-Token': token}
    )
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.json['data']['decisionDate'], decision_data['decisionDate'])
    self.assertEqual(response.json['data']['decisionID'], decision_data['decisionID'])
    response = self.app.get('/{}'.format(resource['id']))
    present_decs_count = len(response.json['data'].get('decisions', []))
    self.assertEqual(old_decs_count + 1, present_decs_count)
    resource = response.json['data']
    # A status patch is a modification: dateModified must move forward.
    response = self.app.patch_json('/{}'.format(resource['id']),
                                   headers={'X-Access-Token': token}, params={
                                       'data': {'status': 'pending'}
                                   })
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'pending')
    self.assertNotEqual(response.json['data']['dateModified'], dateModified)
    resource = response.json['data']
    dateModified = resource['dateModified']
    # A subsequent GET must return exactly the patched representation.
    response = self.app.get('/{}'.format(resource['id']))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data'], resource)
    self.assertEqual(response.json['data']['dateModified'], dateModified)
def change_pending_asset(self):
    """Exercise each role's allowed/forbidden transitions out of 'pending'
    (convoy, asset_owner/broker, Administrator, concierge), then verify the
    draft -> pending preconditions: at least one item and one decision."""
    response = self.app.get('/')
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(len(response.json['data']), 0)
    asset = self.create_resource()
    self.app.authorization = ('Basic', ('convoy', ''))
    # Move from 'pending' to one of blacklist status
    for status in STATUS_BLACKLIST['pending']['convoy']:
        check_patch_status_403(self, asset['id'], status)
    self.app.authorization = ('Basic', ('broker', ''))
    # Move from 'pending' to one of blacklist status
    for status in STATUS_BLACKLIST['pending']['asset_owner']:
        check_patch_status_403(self, asset['id'], status, self.access_header)
    # Move from 'pending' to 'pending' status
    check_patch_status_200(self, asset['id'], 'pending', self.access_header)
    # Add cancellationDetails document
    add_cancellationDetails_document(self, asset)
    # Move from 'pending' to 'deleted' status
    check_patch_status_200(self, asset['id'], 'deleted', self.access_header)
    asset = self.create_resource()
    # Add cancellationDetails document
    add_cancellationDetails_document(self, asset)
    self.app.authorization = ('Basic', ('administrator', ''))
    # Move from 'pending' to one of blacklist status
    for status in STATUS_BLACKLIST['pending']['Administrator']:
        check_patch_status_403(self, asset['id'], status)
    # Move from 'pending' to 'pending' status
    check_patch_status_200(self, asset['id'], 'pending')
    # Move from 'pending' to 'verification' status
    check_patch_status_200(self, asset['id'], 'verification')
    # Move from 'verification' to 'pending' status
    check_patch_status_200(self, asset['id'], 'pending')
    # Move from 'pending' to 'deleted' status
    check_patch_status_200(self, asset['id'], 'deleted')
    self.app.authorization = ('Basic', ('broker', ''))
    asset = self.create_resource()
    self.app.authorization = ('Basic', ('concierge', ''))
    # Move from 'pending' to one of blacklist status
    for status in STATUS_BLACKLIST['pending']['concierge']:
        check_patch_status_403(self, asset['id'], status)
    # Move from 'pending' to 'pending' status
    check_patch_status_200(self, asset['id'], 'pending')
    # Move from 'pending' to 'verification' status
    check_patch_status_200(self, asset['id'], 'verification')
    self.app.authorization = ('Basic', ('broker', ''))
    # draft -> pending is blocked until the asset has items and a decision.
    data = deepcopy(self.initial_data)
    data['status'] = 'draft'
    data['items'] = []
    response = self.app.post_json('/', params={'data': data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'draft')
    self.assertNotIn('items', response.json['data'])
    asset = response.json['data']
    token = response.json['access']['token']
    access_header = {'X-Access-Token': str(token)}
    response = self.app.patch_json('/{}'.format(asset['id']), params={'data': {'status': 'pending'}}, headers=access_header, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]['description'],
        'You cannot switch the asset status from draft to pending unless at least one item has been added.'
    )
    response = self.app.post_json('/{}/items'.format(asset['id']),
                                  headers=access_header,
                                  params={'data': self.initial_item_data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    response = self.app.patch_json('/{}'.format(asset['id']), params={'data': {'status': 'pending'}}, headers=access_header, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]['description'],
        'You cannot switch the asset status from draft to pending unless at least one decision has been added.'
    )
    response = self.app.post_json('/{}/decisions'.format(asset['id']),
                                  headers=access_header,
                                  params={'data': self.initial_decision_data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    response = self.app.patch_json('/{}'.format(asset['id']), params={'data': {'status': 'pending'}}, headers=access_header)
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'pending')
    self.assertEqual(len(response.json['data']['items']), 1)
def administrator_change_delete_status(self):
    """The Administrator may drive an asset pending -> deleted; once
    deleted, further patches are rejected."""
    resp = self.app.get('/')
    self.assertEqual(resp.status, '200 OK')
    self.assertEqual(len(resp.json['data']), 0)

    self.app.authorization = ('Basic', ('broker', ''))
    asset = self.create_resource()
    resp = self.app.get('/{}'.format(asset['id']))
    self.assertEqual(resp.status, '200 OK')
    self.assertEqual(resp.content_type, 'application/json')
    self.assertEqual(resp.json['data'], asset)

    add_cancellationDetails_document(self, asset)
    self.app.authorization = ('Basic', ('administrator', ''))
    # pending, then deleted: both succeed for the Administrator.
    for new_status in ('pending', 'deleted'):
        resp = self.app.patch_json(
            '/{}'.format(asset['id']),
            {'data': {'status': new_status}}
        )
        self.assertEqual(resp.status, '200 OK')

    # A deleted asset can no longer be patched.
    resp = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'deleted'}}, status=403)
    self.assertEqual(resp.status, '403 Forbidden')
    self.assertEqual(resp.content_type, 'application/json')
    self.assertEqual(resp.json['errors'][0]['name'], u'data')
    self.assertEqual(resp.json['errors'][0]['location'], u'body')
    self.assertEqual(resp.json['errors'][0]['description'],
                     u"Can't update asset in current (deleted) status")
def patch_decimal_item_quantity(self):
    """ Testing different decimal quantity (decimal_numbers) at the root and items of assets."""
    # Precision (decimal places) is configurable per test class; default 3.
    precision = self.precision if hasattr(self, 'precision') else 3
    asset = self.create_resource()
    response = self.app.post_json('/{}/items'.format(asset['id']),
                                  headers=self.access_header,
                                  params={'data': self.initial_item_data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    item_id = response.json["data"]['id']
    self.assertIn(item_id, response.headers['Location'])
    self.assertEqual(self.initial_item_data['description'], response.json["data"]["description"])
    self.assertEqual(self.initial_item_data['quantity'], response.json["data"]["quantity"])
    self.assertEqual(self.initial_item_data['address'], response.json["data"]["address"])
    # Both numeric and string inputs must be stored as numbers, rounded to
    # the configured precision.
    for quantity in [3, '3', 7.658, '7.658', 2.3355, '2.3355']:
        item_data = deepcopy(self.initial_item_data)
        item_data['quantity'] = quantity
        response = self.app.patch_json('/{}/items/{}'.format(asset['id'], item_id),
                                       headers=self.access_header,
                                       params={'data': item_data})
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        response = self.app.get('/{}/items/{}'.format(asset['id'], item_id),
                                headers=self.access_header,
                                params={'data': item_data})
        # `basestring`: this suite targets Python 2.
        self.assertNotIsInstance(response.json['data']['quantity'], basestring)
        rounded_quantity = round(float(quantity), precision)
        self.assertEqual(response.json['data']['quantity'], rounded_quantity)
def rectificationPeriod_autocreation(self):
    """rectificationPeriod.startDate appears when the asset first becomes
    'pending'; endDate appears on activation and is cleared again when the
    asset returns to 'pending'. startDate stays stable throughout."""
    data = deepcopy(self.initial_data)
    data['items'] = [deepcopy(test_loki_item_data)]
    response = self.app.post_json('/', params={'data': data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'draft')
    asset = response.json['data']
    token = response.json['access']['token']
    access_header = {'X-Access-Token': str(token)}
    # Add decision
    decision_data = {
        'decisionDate': get_now().isoformat(),
        'decisionID': 'decisionLotID'
    }
    response = self.app.post_json(
        '/{}/decisions'.format(asset['id']),
        {"data": decision_data},
        headers=access_header
    )
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.json['data']['decisionDate'], decision_data['decisionDate'])
    self.assertEqual(response.json['data']['decisionID'], decision_data['decisionID'])
    self.decision_id = response.json['data']['id']
    # draft -> pending creates the period with only startDate set.
    response = self.app.patch_json('/{}'.format(asset['id']), params={'data': {'status': 'pending'}}, headers=access_header)
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertIn('startDate', response.json['data']['rectificationPeriod'])
    self.assertNotIn('endDate', response.json['data']['rectificationPeriod'])
    rectificationPeriod_startDate = response.json['data']['rectificationPeriod']['startDate']
    self.app.authorization = ('Basic', ('concierge', ''))
    check_patch_status_200(self, asset['id'], 'verification')
    # Activation requires a related lot.
    post_related_process(self, asset['id'])
    check_patch_status_200(self, asset['id'], 'active')
    # Activation sets endDate; startDate is unchanged.
    response = self.app.get('/{}'.format(asset['id']))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['rectificationPeriod']['startDate'], rectificationPeriod_startDate)
    self.assertIn('endDate', response.json['data']['rectificationPeriod'])
    # Returning to 'pending' clears endDate again.
    check_patch_status_200(self, asset['id'], 'pending')
    response = self.app.get('/{}'.format(asset['id']))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['rectificationPeriod']['startDate'], rectificationPeriod_startDate)
    self.assertNotIn('endDate', response.json['data']['rectificationPeriod'])
def rectificationPeriod_endDate_remove(self):
    """An Administrator can clear rectificationPeriod.endDate on an active
    asset by patching it to None; startDate must remain set."""
    data = deepcopy(self.initial_data)
    data['items'] = [deepcopy(test_loki_item_data)]
    response = self.app.post_json('/', params={'data': data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'draft')
    asset = response.json['data']
    token = response.json['access']['token']
    access_header = {'X-Access-Token': str(token)}
    # Add decision
    decision_data = {
        'decisionDate': get_now().isoformat(),
        'decisionID': 'decisionLotID'
    }
    response = self.app.post_json(
        '/{}/decisions'.format(asset['id']),
        {"data": decision_data},
        headers=access_header
    )
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.json['data']['decisionDate'], decision_data['decisionDate'])
    self.assertEqual(response.json['data']['decisionID'], decision_data['decisionID'])
    self.decision_id = response.json['data']['id']
    # draft -> pending creates the period with only startDate set.
    response = self.app.patch_json('/{}'.format(asset['id']), params={'data': {'status': 'pending'}}, headers=access_header)
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertIn('startDate', response.json['data']['rectificationPeriod'])
    self.assertNotIn('endDate', response.json['data']['rectificationPeriod'])
    rectificationPeriod_startDate = response.json['data']['rectificationPeriod']['startDate']
    self.app.authorization = ('Basic', ('concierge', ''))
    check_patch_status_200(self, asset['id'], 'verification')
    post_related_process(self, asset['id'])
    check_patch_status_200(self, asset['id'], 'active')
    # Activation sets endDate; startDate is unchanged.
    response = self.app.get('/{}'.format(asset['id']))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['rectificationPeriod']['startDate'], rectificationPeriod_startDate)
    self.assertIn('endDate', response.json['data']['rectificationPeriod'])
    # The Administrator removes endDate by patching it to None.
    self.app.authorization = ('Basic', ('administrator', ''))
    response = self.app.patch_json(
        '/{}'.format(asset['id']),
        params={'data':
                {'rectificationPeriod':
                 {
                     'endDate': None
                 }
                 }
                },
    )
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertIn('startDate', response.json['data']['rectificationPeriod'])
    self.assertNotIn('endDate', response.json['data']['rectificationPeriod'])
    # Removal is persisted, not just reflected in the PATCH response.
    response = self.app.get('/{}'.format(asset['id']))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertIn('startDate', response.json['data']['rectificationPeriod'])
    self.assertNotIn('endDate', response.json['data']['rectificationPeriod'])
def asset_concierge_patch(self):
    """Exercise the asset status lifecycle under the concierge role.

    Walks draft -> pending -> verification -> active -> complete and
    asserts that each forbidden transition is rejected with 403 and the
    expected error message.
    """
    asset = self.create_resource()
    response = self.app.get('/{}'.format(asset['id']))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data'], asset)
    # Move status from Draft to Pending (owner credentials via access header)
    response = self.app.patch_json('/{}'.format(asset['id']),
                                   headers=self.access_header,
                                   params={'data': {'status': 'pending'}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'pending')
    self.app.authorization = ('Basic', ('concierge', ''))
    # Move status from pending to verification
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'verification'}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'verification')
    # Move status from verification to Pending
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'pending'}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'pending')
    # Move status from pending to verification
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'verification'}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'verification')
    # Move status from verification to Active without relatedProcess
    # (rejected with 422 because activation requires a related lot)
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'active'}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]['description'][0],
        'Asset must have related lot to become active.'
    )
    # Move status from verification to Active
    relatedLot_id = uuid4().hex
    response = post_related_process(self, asset['id'], relatedLot_id)
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['relatedProcessID'], relatedLot_id)
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'active'}})
    self.assertEqual(response.json['data']['status'], 'active')
    # Move status from Active to Draft
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'draft'}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]['name'], u'data')
    self.assertEqual(response.json['errors'][0]['location'], u'body')
    self.assertEqual(response.json['errors'][0]['description'], u"Can't switch asset to draft status")
    # Move status from Active to Deleted
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'deleted'}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]['name'], u'data')
    self.assertEqual(response.json['errors'][0]['location'], u'body')
    self.assertEqual(response.json['errors'][0]['description'], u"Can't update asset in current (active) status")
    # Move status from Active to Pending
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'pending'}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'pending')
    self.assertNotIn('relatedLot', response.json['data'])
    # Move status from Pending to Deleted
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'deleted'}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]['name'], u'data')
    self.assertEqual(response.json['errors'][0]['location'], u'body')
    self.assertEqual(response.json['errors'][0]['description'], u"Can't update asset in current (pending) status")
    # Move status from Pending to Draft
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'draft'}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]['name'], u'data')
    self.assertEqual(response.json['errors'][0]['location'], u'body')
    self.assertEqual(response.json['errors'][0]['description'], u"Can't switch asset to draft status")
    # Move status from Pending to Complete
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'complete'}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]['name'], u'data')
    self.assertEqual(response.json['errors'][0]['location'], u'body')
    self.assertEqual(response.json['errors'][0]['description'], u"Can't update asset in current (pending) status")
    # Move status from pending to verification
    response = self.app.patch_json('/{}'.format(asset['id']),
                                   headers=self.access_header,
                                   params={'data': {'status': 'verification'}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'verification')
    # Move status from verification to active
    # NOTE(review): no new related process is posted here, yet activation
    # succeeds -- presumably the related process posted earlier survives the
    # round-trip through 'pending'; confirm against the model code.
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'active'}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'active')
    # Move status from Active to Complete
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'complete'}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'complete')
    # NOTE(review): the original comments on the next four requests said
    # "Complete to Draft/Pending/Active/Deleted", but every payload patches to
    # 'deleted' -- almost certainly a copy-paste slip.  Each request still
    # verifies that a completed asset rejects updates; TODO confirm the
    # intended target statuses before changing the payloads.
    # Attempted update of a Complete asset (original comment: Complete to Draft)
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'deleted'}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]['name'], u'data')
    self.assertEqual(response.json['errors'][0]['location'], u'body')
    self.assertEqual(response.json['errors'][0]['description'], u"Can't update asset in current (complete) status")
    # Attempted update of a Complete asset (original comment: Complete to Pending)
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'deleted'}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]['name'], u'data')
    self.assertEqual(response.json['errors'][0]['location'], u'body')
    self.assertEqual(response.json['errors'][0]['description'], u"Can't update asset in current (complete) status")
    # Attempted update of a Complete asset (original comment: Complete to Active)
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'deleted'}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]['name'], u'data')
    self.assertEqual(response.json['errors'][0]['location'], u'body')
    self.assertEqual(response.json['errors'][0]['description'], u"Can't update asset in current (complete) status")
    # Move status from Complete to Deleted
    response = self.app.patch_json('/{}'.format(
        asset['id']), {'data': {'status': 'deleted'}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]['name'], u'data')
    self.assertEqual(response.json['errors'][0]['location'], u'body')
    self.assertEqual(response.json['errors'][0]['description'], u"Can't update asset in current (complete) status")
def administrator_change_complete_status(self):
    """Administrators may drive an asset through its whole lifecycle, but
    once it is 'complete' even they cannot delete it."""
    response = self.app.get('/')
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(len(response.json['data']), 0)
    # A broker creates the asset; everything after that runs as administrator.
    self.app.authorization = ('Basic', ('broker', ''))
    asset = self.create_resource()
    asset_url = '/%s' % asset['id']
    response = self.app.get(asset_url)
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data'], asset)
    self.app.authorization = ('Basic', ('administrator', ''))
    # Bounce between 'pending' and 'verification' a few times.
    for next_status in ('pending', 'verification', 'pending',
                        'verification', 'pending', 'verification'):
        response = self.app.patch_json(asset_url,
                                       {'data': {'status': next_status}})
        self.assertEqual(response.status, '200 OK')
    # Activation needs a related process, which only the concierge can post.
    related_process_id = uuid4().hex
    self.app.authorization = ('Basic', ('concierge', ''))
    post_related_process(self, asset['id'], related_process_id)
    self.app.authorization = ('Basic', ('administrator', ''))
    for next_status in ('active', 'complete'):
        response = self.app.patch_json(asset_url,
                                       {'data': {'status': next_status}})
        self.assertEqual(response.status, '200 OK')
    # Deleting a completed asset is forbidden even for administrators.
    response = self.app.patch_json(asset_url,
                                   {'data': {'status': 'deleted'}},
                                   status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]['name'], u'data')
    self.assertEqual(response.json['errors'][0]['location'], u'body')
    self.assertEqual(response.json['errors'][0]['description'], u"Can't update asset in current (complete) status")
def change_verification_asset(self):
    """Check every role's allowed and forbidden transitions out of the
    'verification' status."""
    self.initial_status = 'verification'
    response = self.app.get('/')
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(len(response.json['data']), 0)
    asset = self.create_resource()
    # Owner: every blacklisted target status is rejected.
    for forbidden in STATUS_BLACKLIST['verification']['asset_owner']:
        check_patch_status_403(self, asset['id'], forbidden, self.access_header)
    # Convoy: every blacklisted target status is rejected.
    self.app.authorization = ('Basic', ('convoy', ''))
    for forbidden in STATUS_BLACKLIST['verification']['convoy']:
        check_patch_status_403(self, asset['id'], forbidden)
    # Concierge: blacklisted statuses are rejected ...
    self.app.authorization = ('Basic', ('concierge', ''))
    for forbidden in STATUS_BLACKLIST['verification']['concierge']:
        check_patch_status_403(self, asset['id'], forbidden)
    # ... while verification -> verification/pending/verification succeed.
    for allowed in ('verification', 'pending', 'verification'):
        check_patch_status_200(self, asset['id'], allowed)
    # 'verification' -> 'active' only works once a related process exists.
    post_related_process(self, asset['id'])
    check_patch_status_200(self, asset['id'], 'active')
    # Repeat the same checks as administrator on a fresh asset.
    self.app.authorization = ('Basic', ('broker', ''))
    asset = self.create_resource()
    self.app.authorization = ('Basic', ('administrator', ''))
    for forbidden in STATUS_BLACKLIST['verification']['Administrator']:
        check_patch_status_403(self, asset['id'], forbidden)
    for allowed in ('verification', 'pending', 'verification'):
        check_patch_status_200(self, asset['id'], allowed)
    # The related process is still posted by the concierge role.
    self.app.authorization = ('Basic', ('concierge', ''))
    post_related_process(self, asset['id'])
    self.app.authorization = ('Basic', ('administrator', ''))
    check_patch_status_200(self, asset['id'], 'active')
def change_active_asset(self):
    """Check every role's allowed and forbidden transitions out of the
    'active' status."""
    self.initial_status = 'active'
    response = self.app.get('/')
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(len(response.json['data']), 0)
    asset = self.create_resource()
    # Owner: every blacklisted target status is rejected.
    for forbidden in STATUS_BLACKLIST['active']['asset_owner']:
        check_patch_status_403(self, asset['id'], forbidden, self.access_header)
    # Convoy: every blacklisted target status is rejected.
    self.app.authorization = ('Basic', ('convoy', ''))
    for forbidden in STATUS_BLACKLIST['active']['convoy']:
        check_patch_status_403(self, asset['id'], forbidden)
    # Concierge: blacklisted statuses are rejected ...
    self.app.authorization = ('Basic', ('concierge', ''))
    for forbidden in STATUS_BLACKLIST['active']['concierge']:
        check_patch_status_403(self, asset['id'], forbidden)
    # ... and, once a related process exists, the full round trip
    # active -> pending -> verification -> active -> complete succeeds.
    post_related_process(self, asset['id'])
    for allowed in ('active', 'pending', 'verification', 'active', 'complete'):
        check_patch_status_200(self, asset['id'], allowed)
    # Repeat the same checks as administrator on a fresh asset.
    self.app.authorization = ('Basic', ('broker', ''))
    asset = self.create_resource()
    self.app.authorization = ('Basic', ('administrator', ''))
    for forbidden in STATUS_BLACKLIST['active']['Administrator']:
        check_patch_status_403(self, asset['id'], forbidden)
    # The related process is still posted by the concierge role.
    self.app.authorization = ('Basic', ('concierge', ''))
    post_related_process(self, asset['id'])
    self.app.authorization = ('Basic', ('administrator', ''))
    for allowed in ('active', 'pending', 'verification', 'active', 'complete'):
        check_patch_status_200(self, asset['id'], allowed)
| 43.315539
| 136
| 0.646199
| 5,137
| 45,438
| 5.618649
| 0.042632
| 0.13616
| 0.19842
| 0.100094
| 0.900184
| 0.880262
| 0.863251
| 0.842982
| 0.825832
| 0.820809
| 0
| 0.015911
| 0.183921
| 45,438
| 1,048
| 137
| 43.35687
| 0.762466
| 0.070536
| 0
| 0.745946
| 0
| 0
| 0.20718
| 0.002254
| 0
| 0
| 0
| 0
| 0.397297
| 1
| 0.02027
| false
| 0
| 0.012162
| 0.001351
| 0.033784
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
57bdbbb971430bc346dd2abb8bd29c58d4b67073
| 6,638
|
py
|
Python
|
tests/multitax/unit/test_init.py
|
pirovc/multitax
|
07bc27862f3e55ed5f106518dd2a5b8208482d32
|
[
"MIT"
] | 19
|
2021-03-19T06:39:06.000Z
|
2022-03-07T12:17:23.000Z
|
tests/multitax/unit/test_init.py
|
pirovc/multitax
|
07bc27862f3e55ed5f106518dd2a5b8208482d32
|
[
"MIT"
] | 2
|
2021-05-02T10:24:02.000Z
|
2021-05-02T10:32:38.000Z
|
tests/multitax/unit/test_init.py
|
pirovc/multitax
|
07bc27862f3e55ed5f106518dd2a5b8208482d32
|
[
"MIT"
] | null | null | null |
import unittest
from multitax import CustomTx
from multitax.multitax import MultiTax
class TestInit(unittest.TestCase):
    """Unit tests for MultiTax/CustomTx constructor behaviour: defaults,
    root overrides, undefined-value overrides and optional index builds."""
    # test data (14 nodes)
    #
    # rank-1 (root) 1 ___________
    # / \ \
    # rank-2 2.1 2.2 ______ \
    # / \ \ \ \
    # rank-3 3.1 3.2 3.4 \ \
    # / / \ \ \ \
    # rank-4 *4.1 *4.2 *4.3 *4.4 *4.5 *4.6
    # / |
    # rank-5 *5.1 *5.2
    #
    # names: 1: Node1, 2.1: Node2.1, ...,5.2: Node5.2
    # Minimal custom taxonomy fixture matching the diagram above.
    test_file = "tests/multitax/data_minimal/custom_unit_test.tsv.gz"

    def test_default(self):
        """
        test default values on empty init
        """
        # Empty tax: only the synthetic root node exists.
        tax = MultiTax()
        self.assertEqual(tax.root_parent, "0")
        self.assertEqual(tax.root_node, tax._default_root_node)
        self.assertEqual(tax._default_urls, [])
        self.assertEqual(tax._default_root_node, "1")
        self.assertEqual(tax._nodes, {tax.root_node: '0'})
        self.assertEqual(tax._names, {tax.root_node: 'root'})
        self.assertEqual(tax._ranks, {tax.root_node: 'root'})
        self.assertEqual(tax._lineages, {})
        self.assertEqual(tax._name_nodes, {})
        self.assertEqual(tax._node_children, {})
        self.assertEqual(tax._rank_nodes, {})
        self.assertEqual(tax.undefined_node, None)
        self.assertEqual(tax.undefined_name, None)
        self.assertEqual(tax.undefined_rank, None)
        self.assertEqual(tax.sources, [])
        # Same defaults when loading the minimal custom test file.
        tax = CustomTx(files=self.test_file)
        self.assertEqual(tax.root_parent, "0")
        self.assertEqual(tax.root_node, tax._default_root_node)
        self.assertEqual(tax._default_urls, [])
        self.assertEqual(tax._default_root_node, "1")
        self.assertEqual(tax._nodes[tax.root_node], "0")
        self.assertEqual(tax._names[tax.root_node], "Node1")
        self.assertEqual(tax._ranks[tax.root_node], "rank-1")
        self.assertEqual(tax._lineages, {})
        self.assertEqual(tax._name_nodes, {})
        self.assertEqual(tax._node_children, {})
        self.assertEqual(tax._rank_nodes, {})
        self.assertEqual(tax.undefined_node, None)
        self.assertEqual(tax.undefined_name, None)
        self.assertEqual(tax.undefined_rank, None)
        self.assertEqual(tax.sources, [self.test_file])

    def test_root_values(self):
        """
        test init changing root values
        """
        # New root, not on tree
        tax = MultiTax(root_node="root_n", root_parent="root_p", root_name="newRootName", root_rank="newRootRank")
        self.assertEqual(tax.root_node, "root_n")
        self.assertEqual(tax.root_parent, "root_p")
        # Create new root node and link old default (1) {"root_n": "root_p", "1": "root_p"}
        self.assertEqual(tax._nodes, {tax.root_node: tax.root_parent, tax._default_root_node: tax.root_node})
        self.assertEqual(tax._names, {tax.root_node: 'newRootName'})
        self.assertEqual(tax._ranks, {tax.root_node: 'newRootRank'})
        # Root is a new node not in nodes
        tax = CustomTx(files=self.test_file, root_node="root_n", root_parent="root_p", root_name="newRootName", root_rank="newRootRank")
        self.assertEqual(tax.root_node, "root_n")
        self.assertEqual(tax.root_parent, "root_p")
        # 14 nodes from the file plus the newly created root.
        self.assertEqual(tax.stats()["nodes"], 15)
        # Create new root node and link old default (1) {"root_n": "root_p", "1": "root_p"}
        self.assertEqual(tax.parent(tax.root_node), tax.root_parent)
        self.assertEqual(tax.name(tax.root_node), 'newRootName')
        self.assertEqual(tax.rank(tax.root_node), 'newRootRank')
        # Default root is linked to new root
        self.assertEqual(tax.parent(tax._default_root_node), tax.root_node)
        self.assertEqual(tax.name(tax._default_root_node), "Node1")
        self.assertEqual(tax.rank(tax._default_root_node), "rank-1")
        # Root is an existing node in nodes, but not default, filter tree under node
        tax = CustomTx(files=self.test_file, root_node="4.4", root_parent="root_p", root_name="newRootName", root_rank="newRootRank")
        self.assertEqual(tax.root_node, "4.4")
        self.assertEqual(tax.root_parent, "root_p")
        # Only the subtree under 4.4 (4.4, 5.1, 5.2) remains.
        self.assertEqual(tax.stats()["nodes"], 3)
        # Create new root node and link old default (1) {"root_n": "root_p", "1": "root_p"}
        self.assertEqual(tax.parent(tax.root_node), tax.root_parent)
        self.assertEqual(tax.name(tax.root_node), 'newRootName')
        self.assertEqual(tax.rank(tax.root_node), 'newRootRank')
        # default root should not exist
        self.assertEqual(tax.parent(tax._default_root_node), tax.undefined_node)
        self.assertEqual(tax.name(tax._default_root_node), tax.undefined_name)
        self.assertEqual(tax.rank(tax._default_root_node), tax.undefined_rank)

    def test_undefined_values(self):
        """
        test init changing undefined values
        """
        # Custom sentinels are returned for lookups of unknown nodes.
        tax = MultiTax(undefined_node="unode", undefined_rank="urank", undefined_name="uname")
        self.assertEqual(tax.undefined_node, "unode")
        self.assertEqual(tax.undefined_name, "uname")
        self.assertEqual(tax.undefined_rank, "urank")
        self.assertEqual(tax.parent("XXX"), "unode")
        self.assertEqual(tax.rank("XXX"), "urank")
        self.assertEqual(tax.name("XXX"), "uname")
        tax = CustomTx(files=self.test_file, undefined_node="unode", undefined_rank="urank", undefined_name="uname")
        self.assertEqual(tax.undefined_node, "unode")
        self.assertEqual(tax.undefined_name, "uname")
        self.assertEqual(tax.undefined_rank, "urank")
        self.assertEqual(tax.parent("XXX"), "unode")
        self.assertEqual(tax.rank("XXX"), "urank")
        self.assertEqual(tax.name("XXX"), "uname")

    def test_build_values(self):
        """
        test init enabling the optional build_* lookup structures
        (name->nodes, parent->children and rank->nodes indices)
        """
        tax = MultiTax(build_node_children=True, build_name_nodes=True, build_rank_nodes=True)
        self.assertEqual(tax._name_nodes, {tax.name(tax.root_node): [tax.root_node]})
        self.assertEqual(tax._node_children, {tax.root_parent: [tax.root_node]})
        self.assertEqual(tax._rank_nodes, {"root": [tax.root_node]})
        # With a real file the indices are populated (exact content not pinned).
        tax = CustomTx(files=self.test_file, build_node_children=True, build_name_nodes=True, build_rank_nodes=True)
        self.assertNotEqual(len(tax._name_nodes), 0)
        self.assertNotEqual(len(tax._node_children), 0)
        self.assertNotEqual(len(tax._rank_nodes), 0)
| 46.41958
| 136
| 0.635432
| 846
| 6,638
| 4.743499
| 0.101655
| 0.254174
| 0.305009
| 0.080738
| 0.844007
| 0.811612
| 0.777972
| 0.730875
| 0.685771
| 0.62771
| 0
| 0.013664
| 0.228231
| 6,638
| 142
| 137
| 46.746479
| 0.769666
| 0.154565
| 0
| 0.505618
| 0
| 0
| 0.074027
| 0.009276
| 0
| 0
| 0
| 0
| 0.797753
| 1
| 0.044944
| false
| 0
| 0.033708
| 0
| 0.101124
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
aa05461a9f836142c077c1c1ea3390d0a1a3b30b
| 5,708
|
py
|
Python
|
src/regex_patterns.py
|
stfbk/mqttsa1
|
147291791a712129e699744936e39a5792d60d03
|
[
"Apache-2.0"
] | 24
|
2019-07-09T05:37:07.000Z
|
2022-03-28T16:43:00.000Z
|
src/regex_patterns.py
|
stfbk/mqttsa1
|
147291791a712129e699744936e39a5792d60d03
|
[
"Apache-2.0"
] | 6
|
2020-04-29T18:16:17.000Z
|
2022-03-12T12:23:29.000Z
|
src/regex_patterns.py
|
stfbk/mqttsa1
|
147291791a712129e699744936e39a5792d60d03
|
[
"Apache-2.0"
] | 6
|
2019-12-26T03:17:55.000Z
|
2022-03-12T15:00:02.000Z
|
import re
# Regex patterns used to scan intercepted MQTT messages for interesting data.
# All patterns are raw strings so that regex escapes such as \b and \. reach
# the regex engine intact: in a plain string "\b" is a backspace character.
# Sanity-check pattern: alternating letter/digit groups like "A12B3"
pattern_test = re.compile(r"^([A-Z][0-9]+)+$")
# Regex for Mac addresses (six colon-separated hex octets)
pattern_mac_address = re.compile(r"([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])")
# Regex for IPv4 addresses (dotted quad; octets are not range-checked)
pattern_ipv4 = re.compile(r"([0-9]{1,3}[\.]){3}[0-9]{1,3}")
# Regex for Domain names / URLs with optional scheme, port and path
pattern_domain_names = re.compile(r"(http:\/\/www\.|https:\/\/www\.|http:\/\/|https:\/\/)?[a-z0-9]+([\-\.]{1}[a-z0-9]+)*\.[a-z]{2,5}(:[0-9]{1,5})?(\/.*)?")
# Regex for email addresses.
# BUG FIX: the original used a plain string, so "\b" was a literal backspace
# (\x08) and the pattern could only match text surrounded by backspace bytes;
# the raw string restores the intended word-boundary anchors.
pattern_email = re.compile(r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,6}\b")
# Regex for "pass/pss/key"
pattern_passw = re.compile(r"(pass|pss|key)")
# Regex for "device/iot/board"
pattern_iot = re.compile(r"(device|iot|board)")
# Regex from MQTT PWN (known IoT platform names)
pattern_iot_2 = re.compile(r"(openHAB|HomeAssistant|Domoticz|HomeBridge|HomeSeer|SmartThings|SonWEB|Yeti|NodeRed|harmony|iobroker|zwave|sonoff|itead|owntracks)")
# Regex for "message/msg"
pattern_msg = re.compile(r"(message|msg)")
# Regex for "online/offline/state/statu"
pattern_status = re.compile(r"(online|offline|state|statu)")
# Regex for "endpoint/end-point/api"
pattern_endpoint = re.compile(r"(endpoint|end\-point|api)")
# Regex for dates: d/m/y or y/m/d with "/", "-", "." or "\" separators
# (r"\\" below is the regex escape for one literal backslash, equivalent to
# the original non-raw "\\\\")
pattern_dates = re.compile(r"(([1-9]|[0-2][0-9]|(3)[0-1])(\/|\-|\.|\\)((0)?[1-9]|((1)[0-2]))(\/|\-|\.|\\)[0-9]{2,4})|(([0-9]{2,4})(\/|\-|\.|\\)(((0)?[1-9])|((1)[0-2]))(\/|\-|\.|\\)([1-9]|[0-2][0-9]|(3)[0-1]))")
# Regex for phone numbers with country codes
# NOTE(review): one giant alternation of "+<country code>" prefixes each
# followed by at least five digits.  It relies on "\+" surviving a non-raw
# string (it does, because \+ is not a recognised string escape); consider a
# raw string for consistency with the other patterns.  A few alternatives
# (e.g. "1-939", "1-829") lack the leading "\+" -- presumably intentional
# for numbers written without "+"; TODO confirm.
pattern_phones = re.compile("(\+263[0-9]{5,}|\+260[0-9]{5,}|\+967[0-9]{5,}|\+212[0-9]{5,}|\+681[0-9]{5,}|\+1-340[0-9]{5,}|\+84[0-9]{5,}|\+58[0-9]{5,}|\+379[0-9]{5,}|\+678[0-9]{5,}|\+998[0-9]{5,}|\+1[0-9]{5,}|\+598[0-9]{5,}|\+380[0-9]{5,}|\+44[0-9]{5,}|\+256[0-9]{5,}|\+971[0-9]{5,}|\+688[0-9]{5,}|\+1-649[0-9]{5,}|\+993[0-9]{5,}|\+90[0-9]{5,}|\+216[0-9]{5,}|\+1-868[0-9]{5,}|\+676[0-9]{5,}|\+690[0-9]{5,}|\+228[0-9]{5,}|\+66[0-9]{5,}|\+255[0-9]{5,}|\+992[0-9]{5,}|\+886[0-9]{5,}|\+963[0-9]{5,}|\+41[0-9]{5,}|\+46[0-9]{5,}|\+268[0-9]{5,}|\+47[0-9]{5,}|\+597[0-9]{5,}|\+249[0-9]{5,}|\+1-784[0-9]{5,}|\+508[0-9]{5,}|\+590[0-9]{5,}|\+1-758[0-9]{5,}|\+1-869[0-9]{5,}|\+290[0-9]{5,}|\+94[0-9]{5,}|\+34[0-9]{5,}|\+211[0-9]{5,}|\+82[0-9]{5,}|\+27[0-9]{5,}|\+252[0-9]{5,}|\+677[0-9]{5,}|\+386[0-9]{5,}|\+421[0-9]{5,}|\+1-721[0-9]{5,}|\+65[0-9]{5,}|\+232[0-9]{5,}|\+248[0-9]{5,}|\+381[0-9]{5,}|\+221[0-9]{5,}|\+966[0-9]{5,}|\+239[0-9]{5,}|\+378[0-9]{5,}|\+685[0-9]{5,}|\+590[0-9]{5,}|\+250[0-9]{5,}|\+7[0-9]{5,}|\+40[0-9]{5,}|\+262[0-9]{5,}|\+974[0-9]{5,}|\+1-787[0-9]{5,}|1-939[0-9]{5,}|\+351[0-9]{5,}|\+48[0-9]{5,}|\+64[0-9]{5,}|\+63[0-9]{5,}|\+51[0-9]{5,}|\+595[0-9]{5,}|\+675[0-9]{5,}|\+507[0-9]{5,}|\+970[0-9]{5,}|\+680[0-9]{5,}|\+92[0-9]{5,}|\+968[0-9]{5,}|\+47[0-9]{5,}|\+850[0-9]{5,}|\+1-670[0-9]{5,}|\+683[0-9]{5,}|\+234[0-9]{5,}|\+227[0-9]{5,}|\+505[0-9]{5,}|\+64[0-9]{5,}|\+687[0-9]{5,}|\+599[0-9]{5,}|\+31[0-9]{5,}|\+977[0-9]{5,}|\+674[0-9]{5,}|\+264[0-9]{5,}|\+258[0-9]{5,}|\+212[0-9]{5,}|\+1-664[0-9]{5,}|\+382[0-9]{5,}|\+976[0-9]{5,}|\+377[0-9]{5,}|\+373[0-9]{5,}|\+691[0-9]{5,}|\+52[0-9]{5,}|\+262[0-9]{5,}|\+230[0-9]{5,}|\+222[0-9]{5,}|\+692[0-9]{5,}|\+356[0-9]{5,}|\+223[0-9]{5,}|\+960[0-9]{5,}|\+60[0-9]{5,}|\+265[0-9]{5,}|\+261[0-9]{5,}|\+389[0-9]{5,}|\+853[0-9]{5,}|\+352[0-9]{5,}|\+370[0-9]{5,}|\+423[0-9]{5,}|\+218[0-9]{5,}|\+231[0-9]{5,}|\+266[0-9]{5,}|\+961[0-9]{5,}|\+371[0-9]{5,}|\+856[0-9]{5,}|\+996[0-9]{5,}|\+965[0-9]{5,}|\+383[0-9]{5,}|\+686[0-9]{5,}|\+254[0-9]{5,}|\+7[0-9]{5,}|\+962[0-9]{5,}|\+44-1534[0-9]{5,}|\+81[0-9]{5,}|\+1-876[0-9]{5,}|\+225[0-9]{5,}|\+39[0-9]{5,}|\+972[0-9]{5,}|\+44-1624[0-9]{5,}|\+353[0-9]{5,}|\+964[0-9]{5,}|\+98[0-9]{5,}|\+62[0-9]{5,}|\+91[0-9]{5,}|\+354[0-9]{5,}|\+36[0-9]{5,}|\+852[0-9]{5,}|\+504[0-9]{5,}|\+509[0-9]{5,}|\+592[0-9]{5,}|\+245[0-9]{5,}|\+224[0-9]{5,}|\+44-1481[0-9]{5,}|\+502[0-9]{5,}|\+1-671[0-9]{5,}|\+1-473[0-9]{5,}|\+299[0-9]{5,}|\+30[0-9]{5,}|\+350[0-9]{5,}|\+233[0-9]{5,}|\+49[0-9]{5,}|\+995[0-9]{5,}|\+220[0-9]{5,}|\+241[0-9]{5,}|\+689[0-9]{5,}|\+33[0-9]{5,}|\+358[0-9]{5,}|\+679[0-9]{5,}|\+298[0-9]{5,}|\+500[0-9]{5,}|\+251[0-9]{5,}|\+372[0-9]{5,}|\+291[0-9]{5,}|\+240[0-9]{5,}|\+503[0-9]{5,}|\+20[0-9]{5,}|\+593[0-9]{5,}|\+670[0-9]{5,}|\+1-809[0-9]{5,}|1-829[0-9]{5,}|1-849[0-9]{5,}|\+1-767[0-9]{5,}|\+253[0-9]{5,}|\+45[0-9]{5,}|\+420[0-9]{5,}|\+357[0-9]{5,}|\+599[0-9]{5,}|\+53[0-9]{5,}|\+385[0-9]{5,}|\+506[0-9]{5,}|\+682[0-9]{5,}|\+243[0-9]{5,}|\+242[0-9]{5,}|\+269[0-9]{5,}|\+57[0-9]{5,}|\+61[0-9]{5,}|\+61[0-9]{5,}|\+86[0-9]{5,}|\+56[0-9]{5,}|\+235[0-9]{5,}|\+236[0-9]{5,}|\+1-345[0-9]{5,}|\+238[0-9]{5,}|\+1[0-9]{5,}|\+237[0-9]{5,}|\+855[0-9]{5,}|\+257[0-9]{5,}|\+95[0-9]{5,}|\+226[0-9]{5,}|\+359[0-9]{5,}|\+673[0-9]{5,}|\+1-284[0-9]{5,}|\+246[0-9]{5,}|\+55[0-9]{5,}|\+267[0-9]{5,}|\+387[0-9]{5,}|\+591[0-9]{5,}|\+975[0-9]{5,}|\+1-441[0-9]{5,}|\+229[0-9]{5,}|\+501[0-9]{5,}|\+32[0-9]{5,}|\+375[0-9]{5,}|\+1-246[0-9]{5,}|\+880[0-9]{5,}|\+973[0-9]{5,}|\+1-242[0-9]{5,}|\+994[0-9]{5,}|\+43[0-9]{5,}|\+61[0-9]{5,}|\+297[0-9]{5,}|\+374[0-9]{5,}|\+54[0-9]{5,}|\+1-268[0-9]{5,}|\+672[0-9]{5,}|\+1-264[0-9]{5,}|\+244[0-9]{5,}|\+376[0-9]{5,}|\+1-684[0-9]{5,}|\+213[0-9]{5,}|\+355[0-9]{5,}|\+93[0-9]{5,})")
# Regex for mastercard/visa/american express numbers (full-string match only:
# each alternative is anchored with ^...$).  Raw strings are used for
# consistency; these patterns contain no string-level escapes, so behaviour
# is unchanged.
pattern_cards = re.compile(r"(^4[0-9]{12}(?:[0-9]{3})?$|^(?:5[1-5][0-9]{2}|222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}$|^3[47][0-9]{13}$|^3(?:0[0-5]|[68][0-9])[0-9]{11}$|^6(?:011|5[0-9]{2})[0-9]{12}$)")
# Regex for directories: Windows back-slash or POSIX forward-slash paths, or
# the bare word "path".  Note r"\\" is the regex escape for ONE literal
# backslash -- equivalent to the original non-raw "\\\\".
pattern_dir = re.compile(r"((\.)*((\\)+[A-Za-z0-9_\s]{1,})+(\.[A-Za-z0-9_\s]{1,})?)|((\.)*((\/)+[A-Za-z0-9_\s]{1,})+(\.[A-Za-z0-9_\s]{1,})?|path)")
# Regex for "lat/long/loc"
pattern_gps = re.compile(r"(lat|lon|loc)")
| 167.882353
| 3,680
| 0.46356
| 1,370
| 5,708
| 1.913869
| 0.241606
| 0.20061
| 0.278032
| 0.042715
| 0.099924
| 0.096873
| 0.086575
| 0.028223
| 0.019069
| 0.012204
| 0
| 0.288985
| 0.028206
| 5,708
| 34
| 3,681
| 167.882353
| 0.183703
| 0.083567
| 0
| 0
| 0
| 0.470588
| 0.89755
| 0.883272
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.058824
| 0.058824
| 0
| 0.058824
| 0
| 0
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
aa069975a2042dd2ae2a0951b06b978e93def839
| 9,329
|
py
|
Python
|
glyce/dataset_readers/bert_single_sent.py
|
TimSYQQX/glyce
|
1542ed30ce104c25aa5c69ffcc9cc5ef2fcda975
|
[
"Apache-2.0"
] | 396
|
2019-05-11T09:26:03.000Z
|
2022-03-30T11:08:23.000Z
|
glyce/dataset_readers/bert_single_sent.py
|
TimSYQQX/glyce
|
1542ed30ce104c25aa5c69ffcc9cc5ef2fcda975
|
[
"Apache-2.0"
] | 46
|
2019-06-03T07:41:40.000Z
|
2022-03-16T07:11:04.000Z
|
glyce/dataset_readers/bert_single_sent.py
|
TimSYQQX/glyce
|
1542ed30ce104c25aa5c69ffcc9cc5ef2fcda975
|
[
"Apache-2.0"
] | 75
|
2019-06-27T08:35:54.000Z
|
2022-03-29T01:23:19.000Z
|
# encoding: utf-8
"""
@author: Yuxian Meng
@contact: yuxian_meng@shannonai.com
@version: 1.0
@file: sentence_pair_processor
@time: 2019/4/8 14:58

Dataset readers that turn single-sentence classification corpora into
InputExample lists for BERT fine-tuning.  (The original template line here
was a Chinese placeholder: "write the description of this file starting from
this line".)
"""
import os
import sys

# Make the repository root importable when this file is executed directly:
# climb three directory levels up from this file and prepend it to sys.path.
root_path = "/".join(os.path.realpath(__file__).split("/")[:-3])
if root_path not in sys.path:
    sys.path.insert(0, root_path)
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, \
SequentialSampler
import csv
import json
import logging
import argparse
import random
import numpy as np
from tqdm import tqdm
from glyce.dataset_readers.bert_data_utils import *
def read_json(file):
    """Read a JSON-lines file (one JSON document per line).

    Returns a list with one decoded object per line, in file order, showing
    a tqdm progress bar while reading.  Raises json.JSONDecodeError on a
    malformed line.
    """
    data = []
    print("read json:")
    with open(file, 'r', encoding='utf8') as f:
        # Iterate the file object lazily instead of f.readlines(): the
        # original materialised the entire file in memory before parsing.
        for line in tqdm(f):
            data.append(json.loads(line.strip()))
    return data
class ChinaNewsProcessor(DataProcessor):
    """Processor for the dbqa data set """

    def get_train_examples(self, data_dir):
        """See base class."""
        lines = read_json(os.path.join(data_dir, "train.json"))
        return self._create_examples(lines, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        lines = read_json(os.path.join(data_dir, "valid.json"))
        return self._create_examples(lines, "dev_matched")

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = read_json(os.path.join(data_dir, "test.json"))
        return self._create_examples(lines, "test_matched")

    def get_labels(self):
        """See base class."""
        return ["1", "2", "3", "4", "5", "6", "7"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        # text may have multiple fields, join and separate by [SEP]
        return [
            InputExample(guid="%s-%s" % (set_type, idx),
                         text_a=entry["sentence"],
                         label=entry["gold_label"])
            for idx, entry in enumerate(lines)
        ]
class DianPingProcessor(DataProcessor):
    """Processor for the dbqa data set """

    def get_train_examples(self, data_dir):
        """See base class."""
        lines = read_json(os.path.join(data_dir, "train.json"))
        return self._create_examples(lines, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        lines = read_json(os.path.join(data_dir, "valid.json"))
        return self._create_examples(lines, "dev_matched")

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = read_json(os.path.join(data_dir, "test.json"))
        return self._create_examples(lines, "test_matched")

    def get_labels(self):
        """See base class."""
        return ["1", "2"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        # text may have multiple fields, join and separate by [SEP]
        return [
            InputExample(guid="%s-%s" % (set_type, idx),
                         text_a=entry["sentence"],
                         label=entry["gold_label"])
            for idx, entry in enumerate(lines)
        ]
class JDFullProcessor(DataProcessor):
    """Processor for the JD full-rating data set (json lines, 5 labels)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.json")
        return self._create_examples(read_json(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "valid.json")
        return self._create_examples(read_json(path), "dev_matched")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.json")
        return self._create_examples(read_json(path), "test_matched")

    def get_labels(self):
        """See base class."""
        return [str(k) for k in range(1, 6)]

    def _create_examples(self, lines, set_type):
        """Build one single-sentence InputExample per json record."""
        return [
            InputExample(
                guid="%s-%s" % (set_type, idx),
                text_a=row["sentence"],
                label=row["gold_label"],
            )
            for idx, row in enumerate(lines)
        ]
class JDBinaryProcessor(DataProcessor):
    """Processor for the JD binary-rating data set (csv with header row)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_csv(os.path.join(data_dir, "train.csv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_csv(os.path.join(data_dir, "valid.csv")),
            "dev_matched")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_csv(os.path.join(data_dir, "test.csv")),
            "test_matched")

    def get_labels(self):
        """See base class."""
        return ["1", "2"]

    @staticmethod
    def _strip_sep(text, sep="[SEP]"):
        """Remove leading/trailing ``sep`` tokens from *text*.

        Fixes a latent bug: ``text.strip("[SEP]")`` treats its argument as
        a *character set*, so it would also strip real leading/trailing
        'S'/'E'/'P'/'['/']' characters from the sentence itself. Strip
        whole tokens instead.
        """
        while text.startswith(sep):
            text = text[len(sep):]
        while text.endswith(sep):
            text = text[:-len(sep)]
        return text

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        The first row is a csv header and is skipped. All fields after the
        leading label column are joined with a literal [SEP] separator.
        """
        examples = []
        for (i, line) in enumerate(lines[1:]):
            guid = "%s-%s" % (set_type, i)
            # text may have multiple fields, join and separate by [SEP]
            text_a = self._strip_sep("[SEP]".join(line[1:]))
            label = line[0]
            examples.append(
                InputExample(guid=guid, text_a=text_a, label=label))
        return examples
class FuDanProcessor(DataProcessor):
    """Processor for the FuDan document data set (json lines, 5 labels)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.json")
        return self._create_examples(read_json(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "valid.json")
        return self._create_examples(read_json(path), "dev_matched")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.json")
        return self._create_examples(read_json(path), "test_matched")

    def get_labels(self):
        """See base class."""
        return [str(k) for k in range(1, 6)]

    def _create_examples(self, lines, set_type):
        """Build one InputExample per record; documents are clipped."""
        return [
            InputExample(
                guid="%s-%s" % (set_type, idx),
                # Keep only the first 512 characters of each document.
                text_a=row["doc"][:512],
                label=str(row["gold_label"]),
            )
            for idx, row in enumerate(lines)
        ]
class ChnSentiCorpProcessor(DataProcessor):
    """Processor for the ChnSentiCorp data set (tsv with header row)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        rows = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(rows, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        rows = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
        return self._create_examples(rows, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        rows = self._read_tsv(os.path.join(data_dir, "test.tsv"))
        return self._create_examples(rows, "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Build examples from (label, text) rows; row 0 is the header."""
        return [
            InputExample(
                guid="%s-%s" % (set_type, idx),
                text_a=row[1],
                label=row[0],
            )
            for idx, row in enumerate(lines)
            if idx != 0
        ]
class ifengProcessor(DataProcessor):
    """Processor for the ifeng data set (csv-read tsv files, 5 labels)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_csv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_csv(os.path.join(data_dir, "valid.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_csv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["1", "2", "3", "4", "5"]

    @staticmethod
    def _strip_sep(text, sep="[SEP]"):
        """Remove leading/trailing ``sep`` tokens from *text*.

        Fixes a latent bug: ``text.strip("[SEP]")`` treats its argument as
        a *character set*, so it would also strip real leading/trailing
        'S'/'E'/'P'/'['/']' characters from the sentence itself. Strip
        whole tokens instead.
        """
        while text.startswith(sep):
            text = text[len(sep):]
        while text.endswith(sep):
            text = text[:-len(sep)]
        return text

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        All fields after the leading label column are joined with a
        literal [SEP] separator. NOTE(review): unlike JDBinaryProcessor,
        no header row is skipped here -- confirm the tsv files have none.
        """
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            label = line[0]
            text_a = self._strip_sep("[SEP]".join(line[1:]))
            examples.append(InputExample(guid=guid, text_a=text_a, label=label))
        return examples
| 31.305369
| 98
| 0.589774
| 1,191
| 9,329
| 4.418136
| 0.108312
| 0.055872
| 0.063854
| 0.095781
| 0.847586
| 0.847586
| 0.847586
| 0.847586
| 0.839985
| 0.839985
| 0
| 0.008081
| 0.270447
| 9,329
| 297
| 99
| 31.410774
| 0.76506
| 0.158538
| 0
| 0.734104
| 0
| 0
| 0.068572
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.208092
| false
| 0
| 0.063584
| 0
| 0.520231
| 0.00578
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
109ef27583edfebc6807daaaffa3185533b12d64
| 177
|
py
|
Python
|
test/unit/test_server_main.py
|
nklapste/undiscord
|
221b8387561494f1c721db21ef05729e0abb6b08
|
[
"MIT"
] | 3
|
2019-06-14T21:36:08.000Z
|
2020-12-21T09:25:30.000Z
|
test/unit/test_server_main.py
|
nklapste/undiscord
|
221b8387561494f1c721db21ef05729e0abb6b08
|
[
"MIT"
] | 3
|
2019-01-13T21:06:04.000Z
|
2019-01-14T06:56:44.000Z
|
test/unit/test_server_main.py
|
nklapste/undiscord
|
221b8387561494f1c721db21ef05729e0abb6b08
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import argparse
from undiscord.server.__main__ import get_parser
def test_get_parser():
    """The server's get_parser must return an argparse.ArgumentParser."""
    parser = get_parser()
    assert isinstance(parser, argparse.ArgumentParser)
| 17.7
| 60
| 0.751412
| 22
| 177
| 5.681818
| 0.727273
| 0.216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006536
| 0.135593
| 177
| 9
| 61
| 19.666667
| 0.810458
| 0.118644
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
10b320399c55da0026a785a59be78bedcd373629
| 2,891
|
py
|
Python
|
sgnlp/models/csgec/modules/conv_glu.py
|
raymondng76/sgnlp
|
f09eada90ef5b1ee979901e5c14413d32e758049
|
[
"MIT"
] | 14
|
2021-08-02T01:52:18.000Z
|
2022-01-14T10:16:02.000Z
|
sgnlp/models/csgec/modules/conv_glu.py
|
raymondng76/sgnlp
|
f09eada90ef5b1ee979901e5c14413d32e758049
|
[
"MIT"
] | 29
|
2021-08-02T01:53:46.000Z
|
2022-03-30T05:40:46.000Z
|
sgnlp/models/csgec/modules/conv_glu.py
|
raymondng76/sgnlp
|
f09eada90ef5b1ee979901e5c14413d32e758049
|
[
"MIT"
] | 7
|
2021-08-02T01:54:19.000Z
|
2022-01-07T06:37:45.000Z
|
from numpy import sqrt
import torch.nn as nn
import torch.nn.functional as F
class ConvGLU(nn.Module):
    """
    CNN encoder block: a same-length 1D convolution, a GLU activation, and
    a residual (skip) connection scaled by sqrt(0.5).

    NOTE(review): ``self.dropout`` is constructed in ``__init__`` but never
    applied in ``forward``, and no linear layer exists here -- confirm
    whether dropout was meant to be used.
    """

    def __init__(self, input_dim, kernel_size, dropout):
        """
        input_dim : int
            Encoder input (and output) embedding dimension size.
        kernel_size : int
            Number of tokens covered by each convolution window.
        dropout : float
            Probability of zeroing each embedding dimension during training.
        """
        super(ConvGLU, self).__init__()
        # Output channels are doubled because GLU halves them back to
        # input_dim; padding keeps the sequence length unchanged.
        self.conv = nn.Conv1d(
            in_channels=input_dim,
            out_channels=2 * input_dim,
            kernel_size=kernel_size,
            padding=int((kernel_size - 1) / 2),
        )
        self.dropout = nn.Dropout2d(dropout)

    def forward(self, H):
        """
        H : torch Tensor
            Shape (batch, seq_len, input_dim). Returns the same shape.
        """
        skip = H
        # Conv1d wants (batch, channels, seq); transpose around the conv.
        out = self.conv(H.transpose(1, 2)).transpose(1, 2)
        out = F.glu(out)
        # Residual connection, rescaled to keep activation variance stable.
        return (out + skip) * sqrt(0.5)
class ConvGLUDecoder(nn.Module):
    """
    CNN decoder block: left-pads the sequence up to ``kernel_size``, then
    applies a 1D convolution and a GLU activation (no residual here).
    """

    def __init__(self, input_dim, kernel_size, dropout, padding_idx):
        """
        input_dim : int
            Decoder input (and output) embedding dimension size.
        kernel_size : int
            Number of tokens covered by each convolution window.
        dropout : float
            NOTE(review): accepted but never stored or used -- confirm.
        padding_idx : int
            Stored for reference; the pad value actually used is 0.
        """
        super(ConvGLUDecoder, self).__init__()
        self.conv = nn.Conv1d(
            in_channels=input_dim,
            out_channels=2 * input_dim,  # GLU halves this back to input_dim
            kernel_size=kernel_size,
            padding=0,
        )
        self.padding_idx = padding_idx
        self.kernel_size = kernel_size

    def forward(self, H):
        """
        H : torch Tensor
            Shape (batch, seq_len, input_dim).
        """
        out = H.transpose(1, 2)
        # Left-pad with zeros so the sequence spans exactly kernel_size
        # steps. TODO Check the padding idx. NOTE(review): when
        # seq_len > kernel_size the pad amount is negative, which
        # truncates leading positions -- confirm that is intended.
        out = F.pad(out, (self.kernel_size - out.shape[2], 0), value=0)
        out = self.conv(out)
        return F.glu(out.transpose(1, 2))
| 33.616279
| 195
| 0.595296
| 380
| 2,891
| 4.410526
| 0.265789
| 0.083532
| 0.041766
| 0.02864
| 0.807279
| 0.807279
| 0.807279
| 0.789976
| 0.789976
| 0.789976
| 0
| 0.013747
| 0.32065
| 2,891
| 85
| 196
| 34.011765
| 0.839613
| 0.447942
| 0
| 0.487805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011765
| 0
| 1
| 0.097561
| false
| 0
| 0.073171
| 0
| 0.268293
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
52bfb95b8653fb8680e5455898da00c99e95fdd0
| 35,461
|
py
|
Python
|
third_party/libSBML-5.9.0-Source/src/bindings/python/test/sbml/TestModel_newSetters.py
|
0u812/roadrunner
|
f464c2649e388fa1f5a015592b0b29b65cc84b4b
|
[
"Apache-2.0"
] | 5
|
2015-04-16T14:27:38.000Z
|
2021-11-30T14:54:39.000Z
|
third_party/libSBML-5.9.0-Source/src/bindings/python/test/sbml/TestModel_newSetters.py
|
0u812/roadrunner
|
f464c2649e388fa1f5a015592b0b29b65cc84b4b
|
[
"Apache-2.0"
] | 95
|
2015-03-06T12:14:06.000Z
|
2015-03-20T11:15:54.000Z
|
third_party/libSBML-5.9.0-Source/src/bindings/python/test/sbml/TestModel_newSetters.py
|
0u812/roadrunner
|
f464c2649e388fa1f5a015592b0b29b65cc84b4b
|
[
"Apache-2.0"
] | 7
|
2016-05-29T08:12:59.000Z
|
2019-05-02T13:39:25.000Z
|
#
# @file TestModel_newSetters.py
# @brief Model unit tests for new set function API
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestModel_newSetters.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestModel_newSetters(unittest.TestCase):
global M
M = None
def setUp(self):
self.M = libsbml.Model(2,4)
if (self.M == None):
pass
pass
def tearDown(self):
_dummyList = [ self.M ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartment1(self):
m = libsbml.Model(2,2)
c = libsbml.Compartment(2,2)
i = m.addCompartment(c)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
c.setId( "c")
i = m.addCompartment(c)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumCompartments() == 1 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartment2(self):
m = libsbml.Model(2,2)
c = libsbml.Compartment(2,1)
c.setId( "c")
i = m.addCompartment(c)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumCompartments() == 0 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartment3(self):
m = libsbml.Model(2,2)
c = libsbml.Compartment(1,2)
c.setId( "c")
i = m.addCompartment(c)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumCompartments() == 0 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartment4(self):
m = libsbml.Model(2,2)
c = None
i = m.addCompartment(c)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumCompartments() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartment5(self):
m = libsbml.Model(2,2)
c = libsbml.Compartment(2,2)
c.setId( "c")
c1 = libsbml.Compartment(2,2)
c1.setId( "c")
i = m.addCompartment(c)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumCompartments() == 1 )
i = m.addCompartment(c1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumCompartments() == 1 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ c1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartmentType1(self):
m = libsbml.Model(2,2)
ct = libsbml.CompartmentType(2,2)
i = m.addCompartmentType(ct)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
ct.setId( "ct")
i = m.addCompartmentType(ct)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumCompartmentTypes() == 1 )
_dummyList = [ ct ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartmentType2(self):
m = libsbml.Model(2,2)
ct = libsbml.CompartmentType(2,3)
ct.setId( "ct")
i = m.addCompartmentType(ct)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumCompartmentTypes() == 0 )
_dummyList = [ ct ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartmentType3(self):
m = libsbml.Model(2,2)
ct = None
i = m.addCompartmentType(ct)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumCompartmentTypes() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartmentType4(self):
m = libsbml.Model(2,2)
ct = libsbml.CompartmentType(2,2)
ct.setId( "ct")
ct1 = libsbml.CompartmentType(2,2)
ct1.setId( "ct")
i = m.addCompartmentType(ct)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumCompartmentTypes() == 1 )
i = m.addCompartmentType(ct1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumCompartmentTypes() == 1 )
_dummyList = [ ct ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ct1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addConstraint1(self):
m = libsbml.Model(2,2)
c = libsbml.Constraint(2,2)
i = m.addConstraint(c)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
c.setMath(libsbml.parseFormula("a+b"))
i = m.addConstraint(c)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumConstraints() == 1 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addConstraint2(self):
m = libsbml.Model(2,2)
c = libsbml.Constraint(2,3)
c.setMath(libsbml.parseFormula("a+b"))
i = m.addConstraint(c)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumConstraints() == 0 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addConstraint3(self):
m = libsbml.Model(2,2)
c = None
i = m.addConstraint(c)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumConstraints() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addEvent1(self):
m = libsbml.Model(2,2)
e = libsbml.Event(2,2)
t = libsbml.Trigger(2,2)
i = m.addEvent(e)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
t.setMath(libsbml.parseFormula("true"))
e.setTrigger(t)
i = m.addEvent(e)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
e.createEventAssignment()
i = m.addEvent(e)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumEvents() == 1 )
_dummyList = [ e ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addEvent2(self):
m = libsbml.Model(2,2)
e = libsbml.Event(2,1)
t = libsbml.Trigger(2,1)
t.setMath(libsbml.parseFormula("true"))
e.setTrigger(t)
e.createEventAssignment()
i = m.addEvent(e)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumEvents() == 0 )
_dummyList = [ e ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addEvent3(self):
m = libsbml.Model(2,2)
e = None
i = m.addEvent(e)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumEvents() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addEvent4(self):
m = libsbml.Model(2,2)
e = libsbml.Event(2,2)
t = libsbml.Trigger(2,2)
t.setMath(libsbml.parseFormula("true"))
e.setId( "e")
e.setTrigger(t)
e.createEventAssignment()
e1 = libsbml.Event(2,2)
e1.setId( "e")
e1.setTrigger(t)
e1.createEventAssignment()
i = m.addEvent(e)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumEvents() == 1 )
i = m.addEvent(e1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumEvents() == 1 )
_dummyList = [ e ]; _dummyList[:] = []; del _dummyList
_dummyList = [ e1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addFunctionDefinition1(self):
m = libsbml.Model(2,2)
fd = libsbml.FunctionDefinition(2,2)
i = m.addFunctionDefinition(fd)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
fd.setId( "fd")
i = m.addFunctionDefinition(fd)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
fd.setMath(libsbml.parseFormula("fd"))
i = m.addFunctionDefinition(fd)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumFunctionDefinitions() == 1 )
_dummyList = [ fd ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addFunctionDefinition2(self):
m = libsbml.Model(2,2)
fd = libsbml.FunctionDefinition(2,1)
fd.setId( "fd")
fd.setMath(libsbml.parseFormula("fd"))
i = m.addFunctionDefinition(fd)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumFunctionDefinitions() == 0 )
_dummyList = [ fd ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addFunctionDefinition3(self):
m = libsbml.Model(2,2)
fd = None
i = m.addFunctionDefinition(fd)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumFunctionDefinitions() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addFunctionDefinition4(self):
m = libsbml.Model(2,2)
fd = libsbml.FunctionDefinition(2,2)
fd.setId( "fd")
fd.setMath(libsbml.parseFormula("fd"))
fd1 = libsbml.FunctionDefinition(2,2)
fd1.setId( "fd")
fd1.setMath(libsbml.parseFormula("fd"))
i = m.addFunctionDefinition(fd)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumFunctionDefinitions() == 1 )
i = m.addFunctionDefinition(fd1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumFunctionDefinitions() == 1 )
_dummyList = [ fd ]; _dummyList[:] = []; del _dummyList
_dummyList = [ fd1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addInitialAssignment1(self):
m = libsbml.Model(2,2)
ia = libsbml.InitialAssignment(2,2)
i = m.addInitialAssignment(ia)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
ia.setSymbol( "i")
i = m.addInitialAssignment(ia)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
ia.setMath(libsbml.parseFormula("gg"))
i = m.addInitialAssignment(ia)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumInitialAssignments() == 1 )
_dummyList = [ ia ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addInitialAssignment2(self):
m = libsbml.Model(2,2)
ia = libsbml.InitialAssignment(2,3)
ia.setSymbol( "i")
ia.setMath(libsbml.parseFormula("gg"))
i = m.addInitialAssignment(ia)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumInitialAssignments() == 0 )
_dummyList = [ ia ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addInitialAssignment3(self):
m = libsbml.Model(2,2)
ia = None
i = m.addInitialAssignment(ia)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumInitialAssignments() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addInitialAssignment4(self):
m = libsbml.Model(2,2)
ia = libsbml.InitialAssignment(2,2)
ia.setSymbol( "ia")
ia.setMath(libsbml.parseFormula("a+b"))
ia1 = libsbml.InitialAssignment(2,2)
ia1.setSymbol( "ia")
ia1.setMath(libsbml.parseFormula("a+b"))
i = m.addInitialAssignment(ia)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumInitialAssignments() == 1 )
i = m.addInitialAssignment(ia1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumInitialAssignments() == 1 )
_dummyList = [ ia ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ia1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addParameter1(self):
m = libsbml.Model(2,2)
p = libsbml.Parameter(2,2)
i = m.addParameter(p)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
p.setId( "p")
i = m.addParameter(p)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumParameters() == 1 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addParameter2(self):
m = libsbml.Model(2,2)
p = libsbml.Parameter(2,1)
p.setId( "p")
i = m.addParameter(p)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumParameters() == 0 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addParameter3(self):
m = libsbml.Model(2,2)
p = libsbml.Parameter(1,2)
p.setId( "p")
i = m.addParameter(p)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumParameters() == 0 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addParameter4(self):
m = libsbml.Model(2,2)
p = None
i = m.addParameter(p)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumParameters() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addParameter5(self):
m = libsbml.Model(2,2)
p = libsbml.Parameter(2,2)
p.setId( "p")
p1 = libsbml.Parameter(2,2)
p1.setId( "p")
i = m.addParameter(p)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumParameters() == 1 )
i = m.addParameter(p1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumParameters() == 1 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addReaction1(self):
m = libsbml.Model(2,2)
r = libsbml.Reaction(2,2)
i = m.addReaction(r)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
r.setId( "r")
i = m.addReaction(r)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumReactions() == 1 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addReaction2(self):
m = libsbml.Model(2,2)
r = libsbml.Reaction(2,1)
r.setId( "r")
i = m.addReaction(r)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumReactions() == 0 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addReaction3(self):
m = libsbml.Model(2,2)
r = libsbml.Reaction(1,2)
r.setId( "r")
i = m.addReaction(r)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumReactions() == 0 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addReaction4(self):
m = libsbml.Model(2,2)
r = None
i = m.addReaction(r)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumReactions() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addReaction5(self):
m = libsbml.Model(2,2)
r = libsbml.Reaction(2,2)
r.setId( "r")
r1 = libsbml.Reaction(2,2)
r1.setId( "r")
i = m.addReaction(r)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumReactions() == 1 )
i = m.addReaction(r1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumReactions() == 1 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ r1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addRule1(self):
m = libsbml.Model(2,2)
r = libsbml.AssignmentRule(2,2)
i = m.addRule(r)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
r.setVariable( "f")
i = m.addRule(r)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
r.setMath(libsbml.parseFormula("a-n"))
i = m.addRule(r)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumRules() == 1 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addRule2(self):
m = libsbml.Model(2,2)
r = libsbml.AssignmentRule(2,1)
r.setVariable( "f")
r.setMath(libsbml.parseFormula("a-n"))
i = m.addRule(r)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumRules() == 0 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addRule3(self):
m = libsbml.Model(2,2)
r = libsbml.AssignmentRule(1,2)
r.setVariable( "f")
r.setMath(libsbml.parseFormula("a-n"))
i = m.addRule(r)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumRules() == 0 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addRule4(self):
m = libsbml.Model(2,2)
r = None
i = m.addRule(r)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumRules() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addRule5(self):
m = libsbml.Model(2,2)
ar = libsbml.AssignmentRule(2,2)
ar.setVariable( "ar")
ar.setMath(libsbml.parseFormula("a-j"))
ar1 = libsbml.AssignmentRule(2,2)
ar1.setVariable( "ar")
ar1.setMath(libsbml.parseFormula("a-j"))
i = m.addRule(ar)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumRules() == 1 )
i = m.addRule(ar1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumRules() == 1 )
_dummyList = [ ar ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ar1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpecies1(self):
m = libsbml.Model(2,2)
s = libsbml.Species(2,2)
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
s.setId( "s")
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
s.setCompartment( "c")
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumSpecies() == 1 )
_dummyList = [ s ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpecies2(self):
m = libsbml.Model(2,2)
s = libsbml.Species(2,1)
s.setId( "s")
s.setCompartment( "c")
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumSpecies() == 0 )
_dummyList = [ s ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpecies3(self):
m = libsbml.Model(2,2)
s = libsbml.Species(1,2)
s.setId( "s")
s.setCompartment( "c")
s.setInitialAmount(2)
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumSpecies() == 0 )
_dummyList = [ s ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpecies4(self):
m = libsbml.Model(2,2)
s = None
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumSpecies() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpecies5(self):
m = libsbml.Model(2,2)
s = libsbml.Species(2,2)
s.setId( "s")
s.setCompartment( "c")
s1 = libsbml.Species(2,2)
s1.setId( "s")
s1.setCompartment( "c")
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumSpecies() == 1 )
i = m.addSpecies(s1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumSpecies() == 1 )
_dummyList = [ s ]; _dummyList[:] = []; del _dummyList
_dummyList = [ s1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpeciesType1(self):
m = libsbml.Model(2,2)
st = libsbml.SpeciesType(2,2)
i = m.addSpeciesType(st)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
st.setId( "st")
i = m.addSpeciesType(st)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumSpeciesTypes() == 1 )
_dummyList = [ st ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpeciesType2(self):
m = libsbml.Model(2,2)
st = libsbml.SpeciesType(2,3)
st.setId( "st")
i = m.addSpeciesType(st)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumSpeciesTypes() == 0 )
_dummyList = [ st ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpeciesType3(self):
m = libsbml.Model(2,2)
st = None
i = m.addSpeciesType(st)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumSpeciesTypes() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpeciesType4(self):
m = libsbml.Model(2,2)
st = libsbml.SpeciesType(2,2)
st.setId( "st")
st1 = libsbml.SpeciesType(2,2)
st1.setId( "st")
i = m.addSpeciesType(st)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumSpeciesTypes() == 1 )
i = m.addSpeciesType(st1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumSpeciesTypes() == 1 )
_dummyList = [ st ]; _dummyList[:] = []; del _dummyList
_dummyList = [ st1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addUnitDefinition1(self):
m = libsbml.Model(2,2)
ud = libsbml.UnitDefinition(2,2)
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
ud.createUnit()
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
ud.setId( "ud")
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumUnitDefinitions() == 1 )
_dummyList = [ ud ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addUnitDefinition2(self):
m = libsbml.Model(2,2)
ud = libsbml.UnitDefinition(2,1)
ud.createUnit()
ud.setId( "ud")
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumUnitDefinitions() == 0 )
_dummyList = [ ud ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addUnitDefinition3(self):
m = libsbml.Model(2,2)
ud = libsbml.UnitDefinition(1,2)
ud.createUnit()
ud.setId( "ud")
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumUnitDefinitions() == 0 )
_dummyList = [ ud ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addUnitDefinition4(self):
m = libsbml.Model(2,2)
ud = None
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumUnitDefinitions() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addUnitDefinition5(self):
m = libsbml.Model(2,2)
ud = libsbml.UnitDefinition(2,2)
ud.setId( "ud")
ud.createUnit()
ud1 = libsbml.UnitDefinition(2,2)
ud1.setId( "ud")
ud1.createUnit()
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumUnitDefinitions() == 1 )
i = m.addUnitDefinition(ud1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumUnitDefinitions() == 1 )
_dummyList = [ ud ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ud1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createCompartment(self):
m = libsbml.Model(2,2)
p = m.createCompartment()
self.assert_( m.getNumCompartments() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createCompartmentType(self):
m = libsbml.Model(2,2)
p = m.createCompartmentType()
self.assert_( m.getNumCompartmentTypes() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createConstraint(self):
m = libsbml.Model(2,2)
p = m.createConstraint()
self.assert_( m.getNumConstraints() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createEvent(self):
m = libsbml.Model(2,2)
p = m.createEvent()
self.assert_( m.getNumEvents() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createEventAssignment(self):
m = libsbml.Model(2,2)
p = m.createEvent()
ea = m.createEventAssignment()
self.assert_( p.getNumEventAssignments() == 1 )
self.assert_( (ea).getLevel() == 2 )
self.assert_( (ea).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createFunctionDefinition(self):
m = libsbml.Model(2,2)
p = m.createFunctionDefinition()
self.assert_( m.getNumFunctionDefinitions() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createInitialAssignment(self):
m = libsbml.Model(2,2)
p = m.createInitialAssignment()
self.assert_( m.getNumInitialAssignments() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createKineticLaw(self):
m = libsbml.Model(2,2)
p = m.createReaction()
kl = m.createKineticLaw()
self.assert_( p.isSetKineticLaw() == True )
self.assert_( (kl).getLevel() == 2 )
self.assert_( (kl).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createKineticLawParameters(self):
m = libsbml.Model(2,2)
r = m.createReaction()
kl = m.createKineticLaw()
p = m.createKineticLawParameter()
self.assert_( r.isSetKineticLaw() == True )
self.assert_( kl.getNumParameters() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createModifier(self):
m = libsbml.Model(2,2)
p = m.createReaction()
sr = m.createModifier()
self.assert_( p.getNumModifiers() == 1 )
self.assert_( (sr).getLevel() == 2 )
self.assert_( (sr).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createParameter(self):
m = libsbml.Model(2,2)
p = m.createParameter()
self.assert_( m.getNumParameters() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createProduct(self):
m = libsbml.Model(2,2)
p = m.createReaction()
sr = m.createProduct()
self.assert_( p.getNumProducts() == 1 )
self.assert_( (sr).getLevel() == 2 )
self.assert_( (sr).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createReactant(self):
m = libsbml.Model(2,2)
p = m.createReaction()
sr = m.createReactant()
self.assert_( p.getNumReactants() == 1 )
self.assert_( (sr).getLevel() == 2 )
self.assert_( (sr).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createReaction(self):
m = libsbml.Model(2,2)
p = m.createReaction()
self.assert_( m.getNumReactions() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createRule(self):
m = libsbml.Model(2,2)
p = m.createAssignmentRule()
self.assert_( m.getNumRules() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createSpecies(self):
m = libsbml.Model(2,2)
p = m.createSpecies()
self.assert_( m.getNumSpecies() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createSpeciesType(self):
m = libsbml.Model(2,2)
p = m.createSpeciesType()
self.assert_( m.getNumSpeciesTypes() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createUnit(self):
m = libsbml.Model(2,2)
p = m.createUnitDefinition()
u = m.createUnit()
self.assert_( p.getNumUnits() == 1 )
self.assert_( (u).getLevel() == 2 )
self.assert_( (u).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createUnitDefinition(self):
m = libsbml.Model(2,2)
p = m.createUnitDefinition()
self.assert_( m.getNumUnitDefinitions() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_setId1(self):
id = "1e1";
i = self.M.setId(id)
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
self.assertEqual( False, self.M.isSetId() )
pass
def test_Model_setId2(self):
id = "e1";
i = self.M.setId(id)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_(( id == self.M.getId() ))
self.assertEqual( True, self.M.isSetId() )
i = self.M.setId("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetId() )
pass
def test_Model_setId3(self):
id = "e1";
i = self.M.setId(id)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_(( id == self.M.getId() ))
self.assertEqual( True, self.M.isSetId() )
i = self.M.unsetId()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetId() )
pass
def test_Model_setModelHistory1(self):
self.M.setMetaId("_001")
mh = libsbml.ModelHistory()
i = self.M.setModelHistory(mh)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assertEqual( False, self.M.isSetModelHistory() )
i = self.M.unsetModelHistory()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetModelHistory() )
_dummyList = [ mh ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_setModelHistory2(self):
self.M.setMetaId("_001")
i = self.M.setModelHistory(None)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetModelHistory() )
i = self.M.unsetModelHistory()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetModelHistory() )
pass
def test_Model_setName1(self):
name = "3Set_k2";
i = self.M.setName(name)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.M.isSetName() )
pass
def test_Model_setName2(self):
name = "Set k2";
i = self.M.setName(name)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_(( name == self.M.getName() ))
self.assertEqual( True, self.M.isSetName() )
i = self.M.unsetName()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetName() )
pass
def test_Model_setName3(self):
i = self.M.setName("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetName() )
pass
def test_Model_setName4(self):
m = libsbml.Model(1,2)
i = m.setName( "11dd")
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
self.assertEqual( False, m.isSetName() )
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestModel_newSetters))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| 34.663734
| 79
| 0.63543
| 4,202
| 35,461
| 5.128986
| 0.082104
| 0.102543
| 0.122773
| 0.080178
| 0.812036
| 0.782433
| 0.761507
| 0.742762
| 0.726012
| 0.691212
| 0
| 0.018844
| 0.212825
| 35,461
| 1,022
| 80
| 34.697652
| 0.753242
| 0.037844
| 0
| 0.739179
| 0
| 0
| 0.004841
| 0
| 0
| 0
| 0
| 0
| 0.260821
| 1
| 0.09323
| false
| 0.09323
| 0.00333
| 0
| 0.099889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
52c870b8150a3d03b5df89fdad73112f56766e9b
| 48
|
py
|
Python
|
algorithm/sampling/__init__.py
|
qwe79137/JumpStarter
|
e59ee341f31d7cc9fde05b6f395d29d4d63130e4
|
[
"MIT"
] | 18
|
2021-05-15T05:38:11.000Z
|
2022-03-15T22:22:33.000Z
|
algorithm/sampling/__init__.py
|
qwe79137/JumpStarter
|
e59ee341f31d7cc9fde05b6f395d29d4d63130e4
|
[
"MIT"
] | 1
|
2022-01-05T12:02:27.000Z
|
2022-03-20T02:49:51.000Z
|
algorithm/sampling/__init__.py
|
qwe79137/JumpStarter
|
e59ee341f31d7cc9fde05b6f395d29d4d63130e4
|
[
"MIT"
] | 4
|
2021-06-11T08:29:55.000Z
|
2022-03-04T08:55:53.000Z
|
from .localized_sample import localized_sample
| 16
| 46
| 0.875
| 6
| 48
| 6.666667
| 0.666667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104167
| 48
| 2
| 47
| 24
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
eabc147c8d5420cc17ccb6f6aa5ac39c07073c3d
| 126
|
py
|
Python
|
src/martian/tests/testpackage/one.py
|
bielbienne/martian
|
fad3a1e7ae9aba46abd344237a439853d1097a8a
|
[
"ZPL-2.1"
] | null | null | null |
src/martian/tests/testpackage/one.py
|
bielbienne/martian
|
fad3a1e7ae9aba46abd344237a439853d1097a8a
|
[
"ZPL-2.1"
] | null | null | null |
src/martian/tests/testpackage/one.py
|
bielbienne/martian
|
fad3a1e7ae9aba46abd344237a439853d1097a8a
|
[
"ZPL-2.1"
] | null | null | null |
import animal
class Whale(animal.Animal):
pass
class Dragon(animal.Animal):
pass
class SpermWhale(Whale):
pass
| 11.454545
| 28
| 0.706349
| 16
| 126
| 5.5625
| 0.4375
| 0.269663
| 0.359551
| 0.47191
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206349
| 126
| 10
| 29
| 12.6
| 0.89
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.428571
| 0.142857
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
eae41291099c8e297f8bfe788197c589aa877091
| 31,008
|
py
|
Python
|
histcensusgis/text/standardize.py
|
graziul/hist-census-gis
|
558bf38cd0e444b5a91133dd70c88210da3cbbc9
|
[
"MIT"
] | 4
|
2017-05-15T20:54:25.000Z
|
2019-01-30T19:04:24.000Z
|
histcensusgis/text/standardize.py
|
graziul/hist-census-gis
|
558bf38cd0e444b5a91133dd70c88210da3cbbc9
|
[
"MIT"
] | null | null | null |
histcensusgis/text/standardize.py
|
graziul/hist-census-gis
|
558bf38cd0e444b5a91133dd70c88210da3cbbc9
|
[
"MIT"
] | 1
|
2017-07-12T18:06:19.000Z
|
2017-07-12T18:06:19.000Z
|
#
# Function to clean street direction and street type
#
# Author: Amory Kisch
# Date: 7/17/16
#
from __future__ import print_function
import re
# Standardize street (microdata/grid)
def standardize_street(st):
#TODO: Deal with multiple TYPES (e.g. "3rd Pl/St")
runAgain = False
#Special case: More characters after \n\r - solution is to split on \n and take what's before
st = st.split('\n')[0]
st = st.rstrip('\n')
orig_st = st
st = st.lower()
###Remove Punctuation, extraneous words at end of stname###
st = re.sub(r'[\.,]',' ',st)
st = re.sub(' +',' ',st)
st = st.strip()
st = re.sub('\\\\','',st)
st = re.sub(r' \(?([Cc][Oo][Nn][\'Tt]*d?|[Cc][Oo][Nn][Tt][Ii][Nn][Uu][Ee][Dd])\)?$','',st)
st = st.replace('(','').replace(')','')
#fix 'Ave. "L"' (found in SM descript for NYC)
if re.search('"[a-z]"$',st) :
st = st.replace('"','')
#consider extended a diff stname#
#st = re.sub(r' [Ee][XxsS][tdDT]+[^ ]*$','',st)
#Check if st is empty or blank and return empty to [st,DIR,NAME,TYPE]
if st == '' or st == ' ':
return ['','','','']
###stname part analysis###
DIR = ''
NAME = ''
TYPE = ''
# Combinations of directions at end of stname (has to be run first)
if re.search(r'[ \-]+([Nn][\.\-]?[\s]?[Ee][\.]?|[Nn]ortheast|[Nn]orth\s+?[Ee]ast)$',st):
st = "NE "+re.sub(r'[ \-]+([Nn][\.\-]?[\s]?[Ee][\.]?|[Nn]ortheast|[Nn]orth[\s]+?[Ee]ast)$','',st)
DIR = 'NE'
if re.search(r'[ \-]+([Nn][\.\-]?[\s]?[Ww][\.]?|[Nn]orthwest|[Nn]orth\s+?[Ww]est)$',st):
st = "NW "+re.sub(r'[ \-]+([Nn][\.\-]?[\s]?[Ww][\.]?|[Nn]orthwest|[Nn]orth\s+?[Ww]est)$','',st)
DIR = 'NW'
if re.search(r'[ \-]+([Ss][\.\-]?[\s]?[Ee][\.]?|[Ss]outheast|[Ss]outh\s+?[Ee]ast)$',st):
st = "SE "+re.sub(r'[ \-]+([Ss][\.\-]?[\s]?[Ee][\.]?|[Ss]outheast|[Ss]outh\s+?[Ee]ast)$','',st)
DIR = 'SE'
if re.search(r'[ \-]+([Ss][\.\-]?[\s]?[Ww][\.]?|[Ss]outhwest|[Ss]outh\s+?[Ww]est)$',st):
st = "SW "+re.sub(r'[ \-]+([Ss][\.\-]?[\s]?[Ww][\.]?|[Ss]outhwest|[Ss]outh\s+?[Ww]est)$','',st)
DIR = 'SW'
#First check if DIR is at end of stname. make sure that it's the DIR and not actually the NAME (e.g. "North Ave" or "Avenue E")#
if re.search(r'[ \-]+([Nn]|[Nn][Oo][Rr]?[Tt]?[Hh]?e?)$',st) and not re.match('^[Nn][Oo][Rr][Tt][Hh]$|^[Aa][Vv][Ee]([Nn][Uu][Ee])?[ \-]+[Nn]$',st) :
st = "N "+re.sub(r'[ \-]+([Nn]|[Nn][Oo][Rr]?[Tt]?[Hh]?)$','',st)
DIR = 'N'
if re.search(r'[ \-]+([Ss]|[Ss][Oo][Uu]?[Tt]?[Hh]?)$',st) and not re.search('^[Ss][Oo][Uu][Tt][Hh]$|^[Aa][Vv][Ee]([Nn][Uu][Ee])?[ \-]+[Ss]$',st) :
st = "S "+re.sub(r'[ \-]+([Ss]|[Ss][Oo][Uu]?[Tt]?[Hh]?)$','',st)
DIR = 'S'
if re.search(r'[ \-]+([Ww][Ee][Ss][Tt]|[Ww])$',st) and not re.search('^[Ww][Ee][Ss][Tt]$|^[Aa][Vv][Ee]([Nn][Uu][Ee])?[ \-]+[Ww]$',st) :
st = "W "+re.sub(r'[ \-]+([Ww][Ee][Ss][Tt]|[Ww])$','',st)
DIR = 'W'
if re.search(r'[ \-]+([Ee][Aa][Ss][Tt]|[Ee])$',st) and not re.search('^[Ee][Aa][Ss][Tt]$|^[Aa][Vv][Ee]([Nn][Uu][Ee])?[ \-]+[Ee]$',st) :
st = "E "+re.sub(r'[ \-]+([Ee][Aa][Ss][Tt]|[Ee])$','',st)
DIR = 'E'
#See if a st TYPE can be identified#
st = re.sub(r'[ \-]+([Ss][Tt][Rr]?[Ee]?[Ee]?[Tt]?[SsEe]?|[Ss][\.][Tt]|[Ss][Tt]?.?[Rr][Ee][Ee][Tt])$',' St',st)
#st = re.sub(r'[ \-]+[Ss]tr?e?e?t?[ \-]',' St ',st) # Fix things like "4th Street Place"
st = re.sub(r'[ \-]+([Aa][Vv]|[Aa][VvBb][Ee][Nn][Uu]?[EesS]?|[aA]veenue|[Aa]vn[e]?ue|[Aa][Vv][Ee])$',' Ave',st)
match = re.search("[Aa][Vv][Ee]?([Nn][Uu][Ee])?[ \-]+([a-zA-Z])$",st)
if match :
st = re.sub("([a-zA-Z])$","",st)
st = re.sub("[Aa][Vv][Ee]?([Nn][Uu][Ee])?[ \-]+",match.group(2)+" Ave",st)
st = re.sub(r'[ \-]+([Bb]\'?[Ll][Vv]\'?[Dd]|Bl\'?v\'?d|Blv|Blvi|Bly|Bldv|Bvld|Bol\'d|[Bb][Oo][Uu][Ll][EeAa]?[Vv]?[Aa]?[Rr]?[Dd]?)$',' Blvd',st)
st = re.sub(r'[ \-]+([Rr][Dd]|[Rr][Oo][Aa][Dd])$',' Road',st)
st = re.sub(r'[ \-]+[Dd][Rr][Ii]?[Vv]?[Ee]?$',' Drive',st)
st = re.sub(r'[ \-]+([Cc][Oo][Uu]?[Rr][Tt]|[Cc][Tt])$',' Ct',st)
st = re.sub(r'[ \-]+([Pp][Ll][Aa]?[Cc]?[Ee]?)$',' Pl',st)
st = re.sub(r'[ \-]+([Ss][Qq][Uu]?[Aa]?[Rr]?[Ee]?)$',' Sq',st)
st = re.sub(r'[ \-]+[Cc]ircle$',' Cir',st)
st = re.sub(r'[ \-]+([Pp]rkway|[Pp]arkway|[Pp]ark [Ww]ay|[Pp]kwa?y|[Pp]ky|[Pp]arkwy|[Pp]ra?kwa?y|[Pp]wy)$',' Pkwy',st)
st = re.sub(r'[ \-]+[Ww][Aa][Yy]$',' Way',st)
st = re.sub(r'[ \-]+[Aa][Ll][Ll]?[Ee]?[Yy]?$',' Aly',st)
st = re.sub(r'[ \-]+[Tt][Ee][Rr]+[EeAa]?[Cc]?[Ee]?$',' Ter',st)
st = re.sub(r'[ \-]+([Ll][Aa][Nn][Ee]|[Ll][Nn])$',' Ln',st)
st = re.sub(r'[ \-]+([Pp]lzaz|[Pp][Ll][Aa][Zz][Aa])$',' Plaza',st)
st = re.sub(r'[ \-]+([Hh]ighway)$',' Hwy',st)
st = re.sub(r'[ \-]+([Hh]eights?)$',' Heights',st)
# "Park" is not considered a valid TYPE because it should probably actually be part of NAME #
match = re.search(r' ([Ss]t|[Aa]ve|[Bb]lvd|[Pp]l|[Dd]rive|[Rr]oad|[Cc]t|[Rr]ailway|[Rr][Rr]|[Cc]ity[Ll]imits|[Hh]wy|[Ff]wy|[Pp]kwy|[Cc]ir|[Cc]ircuit|[Tt]er|[Ll]n|[Ww]ay|[Tt]rail|[Ss]q|[Aa]ly|[Bb]ridge|[Bb]ridgeway|[Ww]alk|[Hh]eights|[Cc]rescent|[Cc]reek|[Rr]iver|[Ll]ine|[Pp]laza|[Ee]splanade|[Cc]emetery|[Vv]iaduct|[Tt]rafficway|[Tt]rfy|[Tt]urnpike)$',st)
if match :
TYPE = match.group(1).title()
if TYPE == "Rr" :
TYPE = "RR"
st = re.sub(re.escape(match.group(1)),TYPE,st)
#Combinations of directions
match = re.search(r'^([Nn][Oo\.]?[\s]?[Ee][\.]?|[Nn]ortheast|[Nn]orth\s+?[Ee]ast)[ \-]+',st)
if match :
if st == match.group(0)+TYPE :
NAME = 'Northeast'
else :
st = "NE "+re.sub(r'^([Nn][Oo\.]?[\s]?[Ee][\.]?|[Nn]ortheast|[Nn]orth\s+?[Ee]ast)[ \-]+','',st)
DIR = 'NE'
match = re.search(r'^([Nn][Oo\.]?[\s]?[Ww][\.]?|[Nn]orthwest|[Nn]orth\s+?[Ww]est)[ \-]+',st)
if match :
if st == match.group(0)+TYPE :
NAME = 'Northwest'
else :
st = "NW "+re.sub(r'^([Nn][Oo\.]?[\s]?[Ww][\.]?|[Nn]orthwest|[Nn]orth\s+?[Ww]est)[ \-]+','',st)
DIR = 'NW'
match = re.search(r'^([Ss][Oo\.]?[\s]?[Ee][\.]?|[Ss]outheast|[Ss]outh\s+?[Ee]ast)[ \-]+',st)
if match :
if st == match.group(0)+TYPE :
NAME = 'Southeast'
else :
st = "SE "+re.sub(r'^([Ss][Oo\.]?[\s]?[Ee][\.]?|[Ss]outheast|[Ss]outh\s+?[Ee]ast)[ \-]+','',st)
DIR = 'SE'
match = re.search(r'^([Ss][Oo\.]?[\s]?[Ww][\.]?|[Ss]outhwest|[Ss]outh\s+?[Ww]est)[ \-]+',st)
if match :
if st == match.group(0)+TYPE :
NAME = 'Southwest'
else :
st = "SW "+re.sub(r'^([Ss][Oo\.]?[\s]?[Ww][\.]?|[Ss]outhwest|[Ss]outh\s+?[Ww]est)[ \-]+','',st)
DIR = 'SW'
#See if there is a st DIR. again, make sure that it's the DIR and not actually the NAME (e.g. North Ave, E St [not East St])
if(DIR=='') :
match = re.search(r'^([nN]|[Nn]\.|[Nn]o|[nN]o\.|[Nn][Oo][Rr][Tt]?[Hh]?)[ \-]+',st)
if match :
if st==match.group(0)+TYPE :
if len(match.group(1))>1 :
NAME = 'North'
else : NAME = 'N'
else :
st = "N "+re.sub(r'^([nN]|[Nn]\.|[Nn]o|[nN]o\.|[Nn][Oo][Rr][Tt]?[Hh]?)[ \-]+','',st)
DIR = 'N'
match = re.search(r'^([sS]|[Ss]\.|[Ss]o|[Ss]o\.|[Ss][Oo][Uu][Tt]?[Hh]?)[ \-]+',st)
if match :
if st==match.group(0)+TYPE :
if len(match.group(1))>1:
NAME = 'South'
else : NAME = 'S'
else :
st = "S "+re.sub(r'^([sS]|[Ss]\.|[Ss]o|[Ss]o\.|[Ss][Oo][Uu][Tt]?[Hh]?)[ \-]+','',st)
DIR = 'S'
match = re.search(r'^([wW]|[Ww]\.|[Ww][Ee][Ss]?[Tt]?[\.]?)[ \-]+',st)
if match :
if st==match.group(0)+TYPE :
if len(match.group(1))>1 :
NAME = 'West'
else : NAME = 'W'
else :
st = "W "+re.sub(r'^([wW]|[Ww]\.|[Ww][Ee][Ss]?[Tt]?[\.]?)[ \-]+','',st)
DIR = 'W'
match = re.search(r'^([eE]|[Ee][\.\,]|[Ee][Ee]?[Aa]?[Ss][Tt][\.]?|[Ee]a[Ss]?)[ \-]+',st)
if match :
if st==match.group(0)+TYPE :
if len(match.group(1))>1 :
NAME = 'East'
else : NAME = 'E'
else :
st = "E "+re.sub(r'^([eE]|[Ee][\.\,]|[Ee][Ee]?[Aa]?[Ss][Tt][\.]?|[Ee]a[Ss]?)[ \-]+','',st)
DIR = 'E'
#get the st NAME and standardize it
match = re.search('^'+DIR+'(.+)'+TYPE+'$',st)
if NAME=='' :
#If NAME is not 'North', 'West', etc...
if match :
NAME = match.group(1).strip()
#fix "D" St (found in SM descript for Spokane, at least)
if re.search('^"[a-z]"$',NAME) :
NAME = NAME.replace('"','')
#convert written-out numbers to digits
#TODO: Make these work for all exceptions (go thru text file with find)
#if re.search("[Tt]enth|Eleven(th)?|[Tt]wel[f]?th|[Tt]hirteen(th)?|Fourt[h]?een(th)?|[Ff]ift[h]?een(th)?|[Ss]event[h]?een(th)?|[Ss]event[h]?een(th)?|[eE]ighteen(th)?|[Nn]inet[h]?een(th)?|[Tt]wentieth|[Tt]hirtieth|[Ff]o[u]?rtieth|[Ff]iftieth|[Ss]ixtieth|[Ss]eventieth|[Ee]ightieth|[Nn]inetieth|Twenty[ \-]?|Thirty[ \-]?|Forty[ \-]?|Fifty[ \-]?|Sixty[ \-]?|Seventy[ \-]?|Eighty[ \-]?|Ninety[ \-]?|[Ff]irst|[Ss]econd|[Tt]hird|[Ff]ourth|[Ff]ifth|[Ss]ixth|[Ss]eventh|[Ee]ighth|[Nn]inth",st) :
NAME = re.sub("^[Tt]enth","10th",NAME)
NAME = re.sub("^[Ee]leven(th)?","11th",NAME)
NAME = re.sub("^[Tt]wel[fv]?e?th","12th",NAME)
NAME = re.sub("^[Tt]hirteen(th)?","13th",NAME)
NAME = re.sub("^[Ff]ourt[h]?een(th)?","14th",NAME)
NAME = re.sub("^[Ff]if[th]+een(th)?","15th",NAME)
NAME = re.sub("^[Ss]ixt[h]?een(th)?","16th",NAME)
NAME = re.sub("^[Ss]event[h]?een(th)?","17th",NAME)
NAME = re.sub("^[eE]ighteen(th)?","18th",NAME)
NAME = re.sub("^[Nn]inet[h]?e+n(th)?","19th",NAME)
NAME = re.sub("^[Tt]h?went[iy]eth","20th",NAME)
NAME = re.sub("^[Tt]hirt[iy]e?th","30th",NAME)
NAME = re.sub("^[Ff]o[u]?rt[iy]eth","40th",NAME)
NAME = re.sub("^[Ff]ift[iy]eth", "50th",NAME)
NAME = re.sub("^[Ss]ixt[iy]eth", "60th",NAME)
NAME = re.sub("^[Ss]event[iy]eth", "70th",NAME)
NAME = re.sub("^[Ee]ight[iy]eth", "80th",NAME)
NAME = re.sub("^[Nn]inet[iy]eth", "90th",NAME)
NAME = re.sub("[Tt]wenty[ \-]*","2",NAME)
NAME = re.sub("[Tt]hirty[ \-]*","3",NAME)
NAME = re.sub("[Ff]orty[ \-]*","4",NAME)
NAME = re.sub("[Ff]ifty[ \-]*","5",NAME)
NAME = re.sub("[Ss]ixty[ \-]*","6",NAME)
NAME = re.sub("[Ss]eventy[ \-]*","7",NAME)
NAME = re.sub("[Ee]ighty[ \-]*","8",NAME)
NAME = re.sub("[Nn]inety[ \-]*","9",NAME)
if re.search("(^|[0-9]+.*)([Ff]irst|[Oo]ne)$",NAME) : NAME = re.sub("([Ff]irst|[Oo]ne)$","1st",NAME)
if re.search("(^|[0-9]+.*)([Ss]econd|[Tt]wo)$",NAME) : NAME = re.sub("([Ss]econd|[Tt]wo)$","2nd",NAME)
if re.search("(^|[0-9]+.*)([Tt]hird|[Tt]hree)$",NAME) : NAME = re.sub("([Tt]hird|[Tt]hree)$","3rd",NAME)
if re.search("(^|[0-9]+.*)[Ff]our(th)?$",NAME) : NAME = re.sub("[Ff]our(th)?$","4th",NAME)
if re.search("(^|[0-9]+.*)([Ff]if?th|[Ff]ive)$",NAME) : NAME = re.sub("([Ff]if?th|[Ff]ive)$","5th",NAME)
if re.search("(^|[0-9]+.*)[Ss]ix(th)?$",NAME) : NAME = re.sub("[Ss]ix(th)?$","6th",NAME)
if re.search("(^|[0-9]+.*)[Ss]even(th)?$",NAME) : NAME = re.sub("[Ss]even(th)?$","7th",NAME)
if re.search("(^|[0-9]+.*)[Ee]igh?th?$",NAME) : NAME = re.sub("[Ee]igh?th?$","8th",NAME)
if re.search("(^|[0-9]+.*)[Nn]in(th|e)+$",NAME) : NAME = re.sub("[Nn]in(th|e)+$","9th",NAME)
if re.search("[0-9]+",NAME) :
if re.search("^[0-9]+$",NAME) : #if NAME is only numbers (no suffix), add the correct suffix
foo = True
suffixes = {'11':'11th','12':'12th','13':'13th','1':'1st','2':'2nd','3':'3rd','4':'4th','5':'5th','6':'6th','7':'7th','8':'8th','9':'9th','0':'0th'}
num = re.search("[0-9]+$",NAME).group(0)
suff = ''
# if num is not found in suffixes dict, remove leftmost digit until it is found... 113 -> 13 -> 13th; 24 -> 4 -> 4th
while(suff=='') :
try :
suff = suffixes[num]
except KeyError :
num = num[1:]
if len(num) == 0 :
break
if not suff == '' :
NAME = re.sub(num+'$',suff,NAME)
else :
# Fix incorrect suffixes e.g. "73d St" -> "73rd St"
if re.search("[23]d$",NAME) :
NAME = re.sub("3d","3rd",NAME)
NAME = re.sub("2d","2nd",NAME)
if re.search("1 [Ss]t|2 nd|3 rd|1[1-3] th|[04-9] th",NAME) :
try :
suff = re.search("[0-9] ([Sa-z][a-z])",NAME).group(1)
except :
print("NAME: "+NAME+", suff: "+suff+", st: "+st)
NAME = re.sub(" "+suff,suff,NAME)
# TODO: identify corner cases with numbers e.g. "51 and S- Hermit"
# This \/ is a bit overzealous...! #
hnum = re.search("^([0-9]+[ \-]+).+",NAME) #housenum in stname?
if hnum :
#False
NAME = re.sub(hnum.group(1),"",NAME) #remove housenum. May want to update housenum field, maybe not though.
runAgain = True
NAME = re.sub("(?:^| )[a-z]",lambda x:x.group(0).upper(),NAME)
else :
print('failed at "'+st,'"')
#return [st, DIR, NAME, TYPE]
assert(False)
# Standardize "St ____ Ave" -> "Saint ____ Ave" #
NAME = re.sub("^([Ss][Tt]\.?|[Ss][Aa][Ii][Nn][Tt])[ \-]","Saint ",NAME)
st = re.sub(re.escape(match.group(1).strip()),NAME,st,count=1).strip()
try :
assert st == (DIR+' '+NAME+' '+TYPE).strip()
except AssertionError :
pass
#print("Something went a bit wrong while trying to pre-standardize stnames.")
#print("orig was: "+orig_st)
#print("st is: \""+st+"\"")
#print("components: ["+(DIR+','+NAME+','+TYPE).strip()+"]")
if runAgain :
return standardize_street(st)
else :
return [st, DIR, NAME, TYPE]
# Standardize street (Steve Morse)
def sm_standardize(st) :
orig_st = st
st = st.strip()
st = st.replace("(","").replace(")","")
st = re.sub(r" [Ee][Xx][Tt][Ee]?[Nn]?[Dd]?[Ee]?[Dd]?$","",st)
DIR = re.search(r" ([NSEW ]+)$",st)
st = re.sub(r" ([NSEW ]+)$","",st)
if(DIR) :
DIR = DIR.group(1)
DIR = re.sub(" ","",DIR)
else :
DIR = ""
TYPE = re.search(r' (St|Street|Ave?|Avenue|Blvd|Pl|Dr|Drive|Rd|Road|Ct|Railway|Circuit|Hwy|Fwy|Pa?r?kwa?y|Pkwy|Cir|Terr?a?c?e?|La|Ln|Way|Trail|Sq|All?e?y?|Bridge|Bridgeway|Walk|Crescent|Creek|River|Line|Plaza|Esplanade|[Cc]emetery|Viaduct|Trafficway|Trfy|Turnpike|Park|Boundary|Home|Hsptl)$',st)
if(TYPE) :
st = re.sub(TYPE.group(0),"",st)
TYPE = TYPE.group(1)
if(TYPE=="Street") :
TYPE = "St"
if(TYPE=="Avenue") :
TYPE = "Ave"
if(TYPE=="Av") :
TYPE = "Ave"
if(TYPE=="Rd") :
TYPE = "Road"
if(TYPE=="Dr") :
TYPE = "Drive"
if(TYPE=="La") :
TYPE = "Ln"
if(re.match("Terr?a?c?e?",TYPE)) :
TYPE = "Ter"
if(re.match("Pa?r?kwa?y",TYPE)) :
TYPE = "Pkwy"
if(re.match("All?e?y?",TYPE)) :
TYPE = "Aly"
else :
if re.search("[Cc]ity [Ll]imits|[Rr]ailroad [Tt]racks",orig_st) :
TYPE = ""
elif st == "Broadway" :
TYPE = ""
else :
TYPE = "St"
NAME = st
st = (DIR+" "+NAME+" "+TYPE).strip()
#print(orig_st)
#print("changed to "+st)
return [st,DIR,NAME,TYPE]
# Standardize numbered streets [TODO: redundant code in standardize_street should use this instead]
def Num_Standardize(NAME) :
NAME = re.sub("^[Tt]e+nth","10th",NAME)
NAME = re.sub("^[Ee]leven(th)?","11th",NAME)
NAME = re.sub("^[Tt]wel[fv]?e?th","12th",NAME)
NAME = re.sub("^[Tt]hirte+n(th)?","13th",NAME)
NAME = re.sub("^[Ff]ourt[h]?e+n(th)?","14th",NAME)
NAME = re.sub("^[Ff]ift[h]?e+n(th)?","15th",NAME)
NAME = re.sub("^[Ss]ixt[h]?e+n(th)?","16th",NAME)
NAME = re.sub("^[Ss]event[h]?e+n(th)?","17th",NAME)
NAME = re.sub("^[eE]ighte+n(th)?","18th",NAME)
NAME = re.sub("^[Nn]inet[h]?e+n(th)?","19th",NAME)
NAME = re.sub("^[Tt]went[iy]eth","20th",NAME)
NAME = re.sub("^[Tt]hirt[iy]eth","30th",NAME)
NAME = re.sub("^[Ff]o[u]?rt[iy]eth","40th",NAME)
NAME = re.sub("^[Ff]ift[iy]eth", "50th",NAME)
NAME = re.sub("^[Ss]ixt[iy]eth", "60th",NAME)
NAME = re.sub("^[Ss]event[iy]eth", "70th",NAME)
NAME = re.sub("^[Ee]ight[iy]eth", "80th",NAME)
NAME = re.sub("^[Nn]inet[iy]eth", "90th",NAME)
NAME = re.sub("[Tt]wenty[ \-]*","2",NAME)
NAME = re.sub("[Tt]hirty[ \-]*","3",NAME)
NAME = re.sub("[Ff]orty[ \-]*","4",NAME)
NAME = re.sub("[Ff]ifty[ \-]*","5",NAME)
NAME = re.sub("[Ss]ixty[ \-]*","6",NAME)
NAME = re.sub("[Ss]eventy[ \-]*","7",NAME)
NAME = re.sub("[Ee]ighty[ \-]*","8",NAME)
NAME = re.sub("[Nn]inety[ \-]*","9",NAME)
if re.search("(^|[0-9]+.*)([Ff]irst|[Oo]ne)",NAME) : NAME = re.sub("([Ff]irst|[Oo]ne)","1st",NAME)
if re.search("(^|[0-9]+.*)([Ss]econd|[Tt]wo)",NAME) : NAME = re.sub("([Ss]econd|[Tt]wo)","2nd",NAME)
if re.search("(^|[0-9]+.*)([Tt]hird|[Tt]hree)",NAME) : NAME = re.sub("([Tt]hird|[Tt]hree)","3rd",NAME)
if re.search("(^|[0-9]+.*)[Ff]our(th)?",NAME) : NAME = re.sub("[Ff]our(th)?","4th",NAME)
if re.search("(^|[0-9]+.*)([Ff]ifth|[Ff]ive)",NAME) : NAME = re.sub("([Ff]ifth|[Ff]ive)","5th",NAME)
if re.search("(^|[0-9]+.*)[Ss]ix(th)?",NAME) : NAME = re.sub("[Ss]ix(th)?","6th",NAME)
if re.search("(^|[0-9]+.*)[Ss]even(th)?",NAME) : NAME = re.sub("[Ss]even(th)?","7th",NAME)
if re.search("(^|[0-9]+.*)[Ee]igh?th?",NAME) : NAME = re.sub("[Ee]igh?th?","8th",NAME)
if re.search("(^|[0-9]+.*)[Nn]in(th|e)+",NAME) : NAME = re.sub("[Nn]in(th|e)+","9th",NAME)
return NAME
#Returns just the NAME component of the street phrase, if any If second argument is True, return a list of all components
def isolate_st_name(st,whole_phrase = False) :
if (st == None or st == '' or st == -1) or (not isinstance(st, str) and not isinstance(st, unicode)) :
return ''
else :
TYPE = re.search(r' (St|Ave?|Blvd|Pl|Dr|Drive|Rd|Road|Ct|Railway|CityLimits|Hwy|Fwy|Pkwy|Cir|Terr?a?c?e?|La|Ln|Way|Trail|Sq|All?e?y?|Bridge|Bridgeway|Walk|Crescent|Creek|Rive?r?|Ocean|Bay|Canal|Sound|[Ll]ine|Plaza|Esplanade|[Cc]emetery|Viaduct|Trafficway|Trfy|Turnpike)$',st)
if(TYPE) :
TYPE = TYPE.group(0)
st = re.sub(TYPE+"$", "",st)
TYPE = TYPE.strip()
DIR = re.search("^[NSEW]+ ",st)
if(DIR) :
DIR = DIR.group(0)
st = re.sub("^"+DIR, "",st)
DIR = DIR.strip()
st = st.strip()
if whole_phrase :
return [DIR,st,TYPE]
else :
return st
#Function to standardize Steve Morse street names
def morse_standardize(st) :
if re.search('[Cc]ity [Ll]imits',st) :
return 'City Limits'
st = re.sub(" [Rr]iv($| St$)"," River",st)
st = re.sub("^Mt ","Mount ",st)
return st
#Function to standardize street for 1940 ED descriptions algorithm
def standardize_street_40_desc(st):
    """Standardize a street name from a 1940 enumeration-district description.

    The pipeline: strip punctuation/filler ("cont'd", "extended"), pull a
    leading or trailing direction (N/S/E/W/NE/NW/SE/SW) to the front, take
    care not to mistake a directional NAME ("North Ave", "Avenue E") for a
    direction, abbreviate the street type ("street" -> "St", "avenue" ->
    "Ave", ...), convert written-out numbers to digits ("Twenty Third" ->
    "23rd"), and title-case the name.

    Returns:
        "" for blank input, "Railway" for railroad entries, "City Limits"
        for city-limit entries, otherwise the standardized street string
        (" St" is appended when no street type could be recognized).
    """
    st = st.rstrip('\n')
    # Railroad right-of-way entries ("R.R.", "R,R") collapse to one label.
    if re.search("R[\.,]R[\.,]?$",st) :
        return "Railway"
    st = st.lower()
    ###Remove Punctuation, extraneous words at end of stname###
    st = re.sub(r'[\.,\*!]',' ',st)
    st = re.sub(' +',' ',st)
    st = st.strip()
    st = re.sub('\\\\','',st)
    # Drop "(cont'd)" / "(continued)" markers.
    st = re.sub(r' \(?([Cc][Oo][Nn][\'Tt]*d?|[Cc][Oo][Nn][Tt][Ii][Nn][Uu][Ee][Dd])\)?$','',st)
    #consider extended a diff stname#
    st = re.sub(r" [Ee][Xx][Tt][Ee]?[Nn]?[Dd]?[Ee]?[Dd]?$","",st)
    # Nothing left after cleanup -> treat as blank.
    if st == '' or st == ' ':
        return ""
    ###stname part analysis###
    DIR = ''
    NAME = ''
    TYPE = ''
    # Combinations of directions at end of stname (has to be run first)
    if re.search(r'[ \-]+([Nn][\.\-]?[\s]?[Ee][\.]?|[Nn]ortheast|[Nn]orth\s+?[Ee]ast)$',st):
        st = "NE "+re.sub(r'[ \-]+([Nn][\.\-]?[\s]?[Ee][\.]?|[Nn]ortheast|[Nn]orth[\s]+?[Ee]ast)$','',st)
        DIR = 'NE'
    if re.search(r'[ \-]+([Nn][\.\-]?[\s]?[WwV\xa5][\.]?|[Nn]orthwest|[Nn]orth\s+?[Ww]est)$',st):
        st = "NW "+re.sub(r'[ \-]+([Nn][\.\-]?[\s]?[WwV\xa5][\.]?|[Nn]orthwest|[Nn]orth\s+?[Ww]est)$','',st)
        DIR = 'NW'
    if re.search(r'[ \-]+([Ss][\.\-]?[\s]?[Ee][\.]?|[Ss]outheast|[Ss]outh\s+?[Ee]ast)$',st):
        st = "SE "+re.sub(r'[ \-]+([Ss][\.\-]?[\s]?[Ee][\.]?|[Ss]outheast|[Ss]outh\s+?[Ee]ast)$','',st)
        DIR = 'SE'
    if re.search(r'[ \-]+([Ss][\.\-]?[\s]?[WwV\xa5][\.]?|[Ss]outhwest|[Ss]outh\s+?[Ww]est)$',st):
        st = "SW "+re.sub(r'[ \-]+([Ss][\.\-]?[\s]?[WwV\xa5][\.]?|[Ss]outhwest|[Ss]outh\s+?[Ww]est)$','',st)
        DIR = 'SW'
    #First check if DIR is at end of stname. make sure that it's the DIR and not actually the NAME (e.g. "North Ave" or "Avenue E")#
    # BUGFIX: the N branch previously used re.match() while its S/W/E siblings
    # used re.search(), so the "... Ave N" exclusion never fired for names
    # with a prefix (e.g. "grand ave n"). All four branches now use re.search.
    if re.search(r'[ \-]+([Nn]|[Nn][Oo][Rr]?[Tt]?[Hh]?)$',st) and not re.search('^[Nn][Oo][Rr][Tt][Hh]$|[Aa][Vv][Ee]([Nn][Uu][Ee])?[ \-]+[Nn]$',st) :
        st = "N "+re.sub(r'[ \-]+([Nn]|[Nn][Oo][Rr]?[Tt]?[Hh]?)$','',st)
        DIR = 'N'
    if re.search(r'[ \-]+([Ss]|[Ss][Oo][Uu]?[Tt]?[Hh]?)$',st) and not re.search('^[Ss][Oo][Uu][Tt][Hh]$|[Aa][Vv][Ee]([Nn][Uu][Ee])?[ \-]+[Ss]$',st) :
        st = "S "+re.sub(r'[ \-]+([Ss]|[Ss][Oo][Uu]?[Tt]?[Hh]?)$','',st)
        DIR = 'S'
    if re.search(r'[ \-]+([Ww][Ee][Ss][Tt]|[Ww\xa5])$',st) and not re.search('^[Ww][Ee][Ss][Tt]$|[Aa][Vv][Ee]([Nn][Uu][Ee])?[ \-]+[Ww]$',st) :
        st = "W "+re.sub(r'[ \-]+([Ww][Ee][Ss][Tt]|[Ww\xa5])$','',st)
        DIR = 'W'
    if re.search(r'[ \-]+([Ee][Aa][Ss][Tt]|[Ee])$',st) and not re.search('^[Ee][Aa][Ss][Tt]$|[Aa][Vv][Ee]([Nn][Uu][Ee])?[ \-]+[Ee]$',st) :
        st = "E "+re.sub(r'[ \-]+([Ee][Aa][Ss][Tt]|[Ee])$','',st)
        DIR = 'E'
    #See if a st TYPE can be identified#
    st = re.sub(r'[ \-]+([Ss][Tt][Rr]?[Ee]?[Ee]?[Tt]?[SsEe]?|[Ss][\.][Tt]|[Ss][Tt]?.?[Rr][Ee][Ee][Tt])$',' St',st)
    st = re.sub(r'[ \-]+([Aa][Vv]|[Aa][Vvw][Eeo&]|[Aa][VvBb][Ee][Nn][Uu]?[EesS]?|[aA]veenue|[Aa]vn[e]?ue|[Aa][Vv][Ee])$',' Ave',st)
    # Lettered avenues ("Avenue N"): move the letter in front -> "N Ave".
    match = re.search("[Aa][Vv][Ee]([Nn][Uu][Ee])?[ \-]+([a-zA-Z])$",st)
    if match :
        st = re.sub("([a-zA-Z])$","",st)
        st = re.sub("[Aa][Vv][Ee]([Nn][Uu][Ee])?[ \-]+",match.group(2)+" Ave",st)
    st = re.sub(r'[ \-]+([Bb]\'?[Ll][Vv]\'?[Dd]|Bl\'?v\'?d|Blv|Blvi|Bly|Bldv|Bvld|Bol\'d|[Bb][Oo][Uu][Ll][EeAa]?[Vv]?[Aa]?[Rr]?[Dd]?)$',' Blvd',st)
    st = re.sub(r'[ \-]+([Rr][Dd]|[Rr][Oo][Aa][Dd])$',' Road',st)
    st = re.sub(r'[ \-]+[Dd][Rr][Ii]?[Vv]?[Ee]?$',' Drive',st)
    st = re.sub(r'[ \-]+([Cc][Oo][Uu]?[Rr][Tt]|[Cc][Tt])$',' Ct',st)
    st = re.sub(r'[ \-]+([Pp][Ll][Aa]?[Cc]?[Ee]?)$',' Pl',st)
    st = re.sub(r'[ \-]+([Ss][Qq][Uu]?[Aa]?[Rr]?[Ee]?)$',' Sq',st)
    st = re.sub(r'[ \-]+[Cc]ircle$',' Cir',st)
    st = re.sub(r'[ \-]+([Pp]rkway|[Pp]arkway|[Pp]ark [Ww]ay|[Pp]kwa?y|[Pp]ky|[Pp]arkwy|[Pp]rakway|[Pp]rkwy|[Pp]wy)$',' Pkwy',st)
    st = re.sub(r'[ \-]+[Ww][Aa][Yy]$',' Way',st)
    st = re.sub(r'[ \-]+[Aa][Ll][Ll]?[Ee]?[Yy]?$',' Aly',st)
    st = re.sub(r'[ \-]+[Tt][Ee][Rr]+[EeAa]?[Cc]?[Ee]?$',' Ter',st)
    st = re.sub(r'[ \-]+([Ll][Aa][Nn][Ee]|[Ll][Nn])$',' Ln',st)
    st = re.sub(r'[ \-]+([Pp]lzaz|[Pp][Ll][Aa][Zz][Aa])$',' Plaza',st)
    st = re.sub(r'[ \-]+([Hh]ighway)$',' Hwy',st)
    st = re.sub(r'[ \-]+([Hh]eights?)$',' Heights',st)
    # "Park" is not considered a valid TYPE because it should probably actually be part of NAME #
    match = re.search(r' (St|Ave|Blvd|Pl|Drive|Road|Ct|Railway|CityLimits|Hwy|Fwy|Pkwy|Cir|Ter|Ln|Way|Trail|Sq|Aly|Bridge|Bridgeway|Walk|Heights|Crescent|Creek|River|Line|Plaza|Esplanade|[Cc]emetery|Viaduct|Trafficway|Trfy|Turnpike)$',st)
    if match :
        TYPE = match.group(1)
    #Combinations of directions at beginning of name
    match = re.search(r'^([Nn][Oo\.]?[\s]?[Ee][\.]?|[Nn]ortheast|[Nn]orth\s+?[Ee]ast)[ \-]+',st)
    if match :
        if st == match.group(0)+TYPE :
            # The whole name IS the direction word ("Northeast Ave").
            NAME = 'Northeast'
        else :
            st = "NE "+re.sub(r'^([Nn][Oo\.]?[\s]?[Ee][\.]?|[Nn]ortheast|[Nn]orth\s+?[Ee]ast)[ \-]+','',st)
            DIR = 'NE'
    match = re.search(r'^([Nn][Oo\.]?[\s]?[Ww][\.]?|[Nn]orthwest|[Nn]orth\s+?[Ww]est)[ \-]+',st)
    if match :
        if st == match.group(0)+TYPE :
            NAME = 'Northwest'
        else :
            st = "NW "+re.sub(r'^([Nn][Oo\.]?[\s]?[Ww][\.]?|[Nn]orthwest|[Nn]orth\s+?[Ww]est)[ \-]+','',st)
            DIR = 'NW'
    match = re.search(r'^([Ss][Oo\.]?[\s]?[Ee][\.]?|[Ss]outheast|[Ss]outh\s+?[Ee]ast)[ \-]+',st)
    if match :
        if st == match.group(0)+TYPE :
            NAME = 'Southeast'
        else :
            st = "SE "+re.sub(r'^([Ss][Oo\.]?[\s]?[Ee][\.]?|[Ss]outheast|[Ss]outh\s+?[Ee]ast)[ \-]+','',st)
            DIR = 'SE'
    match = re.search(r'^([Ss][Oo\.]?[\s]?[Ww][\.]?|[Ss]outhwest|[Ss]outh\s+?[Ww]est)[ \-]+',st)
    if match :
        if st == match.group(0)+TYPE :
            NAME = 'Southwest'
        else :
            st = "SW "+re.sub(r'^([Ss][Oo\.]?[\s]?[Ww][\.]?|[Ss]outhwest|[Ss]outh\s+?[Ww]est)[ \-]+','',st)
            DIR = 'SW'
    #See if there is a st DIR. again, make sure that it's the DIR and not actually the NAME (e.g. North Ave, E St [not East St])
    if DIR == '' :
        match = re.search(r'^([nN]|[Nn]\.|[Nn]o|[nN]o\.|[Nn][Oo][Rr][Tt]?[Hh]?)[ \-]+',st)
        if match :
            if st == match.group(0)+TYPE :
                # Full word -> spelled-out NAME; single letter -> keep letter.
                if len(match.group(1)) > 1 :
                    NAME = 'North'
                else :
                    NAME = 'N'
            else :
                st = "N "+re.sub(r'^([nN]|[Nn]\.|[Nn]o|[nN]o\.|[Nn][Oo][Rr][Tt]?[Hh]?)[ \-]+','',st)
                DIR = 'N'
        match = re.search(r'^([sS]|[Ss]\.|[Ss]o|[Ss]o\.|[Ss][Oo][Uu][Tt]?[Hh]?)[ \-]+',st)
        if match :
            if st == match.group(0)+TYPE :
                if len(match.group(1)) > 1 :
                    NAME = 'South'
                else :
                    NAME = 'S'
            else :
                st = "S "+re.sub(r'^([sS]|[Ss]\.|[Ss]o|[Ss]o\.|[Ss][Oo][Uu][Tt]?[Hh]?)[ \-]+','',st)
                DIR = 'S'
        match = re.search(r'^([wW]|[Ww]\.|[Ww][Ee][Ss]?[Tt]?[\.]?)[ \-]+',st)
        if match :
            if st == match.group(0)+TYPE :
                if len(match.group(1)) > 1 :
                    NAME = 'West'
                else :
                    NAME = 'W'
            else :
                st = "W "+re.sub(r'^([wW]|[Ww]\.|[Ww][Ee][Ss]?[Tt]?[\.]?)[ \-]+','',st)
                DIR = 'W'
        match = re.search(r'^([eE]|[Ee][\.\,]|[Ee][Ee]?[Aa]?[Ss][Tt][\.]?|[Ee]a[Ss]?)[ \-]+',st)
        if match :
            if st == match.group(0)+TYPE :
                if len(match.group(1)) > 1 :
                    NAME = 'East'
                else :
                    NAME = 'E'
            else :
                st = "E "+re.sub(r'^([eE]|[Ee][\.\,]|[Ee][Ee]?[Aa]?[Ss][Tt][\.]?|[Ee]a[Ss]?)[ \-]+','',st)
                DIR = 'E'
    #get the st NAME and standardize it
    match = re.search('^'+DIR+'(.+)'+TYPE+'$',st)
    if NAME == '' :
        #If NAME is not 'North', 'West', etc...
        if match :
            NAME = match.group(1).strip()
            #convert written-out numbers to digits
            #TODO: Make these work for all exceptions (go thru text file with find)
            NAME = re.sub("^[Tt]enth","10th",NAME)
            NAME = re.sub("^[Ee]leven(th)?","11th",NAME)
            NAME = re.sub("^[Tt]wel[fv]?e?th","12th",NAME)
            NAME = re.sub("^[Tt]hirteen(th)?","13th",NAME)
            NAME = re.sub("^[Ff]ourt[h]?een(th)?","14th",NAME)
            NAME = re.sub("^[Ff]ift[h]?een(th)?","15th",NAME)
            NAME = re.sub("^[Ss]ixt[h]?een(th)?","16th",NAME)
            NAME = re.sub("^[Ss]event[h]?een(th)?","17th",NAME)
            NAME = re.sub("^[eE]ighteen(th)?","18th",NAME)
            NAME = re.sub("^[Nn]inet[h]?e+n(th)?","19th",NAME)
            NAME = re.sub("^[Tt]went[iy]eth","20th",NAME)
            NAME = re.sub("^[Tt]hirt[iy]eth","30th",NAME)
            NAME = re.sub("^[Ff]o[u]?rt[iy]eth","40th",NAME)
            NAME = re.sub("^[Ff]ift[iy]eth", "50th",NAME)
            NAME = re.sub("^[Ss]ixt[iy]eth", "60th",NAME)
            NAME = re.sub("^[Ss]event[iy]eth", "70th",NAME)
            NAME = re.sub("^[Ee]ight[iy]eth", "80th",NAME)
            NAME = re.sub("^[Nn]inet[iy]eth", "90th",NAME)
            # Tens prefixes first ("Twenty Third" -> "2third"), ...
            NAME = re.sub("[Tt]wenty[ \-]*","2",NAME)
            NAME = re.sub("[Tt]hirty[ \-]*","3",NAME)
            NAME = re.sub("[Ff]orty[ \-]*","4",NAME)
            NAME = re.sub("[Ff]ifty[ \-]*","5",NAME)
            NAME = re.sub("[Ss]ixty[ \-]*","6",NAME)
            NAME = re.sub("[Ss]eventy[ \-]*","7",NAME)
            NAME = re.sub("[Ee]ighty[ \-]*","8",NAME)
            NAME = re.sub("[Nn]inety[ \-]*","9",NAME)
            # ... then the units ("2third" -> "23rd"). Guards only rewrite
            # when the word starts the name or follows a tens digit.
            if re.search("(^|[0-9]+.*)([Ff]irst|[Oo]ne)$",NAME) :
                NAME = re.sub("([Ff]irst|[Oo]ne)$","1st",NAME)
            if re.search("(^|[0-9]+.*)([Ss]econd|[Tt]wo)$",NAME) :
                NAME = re.sub("([Ss]econd|[Tt]wo)$","2nd",NAME)
            if re.search("(^|[0-9]+.*)([Tt]hird|[Tt]hree)$",NAME) :
                NAME = re.sub("([Tt]hird|[Tt]hree)$","3rd",NAME)
            if re.search("(^|[0-9]+.*)[Ff]our(th)?$",NAME) :
                NAME = re.sub("[Ff]our(th)?$","4th",NAME)
            if re.search("(^|[0-9]+.*)([Ff]ifth|[Ff]ive)$",NAME) :
                NAME = re.sub("([Ff]ifth|[Ff]ive)$","5th",NAME)
            if re.search("(^|[0-9]+.*)[Ss]ix(th)?$",NAME) :
                NAME = re.sub("[Ss]ix(th)?$","6th",NAME)
            if re.search("(^|[0-9]+.*)[Ss]even(th)?$",NAME) :
                NAME = re.sub("[Ss]even(th)?$","7th",NAME)
            if re.search("(^|[0-9]+.*)[Ee]igh?th?$",NAME) :
                NAME = re.sub("[Ee]igh?th?$","8th",NAME)
            if re.search("(^|[0-9]+.*)[Nn]in(th|e)+$",NAME) :
                NAME = re.sub("[Nn]in(th|e)+$","9th",NAME)
            if re.search("[0-9]+",NAME) :
                if re.search("^[0-9]+$",NAME) : #if NAME is only numbers (no suffix), add the correct suffix
                    suffixes = {'11':'11th','12':'12th','13':'13th','1':'1st','2':'2nd','3':'3rd','4':'4th','5':'5th','6':'6th','7':'7th','8':'8th','9':'9th','0':'0th'}
                    num = re.search("[0-9]+$",NAME).group(0)
                    suff = ''
                    # if num is not found in suffixes dict, remove leftmost digit until it is found... 113 -> 13 -> 13th; 24 -> 4 -> 4th
                    while num and suff == '' :
                        suff = suffixes.get(num,'')
                        if suff == '' :
                            num = num[1:]
                    if suff != '' :
                        NAME = re.sub(num+'$',suff,NAME)
                else :
                    # Fix incorrect suffixes e.g. "73d St" -> "73rd St"
                    if re.search("[23]d$",NAME) :
                        NAME = re.sub("3d","3rd",NAME)
                        NAME = re.sub("2d","2nd",NAME)
                    # Close up detached suffixes e.g. "21 st" -> "21st".
                    if re.search("1 [Ss]t|2 nd|3 rd|1[1-3] th|[04-9] th",NAME) :
                        # BUGFIX: suff was previously unbound when the
                        # narrower pattern below failed, so the (bare) except
                        # handler itself crashed with NameError.
                        suff = ''
                        try :
                            suff = re.search("[0-9] ([Sa-z][a-z])",NAME).group(1)
                        except AttributeError :
                            print("NAME: "+NAME+", suff: "+suff+", st: "+st)
                        if suff != '' :
                            NAME = re.sub(" "+suff,suff,NAME)
                    # TODO: identify corner cases with numbers e.g. "51 and S- Hermit"
                    # This \/ is a bit overzealous...! #
                    hnum = re.search("^([0-9]+[ \-]+).+",NAME) #housenum in stname?
                    if hnum :
                        NAME = re.sub(hnum.group(1),"",NAME) #remove housenum. May want to update housenum field, maybe not though.
            else :
                NAME = NAME.title()
        else :
            # st should always decompose as DIR + NAME + TYPE here.
            assert False, "could not split street into DIR/NAME/TYPE: "+st
    # Standardize "St ____ Ave" -> "Saint ____ Ave" #
    NAME = re.sub("^([Ss][Tt]\.?|[Ss][Aa][Ii][Nn][Tt])[ \-]","Saint ",NAME)
    # Splice the standardized NAME back into the street string.
    if match :
        st = re.sub(re.escape(match.group(1).strip()),NAME,st,count=1).strip()
    if re.search("[Cc]ity [Ll]imits?",st) :
        return "City Limits"
    # Default the TYPE to "St" when none was recognized.
    TYPE = re.search(r' (St|Ave?|Blvd|Pl|Dr|Drive|Rd|Road|Ct|Railway|CityLimits|Hwy|Fwy|Pkwy|Cir|Terr?a?c?e?|La|Ln|Way|Trail|Sq|All?e?y?|Bridge|Bridgeway|Walk|Crescent|Creek|Rive?r?|Ocean|Bay|Canal|Sound|[Ll]ine|Plaza|Esplanade|[Cc]emetery|Viaduct|Trafficway|Trfy|Turnpike)$',st)
    if not TYPE :
        st = st+" St"
    return st
| 45.802068
| 491
| 0.507546
| 5,210
| 31,008
| 3.01286
| 0.097505
| 0.06721
| 0.067656
| 0.088616
| 0.876728
| 0.86233
| 0.856661
| 0.84341
| 0.837294
| 0.830477
| 0
| 0.017011
| 0.162055
| 31,008
| 676
| 492
| 45.869822
| 0.587115
| 0.159572
| 0
| 0.763838
| 0
| 0.199262
| 0.439471
| 0.29558
| 0
| 0
| 0
| 0.001479
| 0.01107
| 1
| 0.01107
| false
| 0.00369
| 0.00369
| 0
| 0.04059
| 0.00738
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
eafa2ac2103b5eeb45fa4df9da68aba5fb643462
| 11,521
|
py
|
Python
|
tests/unittests/test_blocking.py
|
seifert/subwabbit
|
319676a2a31928a9d8ef1a7a3a3182d175c2c28e
|
[
"BSD-3-Clause"
] | 12
|
2019-06-03T04:10:50.000Z
|
2021-10-01T18:24:23.000Z
|
tests/unittests/test_blocking.py
|
seifert/subwabbit
|
319676a2a31928a9d8ef1a7a3a3182d175c2c28e
|
[
"BSD-3-Clause"
] | 3
|
2019-10-17T17:57:03.000Z
|
2021-10-02T07:48:55.000Z
|
tests/unittests/test_blocking.py
|
seifert/subwabbit
|
319676a2a31928a9d8ef1a7a3a3182d175c2c28e
|
[
"BSD-3-Clause"
] | 7
|
2019-09-06T19:15:16.000Z
|
2021-10-01T18:24:24.000Z
|
import copy
import pytest
import random
from unittest.mock import Mock, MagicMock, call, patch
from subwabbit.base import VowpalWabbitDummyFormatter
from subwabbit.blocking import VowpalWabbitProcess
@pytest.mark.parametrize(
    'return_predictions_batch',
    [
        [[1]],
        [[1, 2]],
        [[1, 2], [3]],
        [[1, 2], [3, 4]],
        [[1, 2], [3, 4], [5]],
        [[1, 2], [3, 4], [5, 6]]
    ],
    ids=[
        'Batch has less values than batch size',
        'Batch has same length as batch',
        'One and a half batches',
        'Two batches',
        'Two and a half batches',
        'Three batches'
    ]
)
def test_predict_without_timeout(return_predictions_batch):
    """predict() without a timeout: each item batch is sent to vowpal as
    formatted examples, and all returned predictions come back in order."""
    batch_size = 2
    num_items = sum(len(batch) for batch in return_predictions_batch)
    # The mock pops batches from this copy; the original parametrized list
    # is kept intact for the final assertion.
    return_predictions_batch_copy = return_predictions_batch.copy()
    formatter = VowpalWabbitDummyFormatter()
    common_features = '|a user1'
    items_features = ['|b item{}'.format(i) for i in range(num_items)]
    # Fake VowpalWabbitProcess instance: the I/O helpers are mocked so
    # predict() can be invoked unbound via VowpalWabbitProcess.predict(self, ...).
    self = Mock(
        formatter=formatter,
        batch_size=batch_size,
        write_only=False,
        _send_lines_to_vowpal=Mock(),
        _get_predictions_from_vowpal=Mock(side_effect=lambda detailed_metrics, debug_info: return_predictions_batch_copy.pop(0))
    )
    detailed_metrics = MagicMock()
    predictions = list(VowpalWabbitProcess.predict(self, common_features, iter(items_features),
                                                   detailed_metrics=detailed_metrics))
    # Each call to vowpal must carry exactly one batch of formatted examples.
    for i, performed_call in enumerate(self._send_lines_to_vowpal.mock_calls):
        items_from = i * batch_size
        items_to = i * batch_size + batch_size
        assert performed_call == call(
            [formatter.get_formatted_example(common_features, item_features) for item_features in items_features[items_from:items_to]],
            detailed_metrics, debug_info=None
        )
    assert predictions == [prediction for batch in return_predictions_batch for prediction in batch]
@pytest.mark.parametrize(
    'return_predictions_batch, expected_predictions, timeout_after_item',
    [
        ([[1, 2], [3, 4], [5, 6]], [], 0),
        ([[1, 2], [3, 4], [5, 6]], [], 1),  # no prediction is provided because there was no batch in progress in the moment of timeout
        ([[1, 2], [3, 4], [5, 6]], [1, 2], 2),  # 2 predictions are returned
        ([[1, 2], [3, 4], [5, 6]], [1, 2, 3, 4, 5, 6], 8)  # all predictions are returned
    ],
    ids=[
        'Timeout immediately',
        'Timeout after first item',
        'Timeout after two items - ',
        'All items in time'
    ]
)
def test_predict_with_timeout(return_predictions_batch, expected_predictions, timeout_after_item):
    """predict() with timeout=0.5: only predictions already in flight when
    the (mocked) clock passes the deadline are yielded."""
    batch_size = 2
    num_items = sum(len(batch) for batch in return_predictions_batch)
    # The mock pops batches from this copy.
    return_predictions_batch_copy = return_predictions_batch.copy()
    formatter = VowpalWabbitDummyFormatter()
    common_features = '|a user1'
    items_features = ['|b item{}'.format(i) for i in range(num_items)]
    processed_items = -1
    # Fake clock: jumps past the 0.5s timeout once `timeout_after_item`
    # items have been pulled from the items iterator.
    def perf_counter_side_effect():
        if processed_items >= timeout_after_item:
            return 1
        else:
            return 0
    perf_counter_mock = Mock(
        side_effect=perf_counter_side_effect
    )
    def get_items_iterator(items):
        # Counts consumed items so the fake clock can tell "time".
        nonlocal processed_items
        for item in items:
            processed_items += 1
            yield item
    self = Mock(
        formatter=formatter,
        batch_size=batch_size,
        write_only=False,
        _send_lines_to_vowpal=Mock(),
        _get_predictions_from_vowpal=Mock(side_effect=lambda detailed_metrics, debug_info: return_predictions_batch_copy.pop(0))
    )
    detailed_metrics = MagicMock()
    with patch('subwabbit.blocking.time.perf_counter', new=perf_counter_mock):
        predictions = list(VowpalWabbitProcess.predict(self, common_features, get_items_iterator(items_features),
                                                       timeout=0.5,
                                                       detailed_metrics=detailed_metrics))
    # Batches that were sent must each match one formatted-example batch.
    for i, performed_call in enumerate(self._send_lines_to_vowpal.mock_calls):
        items_from = i * batch_size
        items_to = i * batch_size + batch_size
        assert performed_call == call(
            [formatter.get_formatted_example(common_features, item_features) for item_features in items_features[items_from:items_to]],
            detailed_metrics, debug_info=None
        )
    assert predictions == expected_predictions
@pytest.mark.parametrize(
    'return_predictions_batch',
    [
        [[1]],
        [[1, 2]],
        [[1, 2], [3]],
        [[1, 2], [3, 4]],
        [[1, 2], [3, 4], [5]],
        [[1, 2], [3, 4], [5, 6]]
    ],
    ids=[
        'Batch has less values than batch size',
        'Batch has same length as batch',
        'One and a half batches',
        'Two batches',
        'Two and a half batches',
        'Three batches'
    ]
)
def test_predict_io_calls(return_predictions_batch):
    """predict() on a real VowpalWabbitProcess (subprocess mocked): checks
    the exact bytes written to vw's stdin and the predictions read back."""
    batch_size = 2
    num_items = sum(len(batch) for batch in return_predictions_batch)
    return_predictions_batch_copy = copy.deepcopy(return_predictions_batch)
    def get_next_prediction():
        # Pops predictions one at a time, recursing past exhausted batches.
        if return_predictions_batch_copy[0]:
            return str(return_predictions_batch_copy[0].pop(0))
        else:
            return_predictions_batch_copy.pop(0)
            return get_next_prediction()
    formatter = VowpalWabbitDummyFormatter()
    common_features = '|a user1'
    items_features = ['|b item{}'.format(i) for i in range(num_items)]
    # Fake vw subprocess whose stdout serves the canned predictions.
    vw_process = Mock(
        stdin=Mock(),
        stdout=Mock(
            readline=Mock(side_effect=lambda: bytes(get_next_prediction() + '\n', encoding='utf-8'))
        )
    )
    popen = Mock(
        return_value=vw_process
    )
    with patch('subwabbit.blocking.subprocess.Popen', new=popen):
        model = VowpalWabbitProcess(
            formatter=formatter,
            batch_size=batch_size,
            vw_args=[]
        )
        predictions = list(model.predict(common_features, iter(items_features)))
    # Expect one write (whole batch joined with newlines) + flush per batch.
    expected_calls = []
    for i, item_features in enumerate(return_predictions_batch):
        items_from = i * batch_size
        items_to = i * batch_size + batch_size
        expected_calls.append(
            call.write(
                bytes(
                    '\n'.join([formatter.get_formatted_example(common_features, item_features) for item_features in
                               items_features[items_from:items_to]]) + '\n',
                    encoding='utf-8'
                )
            )
        )
        expected_calls.append(call.flush())
    vw_process.stdin.assert_has_calls(expected_calls)
    assert predictions == [prediction for batch in return_predictions_batch for prediction in batch]
    assert model.unprocessed_batch_sizes == []
@pytest.mark.parametrize(
    'return_predictions_batch',
    [
        [[1, 2], [3, 4], [5]],
        [[1, 2], [3, 4], [5, 6]]
    ],
    ids=[
        'Last batch is not full',
        'Last batch is full'
    ]
)
def test_train(return_predictions_batch):
    """train() writes label/weight-annotated example batches to vw's stdin
    (one write + flush per batch) and leaves no unprocessed batches."""
    batch_size = 2
    num_items = sum(len(batch) for batch in return_predictions_batch)
    return_predictions_batch_copy = copy.deepcopy(return_predictions_batch)
    formatter = VowpalWabbitDummyFormatter()
    common_features = '|a user1'
    items_features = ['|b item{}'.format(i) for i in range(num_items)]
    weights = [random.random() for _ in range(num_items)]
    labels = [random.random() for _ in range(num_items)]
    def get_next_prediction():
        # Pops predictions one at a time, recursing past exhausted batches.
        if return_predictions_batch_copy[0]:
            return str(return_predictions_batch_copy[0].pop(0))
        else:
            return_predictions_batch_copy.pop(0)
            return get_next_prediction()
    # Fake vw subprocess; training reads vw's echoed output via stdout.
    vw_process = Mock(
        stdin=Mock(),
        stdout=Mock(
            readline=Mock(side_effect=lambda: bytes(get_next_prediction() + '\n', encoding='utf-8'))
        )
    )
    popen = Mock(
        return_value=vw_process
    )
    with patch('subwabbit.blocking.subprocess.Popen', new=popen):
        model = VowpalWabbitProcess(
            formatter=formatter,
            batch_size=batch_size,
            vw_args=[]
        )
        assert model.vw_process == vw_process
        model.train(common_features, iter(items_features), iter(labels), iter(weights))
    # Each batch: one stdin.write of the joined examples, then a flush.
    expected_calls = []
    for i, item_features in enumerate(return_predictions_batch):
        items_from = i * batch_size
        items_to = i * batch_size + batch_size
        expected_calls.append(
            call.write(
                bytes(
                    '\n'.join([
                        formatter.get_formatted_example(common_features, item_features, label, weight)
                        for item_features, label, weight in zip(
                            items_features[items_from:items_to],
                            labels[items_from:items_to],
                            weights[items_from:items_to])
                    ]) + '\n',
                    encoding='utf-8'
                )
            )
        )
        expected_calls.append(call.flush())
    vw_process.stdin.assert_has_calls(expected_calls)
    assert model.unprocessed_batch_sizes == []
@pytest.mark.parametrize(
    'return_predictions_batch',
    [
        [[1, 2], [3, 4], [5]],
        [[1, 2], [3, 4], [5, 6]]
    ],
    ids=[
        'Last batch is not full',
        'Last batch is full'
    ]
)
def test_train_write_only(return_predictions_batch):
    """train() with write_only=True: batches are written to vw's stdin but
    stdout is never touched (no predictions are read back)."""
    batch_size = 2
    num_items = sum(len(batch) for batch in return_predictions_batch)
    formatter = VowpalWabbitDummyFormatter()
    common_features = '|a user1'
    items_features = ['|b item{}'.format(i) for i in range(num_items)]
    weights = [random.random() for _ in range(num_items)]
    labels = [random.random() for _ in range(num_items)]
    # stdout is a plain Mock with no configured readline: any access would
    # be visible to assert_not_called() below.
    vw_process = Mock(
        stdin=Mock(),
        stdout=Mock()
    )
    popen = Mock(
        return_value=vw_process
    )
    with patch('subwabbit.blocking.subprocess.Popen', new=popen):
        model = VowpalWabbitProcess(
            formatter=formatter,
            batch_size=batch_size,
            write_only=True,
            vw_args=[]
        )
        assert model.vw_process == vw_process
        model.train(common_features, iter(items_features), iter(labels), iter(weights))
    # Each batch: one stdin.write of the joined examples, then a flush.
    expected_calls = []
    for i, item_features in enumerate(return_predictions_batch):
        items_from = i * batch_size
        items_to = i * batch_size + batch_size
        expected_calls.append(
            call.write(
                bytes(
                    '\n'.join([
                        formatter.get_formatted_example(common_features, item_features, label, weight)
                        for item_features, label, weight in zip(
                            items_features[items_from:items_to],
                            labels[items_from:items_to],
                            weights[items_from:items_to])
                    ]) + '\n',
                    encoding='utf-8'
                )
            )
        )
        expected_calls.append(call.flush())
    vw_process.stdin.assert_has_calls(expected_calls)
    vw_process.stdout.assert_not_called()
| 34.912121
| 135
| 0.594046
| 1,310
| 11,521
| 4.948092
| 0.116794
| 0.094415
| 0.122185
| 0.009256
| 0.872107
| 0.864702
| 0.85992
| 0.840636
| 0.819192
| 0.819192
| 0
| 0.016467
| 0.304227
| 11,521
| 329
| 136
| 35.018237
| 0.792166
| 0.012586
| 0
| 0.714777
| 0
| 0
| 0.076064
| 0.024886
| 0
| 0
| 0
| 0
| 0.044674
| 1
| 0.030928
| false
| 0
| 0.020619
| 0
| 0.072165
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dc77aa28a3696662e89d95d26b590041e12dc276
| 5,483
|
py
|
Python
|
tests/dao/test_delete.py
|
nadirhamid/protean
|
d31bc634e05c9221e82136bf18c2ceaa0982c1c8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dao/test_delete.py
|
nadirhamid/protean
|
d31bc634e05c9221e82136bf18c2ceaa0982c1c8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dao/test_delete.py
|
nadirhamid/protean
|
d31bc634e05c9221e82136bf18c2ceaa0982c1c8
|
[
"BSD-3-Clause"
] | null | null | null |
# Protean
import pytest
from protean.core.exceptions import ObjectNotFoundError
from protean.core.queryset import Q
# Local/Relative Imports
from .elements import Person, PersonRepository, User
class TestDAODeleteFunctionality:
    """DAO delete behaviour: deleting single records, deleting everything,
    and deleting only the records matching a filter."""

    @pytest.fixture(autouse=True)
    def register_elements(self, test_domain):
        # Register the aggregates/repository every test in this class needs.
        test_domain.register(Person)
        test_domain.register(PersonRepository, aggregate_cls=Person)
        test_domain.register(User)

    def test_delete_an_object_in_repository_by_id(self, test_domain):
        """Delete an object in the repository by ID"""
        person = test_domain.get_dao(Person).create(
            id=3, first_name="John", last_name="Doe", age=22
        )
        deleted_person = test_domain.get_dao(Person).delete(person)
        assert deleted_person is not None
        assert deleted_person.state_.is_destroyed is True
        # The record must no longer be retrievable.
        with pytest.raises(ObjectNotFoundError):
            test_domain.get_dao(Person).get(3)

    def test_delete_all_records_in_repository(self, test_domain):
        """Delete all objects in a repository"""
        test_domain.get_dao(Person).create(
            id=1, first_name="Athos", last_name="Musketeer", age=2
        )
        test_domain.get_dao(Person).create(
            id=2, first_name="Porthos", last_name="Musketeer", age=3
        )
        test_domain.get_dao(Person).create(
            id=3, first_name="Aramis", last_name="Musketeer", age=4
        )
        test_domain.get_dao(Person).create(
            id=4, first_name="dArtagnan", last_name="Musketeer", age=5
        )
        person_records = test_domain.get_dao(Person).query.filter(Q())
        assert person_records.total == 4
        test_domain.get_dao(Person).delete_all()
        person_records = test_domain.get_dao(Person).query.filter(Q())
        assert person_records.total == 0

    def test_deleting_a_persisted_entity(self, test_domain):
        """Delete a persisted entity and verify its destroyed state"""
        person = test_domain.get_dao(Person).create(
            id=3, first_name="Jim", last_name="Carrey"
        )
        deleted_person = test_domain.get_dao(Person).delete(person)
        assert deleted_person is not None
        assert deleted_person.state_.is_destroyed is True
        with pytest.raises(ObjectNotFoundError):
            test_domain.get_dao(Person).get(3)

    def test_deleting_all_entities_of_a_type(self, test_domain):
        """delete_all() removes every Person record."""
        test_domain.get_dao(Person).create(
            id=1, first_name="Athos", last_name="Musketeer", age=2
        )
        test_domain.get_dao(Person).create(
            id=2, first_name="Porthos", last_name="Musketeer", age=3
        )
        test_domain.get_dao(Person).create(
            id=3, first_name="Aramis", last_name="Musketeer", age=4
        )
        test_domain.get_dao(Person).create(
            id=4, first_name="dArtagnan", last_name="Musketeer", age=5
        )
        people = test_domain.get_dao(Person).query.all()
        assert people.total == 4
        test_domain.get_dao(Person).delete_all()
        people = test_domain.get_dao(Person).query.all()
        assert people.total == 0

    def test_deleting_all_records_of_a_type_satisfying_a_filter(self, test_domain):
        """filter(...).delete_all() removes only the matching records."""
        test_domain.get_dao(Person).create(
            id=1, first_name="Athos", last_name="Musketeer", age=2
        )
        test_domain.get_dao(Person).create(
            id=2, first_name="Porthos", last_name="Musketeer", age=3
        )
        test_domain.get_dao(Person).create(
            id=3, first_name="Aramis", last_name="Musketeer", age=4
        )
        test_domain.get_dao(Person).create(
            id=4, first_name="d'Artagnan", last_name="Musketeer", age=5
        )
        # Perform update
        deleted_count = test_domain.get_dao(Person).query.filter(age__gt=3).delete_all()
        # Query and check if only the relevant records have been deleted
        assert deleted_count == 2
        person1 = test_domain.get_dao(Person).get(1)
        person2 = test_domain.get_dao(Person).get(2)
        assert person1 is not None
        assert person2 is not None
        with pytest.raises(ObjectNotFoundError):
            test_domain.get_dao(Person).get(3)
        with pytest.raises(ObjectNotFoundError):
            test_domain.get_dao(Person).get(4)

    def test_deleting_records_satisfying_a_filter(self, test_domain):
        """filter(...).delete() removes only the matching records."""
        test_domain.get_dao(Person).create(
            id=1, first_name="Athos", last_name="Musketeer", age=2
        )
        test_domain.get_dao(Person).create(
            id=2, first_name="Porthos", last_name="Musketeer", age=3
        )
        test_domain.get_dao(Person).create(
            id=3, first_name="Aramis", last_name="Musketeer", age=4
        )
        test_domain.get_dao(Person).create(
            id=4, first_name="d'Artagnan", last_name="Musketeer", age=5
        )
        # Perform update
        deleted_count = test_domain.get_dao(Person).query.filter(age__gt=3).delete()
        # Query and check if only the relevant records have been updated
        assert deleted_count == 2
        assert test_domain.get_dao(Person).query.all().total == 2
        assert test_domain.get_dao(Person).get(1) is not None
        assert test_domain.get_dao(Person).get(2) is not None
        with pytest.raises(ObjectNotFoundError):
            test_domain.get_dao(Person).get(3)
        with pytest.raises(ObjectNotFoundError):
            test_domain.get_dao(Person).get(4)
| 37.047297
| 88
| 0.653839
| 728
| 5,483
| 4.667582
| 0.137363
| 0.144202
| 0.149205
| 0.183637
| 0.803414
| 0.786933
| 0.786933
| 0.741613
| 0.741613
| 0.722778
| 0
| 0.014142
| 0.239103
| 5,483
| 147
| 89
| 37.29932
| 0.800336
| 0.055809
| 0
| 0.550459
| 0
| 0
| 0.052387
| 0
| 0
| 0
| 0
| 0
| 0.137615
| 1
| 0.06422
| false
| 0
| 0.036697
| 0
| 0.110092
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dcab73c3b23c5a4ea2df23c1abaa5692d50fbfe4
| 160
|
py
|
Python
|
tests/sat/Models/example5.satelite.variable.elimination.cnf.SAT.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 19
|
2015-12-03T08:53:45.000Z
|
2022-03-31T02:09:43.000Z
|
tests/sat/Models/example5.satelite.variable.elimination.cnf.SAT.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 80
|
2017-11-25T07:57:32.000Z
|
2018-06-10T19:03:30.000Z
|
tests/sat/Models/example5.satelite.variable.elimination.cnf.SAT.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 6
|
2015-01-15T07:51:48.000Z
|
2020-06-18T14:47:48.000Z
|
# DIMACS CNF instance (9 variables, 13 clauses) fed to the solver under test.
# NOTE(review): the name `input` shadows the builtin; kept as-is because the
# test harness appears to read these module-level names by convention.
input = """
p cnf 9 13
-1 -2 0
-1 2 3 0
-1 4 -5 0
-6 -1 7 0
-1 -2 0
-1 2 3 0
-1 4 -5 0
-6 -1 7 0
1 -2 0
1 -3 0
1 -4 0
8 1 2 0
-9 1 2 0
"""
# Expected solver verdict for the instance above.
output = """
SAT
"""
| 8
| 12
| 0.425
| 54
| 160
| 1.259259
| 0.296296
| 0.235294
| 0.220588
| 0.176471
| 0.5
| 0.5
| 0.5
| 0.5
| 0.5
| 0.5
| 0
| 0.5
| 0.375
| 160
| 19
| 13
| 8.421053
| 0.18
| 0
| 0
| 0.526316
| 0
| 0
| 0.80625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f4e03a36d037806627cdd77eef67cc5b02fbbef1
| 17,815
|
py
|
Python
|
sds/models/ensemble.py
|
hanyas/sds
|
3c195fb9cbd88a9284287d62c0eacb6afc4598a7
|
[
"MIT"
] | 12
|
2019-09-21T13:52:09.000Z
|
2022-02-14T06:48:46.000Z
|
sds/models/ensemble.py
|
hanyas/sds
|
3c195fb9cbd88a9284287d62c0eacb6afc4598a7
|
[
"MIT"
] | 1
|
2020-01-22T12:34:52.000Z
|
2020-01-26T21:14:11.000Z
|
sds/models/ensemble.py
|
hanyas/sds
|
3c195fb9cbd88a9284287d62c0eacb6afc4598a7
|
[
"MIT"
] | 5
|
2019-09-18T15:11:26.000Z
|
2021-12-10T14:04:53.000Z
|
import numpy as np
import numpy.random as npr
from sds.models import AutoRegressiveHiddenMarkovModel
from sds.models import RecurrentAutoRegressiveHiddenMarkovModel
from sds.models import ClosedLoopRecurrentAutoRegressiveHiddenMarkovModel
from sds.models import AutoRegressiveClosedLoopHiddenMarkovModel
from sds.models import HybridController
from sds.utils.decorate import ensure_args_are_viable
from joblib import Parallel, delayed
import multiprocessing
# Number of CPU cores available; caps the worker count for parallel EM.
nb_cores = multiprocessing.cpu_count()
class EnsembleHiddenMarkovModel:
    def __init__(self, nb_states, obs_dim, act_dim=0, obs_lag=1,
                 model_type='rarhmm', ensemble_size=5, **kwargs):
        """Build an ensemble of `ensemble_size` identically-configured models.

        model_type selects the member class: 'arhmm' or 'rarhmm' (keys of the
        table below). Extra **kwargs are forwarded to each member constructor.
        """
        self.nb_states = nb_states
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.obs_lag = obs_lag
        self.ensemble_size = ensemble_size
        type_list = dict(arhmm=AutoRegressiveHiddenMarkovModel,
                         rarhmm=RecurrentAutoRegressiveHiddenMarkovModel)
        self.model_type = type_list[model_type]
        self.models = [self.model_type(self.nb_states, self.obs_dim,
                                       self.act_dim, self.obs_lag, **kwargs)
                       for _ in range(self.ensemble_size)]
    def _parallel_em(self, obs, act, **kwargs):
        """Run EM on every ensemble member in parallel via joblib.

        obs/act are per-member data splits (one entry per model). Returns
        (models, lls): the fitted models and their EM log-likelihood traces.
        """
        def _create_job(model, obs, act,
                        kwargs, seed):
            # Per-job EM settings; the seed doubles as the process id.
            nb_iter = kwargs.get('nb_iter', 25)
            tol = kwargs.get('tol', 1e-4)
            initialize = kwargs.get('initialize', True)
            process_id = seed
            init_state_mstep_kwargs = kwargs.get('init_state_mstep_kwargs', {})
            init_obs_mstep_kwargs = kwargs.get('init_obs_mstep_kwargs', {})
            trans_mstep_kwargs = kwargs.get('trans_mstep_kwargs', {})
            obs_mstep_kwargs = kwargs.get('obs_mstep_kwargs', {})
            ll = model.em(obs, act,
                          nb_iter=nb_iter, tol=tol,
                          initialize=initialize, process_id=process_id,
                          init_state_mstep_kwargs=init_state_mstep_kwargs,
                          init_obs_mstep_kwargs=init_obs_mstep_kwargs,
                          trans_mstep_kwargs=trans_mstep_kwargs,
                          obs_mstep_kwargs=obs_mstep_kwargs)
            return model, ll
        nb_jobs = len(obs)
        kwargs_list = [kwargs.copy() for _ in range(nb_jobs)]
        seeds = np.linspace(0, nb_jobs - 1, nb_jobs, dtype=int)
        results = Parallel(n_jobs=min(nb_jobs, nb_cores), verbose=10, backend='loky')\
            (map(delayed(_create_job), self.models, obs, act, kwargs_list, seeds))
        # Unzip [(model, ll), ...] into two parallel lists.
        models, lls = list(map(list, zip(*results)))
        return models, lls
    @ensure_args_are_viable
    def em(self, obs, act=None,
           nb_iter=50, tol=1e-4, initialize=True,
           init_state_mstep_kwargs={},
           init_obs_mstep_kwargs={},
           trans_mstep_kwargs={},
           obs_mstep_kwargs={}, **kwargs):
        """Fit each ensemble member on its own trajectory split, in parallel.

        Returns (train_scores, test_scores): per-member log-normalizer per
        observation on the training split and on the held-out remainder.

        NOTE(review): the {} defaults are shared mutable defaults; they look
        read-only here (only forwarded), but confirm before relying on it.
        """
        from sds.utils.general import train_test_split
        train_obs, train_act = train_test_split(obs, act,
                                                nb_traj_splits=self.ensemble_size,
                                                split_trajs=False)[:2]
        self.models, lls = self._parallel_em(train_obs, train_act,
                                             nb_iter=nb_iter, tol=tol, initialize=initialize,
                                             init_state_mstep_kwargs=init_state_mstep_kwargs,
                                             init_obs_mstep_kwargs=init_obs_mstep_kwargs,
                                             trans_mstep_kwargs=trans_mstep_kwargs,
                                             obs_mstep_kwargs=obs_mstep_kwargs)
        # Observation counts, for normalizing log-likelihoods per sample.
        nb_train = [np.vstack(x).shape[0] for x in train_obs]
        nb_total = np.vstack(obs).shape[0]
        train_ll, total_ll = [], []
        for x, u, m in zip(train_obs, train_act, self.models):
            train_ll.append(m.log_normalizer(x, u))
            total_ll.append(m.log_normalizer(obs, act))
        train_scores = np.hstack(train_ll) / np.hstack(nb_train)
        # Test score = likelihood of the data each member did NOT train on.
        test_scores = (np.hstack(total_ll) - np.hstack(train_ll))\
            / (nb_total - np.hstack(nb_train))
        return train_scores, test_scores
    def step(self, hist_obs, hist_act, stoch=False, average=False):
        """One-step prediction: mean of all members' next-observation estimates."""
        nxt_obs = np.zeros((self.ensemble_size, self.obs_dim))
        for i, model in enumerate(self.models):
            # Each member's step() returns (state, next_obs); keep the obs.
            _, nxt_obs[i] = model.step(hist_obs, hist_act, stoch, average)
        return np.mean(nxt_obs, axis=0)
def forcast(self, horizon=1, hist_obs=None, hist_act=None,
            nxt_act=None, stoch=False, average=False):
    """Return the ensemble-mean multi-step forecast.

    When called with a single trajectory (int `horizon`, ndarray
    `hist_obs`) the members' forecasts are averaged directly; otherwise
    each member returns a list of per-trajectory forecasts which is
    stacked before averaging across members.
    """
    single = isinstance(horizon, int) and isinstance(hist_obs, np.ndarray)

    member_forcasts = []
    for member in self.models:
        out = member.forcast(horizon, hist_obs, hist_act,
                             nxt_act, stoch, average)[1]
        member_forcasts.append(out if single else np.stack(out, 0))
    return np.mean(np.stack(member_forcasts, axis=0), axis=0)
def _kstep_error(self, obs, act, horizon=1, stoch=False, average=False):
    """Evaluate the k-step prediction error on a single trajectory.

    For every admissible anchor point t the model is given the history up
    to t and the next `horizon` actions, and its final forecasted
    observation is compared against the true observation `horizon` steps
    ahead.

    Returns:
        (mse, smse, evar): mean squared error, standardized MSE (1 - R^2),
        and explained variance, each variance-weighted across dimensions.
    """
    from sklearn.metrics import mean_squared_error, \
        explained_variance_score, r2_score

    hist_obs, hist_act, nxt_act = [], [], []
    forcast, target, prediction = [], [], []

    # Number of anchor points: each needs `obs_lag` steps of history plus
    # `horizon` future steps inside the trajectory.
    nb_steps = obs.shape[0] - horizon - self.obs_lag + 1
    for t in range(nb_steps):
        # History up to (and including) the anchor, and the actions that
        # will be applied over the forecast horizon.
        hist_obs.append(obs[:t + self.obs_lag, :])
        hist_act.append(act[:t + self.obs_lag, :])
        nxt_act.append(act[t + self.obs_lag - 1:t + self.obs_lag - 1 + horizon, :])

    # Batched forecast: one horizon entry per anchor point.
    hr = [horizon for _ in range(nb_steps)]
    forcast = self.forcast(horizon=hr, hist_obs=hist_obs, hist_act=hist_act,
                           nxt_act=nxt_act, stoch=stoch, average=average)

    for t in range(nb_steps):
        # Compare only the last forecasted step against the true
        # observation `horizon` steps after the anchor.
        target.append(obs[t + self.obs_lag - 1 + horizon, :])
        prediction.append(forcast[t][-1, :])

    target = np.vstack(target)
    prediction = np.vstack(prediction)

    mse = mean_squared_error(target, prediction)
    # Standardized MSE: 1 - R^2, so 0 is perfect and 1 matches a
    # mean-predicting baseline.
    smse = 1. - r2_score(target, prediction, multioutput='variance_weighted')
    evar = explained_variance_score(target, prediction, multioutput='variance_weighted')
    return mse, smse, evar
@ensure_args_are_viable
def kstep_error(self, obs, act, horizon=1, stoch=False, average=False):
    """k-step error metrics, averaged over trajectories when given lists.

    A single (ndarray) trajectory is delegated to `_kstep_error`; lists of
    trajectories are evaluated one by one through the undecorated function
    and the resulting metrics are averaged.
    """
    if isinstance(obs, np.ndarray) and isinstance(act, np.ndarray):
        return self._kstep_error(obs, act, horizon, stoch, average)

    # Bypass the decorator on recursion via __wrapped__.
    scores = [self.kstep_error.__wrapped__(self, _obs, _act,
                                           horizon, stoch, average)
              for _obs, _act in zip(obs, act)]
    mse, smse, evar = zip(*scores)
    return np.mean(mse), np.mean(smse), np.mean(evar)
class EnsembleClosedLoopHiddenMarkovModel:
    """Bagging ensemble of closed-loop recurrent AR-HMMs.

    Each member is trained on its own trajectory split; train/test scores
    are reported per member as per-sample log-likelihoods.
    """

    def __init__(self, nb_states, obs_dim, act_dim, obs_lag=1,
                 ensemble_size=6, **kwargs):
        self.nb_states = nb_states
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.obs_lag = obs_lag
        self.ensemble_size = ensemble_size

        self.models = [ClosedLoopRecurrentAutoRegressiveHiddenMarkovModel(self.nb_states, self.obs_dim,
                                                                          self.act_dim, self.obs_lag, **kwargs)
                       for _ in range(self.ensemble_size)]

    def _parallel_em(self, obs, act, **kwargs):
        """Run EM for every member in a separate worker process.

        `obs`/`act` are lists with one entry per member; returns the
        fitted models and their log-likelihood traces.
        """

        def _create_job(model, obs, act, kwargs, seed):
            # Unpack per-job settings; the seed doubles as the process id.
            nb_iter = kwargs.get('nb_iter', 25)
            tol = kwargs.get('tol', 1e-4)
            initialize = kwargs.get('initialize', True)
            process_id = seed

            init_state_mstep_kwargs = kwargs.get('init_state_mstep_kwargs', {})
            init_obs_mstep_kwargs = kwargs.get('init_obs_mstep_kwargs', {})
            trans_mstep_kwargs = kwargs.get('trans_mstep_kwargs', {})
            obs_mstep_kwargs = kwargs.get('obs_mstep_kwargs', {})
            ctl_mstep_kwargs = kwargs.get('ctl_mstep_kwargs', {})

            ll = model.em(obs, act,
                          nb_iter=nb_iter, tol=tol,
                          initialize=initialize, process_id=process_id,
                          init_state_mstep_kwargs=init_state_mstep_kwargs,
                          init_obs_mstep_kwargs=init_obs_mstep_kwargs,
                          trans_mstep_kwargs=trans_mstep_kwargs,
                          obs_mstep_kwargs=obs_mstep_kwargs,
                          ctl_mstep_kwargs=ctl_mstep_kwargs)
            return model, ll

        nb_jobs = len(obs)
        kwargs_list = [kwargs.copy() for _ in range(nb_jobs)]
        # Deterministic seed per job, 0..nb_jobs-1 (np.arange is the direct
        # form of the original integer linspace).
        seeds = np.arange(nb_jobs, dtype=int)

        results = Parallel(n_jobs=min(nb_jobs, nb_cores), verbose=10, backend='loky')\
            (map(delayed(_create_job), self.models, obs, act, kwargs_list, seeds))

        models, lls = list(map(list, zip(*results)))
        return models, lls

    @ensure_args_are_viable
    def em(self, obs, act=None,
           nb_iter=50, tol=1e-4, initialize=True,
           init_state_mstep_kwargs=None,
           init_obs_mstep_kwargs=None,
           trans_mstep_kwargs=None,
           obs_mstep_kwargs=None,
           ctl_mstep_kwargs=None, **kwargs):
        """Fit the ensemble on per-member splits and score it.

        Returns per-member `(train_scores, test_scores)` as per-sample
        log-likelihoods on the training split and on the held-out data.
        """
        from sds.utils.general import train_test_split

        # None sentinels instead of mutable default dicts in the signature.
        init_state_mstep_kwargs = init_state_mstep_kwargs or {}
        init_obs_mstep_kwargs = init_obs_mstep_kwargs or {}
        trans_mstep_kwargs = trans_mstep_kwargs or {}
        obs_mstep_kwargs = obs_mstep_kwargs or {}
        ctl_mstep_kwargs = ctl_mstep_kwargs or {}

        # One training split per ensemble member.
        train_obs, train_act = train_test_split(obs, act,
                                                nb_traj_splits=self.ensemble_size,
                                                split_trajs=False)[:2]

        self.models, lls = self._parallel_em(train_obs, train_act,
                                             nb_iter=nb_iter, tol=tol, initialize=initialize,
                                             init_state_mstep_kwargs=init_state_mstep_kwargs,
                                             init_obs_mstep_kwargs=init_obs_mstep_kwargs,
                                             trans_mstep_kwargs=trans_mstep_kwargs,
                                             obs_mstep_kwargs=obs_mstep_kwargs,
                                             ctl_mstep_kwargs=ctl_mstep_kwargs)

        nb_train = [np.vstack(x).shape[0] for x in train_obs]
        nb_total = np.vstack(obs).shape[0]

        train_ll, total_ll = [], []
        for x, u, m in zip(train_obs, train_act, self.models):
            train_ll.append(m.log_normalizer(x, u))
            total_ll.append(m.log_normalizer(obs, act))

        # Per-sample scores; test score is (total - train) over held-out samples.
        train_scores = np.hstack(train_ll) / np.hstack(nb_train)
        test_scores = (np.hstack(total_ll) - np.hstack(train_ll))\
                      / (nb_total - np.hstack(nb_train))
        return train_scores, test_scores
class EnsembleAutoRegressiveClosedLoopHiddenMarkovModel:
    """Bagging ensemble of auto-regressive closed-loop HMMs.

    Like the other ensembles, each member is trained on its own trajectory
    split; this variant additionally carries a control lag and control
    m-step settings.
    """

    def __init__(self, nb_states, obs_dim, act_dim, obs_lag=1,
                 ctl_lag=1, ensemble_size=6, **kwargs):
        self.nb_states = nb_states
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.obs_lag = obs_lag
        self.ctl_lag = ctl_lag
        self.ensemble_size = ensemble_size

        self.models = [AutoRegressiveClosedLoopHiddenMarkovModel(self.nb_states, self.obs_dim, self.act_dim,
                                                                 self.obs_lag, self.ctl_lag, **kwargs)
                       for _ in range(self.ensemble_size)]

    def _parallel_em(self, obs, act, **kwargs):
        """Run EM for every member in a separate worker process."""

        def _create_job(model, obs, act, kwargs, seed):
            # Unpack per-job settings; the seed doubles as the process id.
            nb_iter = kwargs.get('nb_iter', 25)
            tol = kwargs.get('tol', 1e-4)
            initialize = kwargs.get('initialize', True)
            process_id = seed

            init_state_mstep_kwargs = kwargs.get('init_state_mstep_kwargs', {})
            init_obs_mstep_kwargs = kwargs.get('init_obs_mstep_kwargs', {})
            init_ctl_mstep_kwargs = kwargs.get('init_ctl_mstep_kwargs', {})
            trans_mstep_kwargs = kwargs.get('trans_mstep_kwargs', {})
            obs_mstep_kwargs = kwargs.get('obs_mstep_kwargs', {})
            ctl_mstep_kwargs = kwargs.get('ctl_mstep_kwargs', {})

            ll = model.em(obs, act,
                          nb_iter=nb_iter, tol=tol,
                          initialize=initialize, process_id=process_id,
                          init_state_mstep_kwargs=init_state_mstep_kwargs,
                          init_obs_mstep_kwargs=init_obs_mstep_kwargs,
                          init_ctl_mstep_kwargs=init_ctl_mstep_kwargs,
                          trans_mstep_kwargs=trans_mstep_kwargs,
                          obs_mstep_kwargs=obs_mstep_kwargs,
                          ctl_mstep_kwargs=ctl_mstep_kwargs)
            return model, ll

        nb_jobs = len(obs)
        kwargs_list = [kwargs.copy() for _ in range(nb_jobs)]
        # Deterministic seed per job, 0..nb_jobs-1.
        seeds = np.arange(nb_jobs, dtype=int)

        results = Parallel(n_jobs=min(nb_jobs, nb_cores), verbose=10, backend='loky')\
            (map(delayed(_create_job), self.models, obs, act, kwargs_list, seeds))

        models, lls = list(map(list, zip(*results)))
        return models, lls

    @ensure_args_are_viable
    def em(self, obs, act=None,
           nb_iter=50, tol=1e-4, initialize=True,
           init_state_mstep_kwargs=None,
           init_obs_mstep_kwargs=None,
           init_ctl_mstep_kwargs=None,
           trans_mstep_kwargs=None,
           obs_mstep_kwargs=None,
           ctl_mstep_kwargs=None, **kwargs):
        """Fit the ensemble on per-member splits and score it.

        Returns per-member `(train_scores, test_scores)` as per-sample
        log-likelihoods on the training split and on the held-out data.
        """
        from sds.utils.general import train_test_split

        # None sentinels instead of mutable default dicts in the signature.
        init_state_mstep_kwargs = init_state_mstep_kwargs or {}
        init_obs_mstep_kwargs = init_obs_mstep_kwargs or {}
        init_ctl_mstep_kwargs = init_ctl_mstep_kwargs or {}
        trans_mstep_kwargs = trans_mstep_kwargs or {}
        obs_mstep_kwargs = obs_mstep_kwargs or {}
        ctl_mstep_kwargs = ctl_mstep_kwargs or {}

        # One training split per ensemble member.
        train_obs, train_act = train_test_split(obs, act,
                                                nb_traj_splits=self.ensemble_size,
                                                split_trajs=False)[:2]

        self.models, lls = self._parallel_em(train_obs, train_act,
                                             nb_iter=nb_iter, tol=tol, initialize=initialize,
                                             init_state_mstep_kwargs=init_state_mstep_kwargs,
                                             init_obs_mstep_kwargs=init_obs_mstep_kwargs,
                                             init_ctl_mstep_kwargs=init_ctl_mstep_kwargs,
                                             trans_mstep_kwargs=trans_mstep_kwargs,
                                             obs_mstep_kwargs=obs_mstep_kwargs,
                                             ctl_mstep_kwargs=ctl_mstep_kwargs)

        nb_train = [np.vstack(x).shape[0] for x in train_obs]
        nb_total = np.vstack(obs).shape[0]

        train_ll, total_ll = [], []
        for x, u, m in zip(train_obs, train_act, self.models):
            train_ll.append(m.log_normalizer(x, u))
            total_ll.append(m.log_normalizer(obs, act))

        # Per-sample scores; test score is (total - train) over held-out samples.
        train_scores = np.hstack(train_ll) / np.hstack(nb_train)
        test_scores = (np.hstack(total_ll) - np.hstack(train_ll))\
                      / (nb_total - np.hstack(nb_train))
        return train_scores, test_scores
class EnsembleHybridController:
    """Bagging ensemble of hybrid controllers sharing one dynamics model."""

    def __init__(self, dynamics, ensemble_size=6, **kwargs):
        self.dynamics = dynamics
        self.ensemble_size = ensemble_size

        self.models = [HybridController(dynamics, **kwargs)
                       for _ in range(self.ensemble_size)]

    def _parallel_em(self, obs, act, **kwargs):
        """Run EM for every controller in a separate worker process."""

        def _create_job(model, obs, act, kwargs, seed):
            nb_iter = kwargs.get('nb_iter', 25)
            tol = kwargs.get('tol', 1e-4)
            # NOTE: default is False here, unlike the model ensembles.
            initialize = kwargs.get('initialize', False)
            process_id = seed

            ctl_mstep_kwargs = kwargs.get('ctl_mstep_kwargs', {})

            ll = model.em(obs, act,
                          nb_iter=nb_iter, tol=tol,
                          initialize=initialize, process_id=process_id,
                          ctl_mstep_kwargs=ctl_mstep_kwargs)
            return model, ll

        nb_jobs = len(obs)
        kwargs_list = [kwargs.copy() for _ in range(nb_jobs)]
        # Deterministic seed per job, 0..nb_jobs-1.
        seeds = np.arange(nb_jobs, dtype=int)

        results = Parallel(n_jobs=min(nb_jobs, nb_cores), verbose=10, backend='loky')\
            (map(delayed(_create_job), self.models, obs, act, kwargs_list, seeds))

        models, lls = list(map(list, zip(*results)))
        return models, lls

    @ensure_args_are_viable
    def em(self, obs, act=None,
           nb_iter=50, tol=1e-4, initialize=True,
           ctl_mstep_kwargs=None, **kwargs):
        """Fit the ensemble on per-member splits and score it.

        Returns per-member `(train_scores, test_scores)` as per-sample
        log-likelihoods on the training split and on the held-out data.
        """
        from sds.utils.general import train_test_split

        # None sentinel instead of a mutable default dict in the signature.
        ctl_mstep_kwargs = ctl_mstep_kwargs or {}

        # One training split per ensemble member.
        train_obs, train_act = train_test_split(obs, act,
                                                nb_traj_splits=self.ensemble_size,
                                                split_trajs=False)[:2]

        self.models, lls = self._parallel_em(train_obs, train_act,
                                             nb_iter=nb_iter, tol=tol,
                                             initialize=initialize,
                                             ctl_mstep_kwargs=ctl_mstep_kwargs)

        nb_train = [np.vstack(x).shape[0] for x in train_obs]
        nb_total = np.vstack(obs).shape[0]

        train_ll, total_ll = [], []
        for x, u, m in zip(train_obs, train_act, self.models):
            train_ll.append(m.log_normalizer(x, u))
            total_ll.append(m.log_normalizer(obs, act))

        # Per-sample scores; test score is (total - train) over held-out samples.
        train_scores = np.hstack(train_ll) / np.hstack(nb_train)
        test_scores = (np.hstack(total_ll) - np.hstack(train_ll))\
                      / (nb_total - np.hstack(nb_train))
        return train_scores, test_scores
| 42.21564
| 111
| 0.574235
| 2,154
| 17,815
| 4.421541
| 0.072888
| 0.129357
| 0.061739
| 0.044099
| 0.809429
| 0.793364
| 0.76554
| 0.755985
| 0.74874
| 0.739605
| 0
| 0.007448
| 0.329273
| 17,815
| 421
| 112
| 42.315914
| 0.789606
| 0
| 0
| 0.766562
| 0
| 0
| 0.024642
| 0.008588
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066246
| false
| 0
| 0.047319
| 0.003155
| 0.18612
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7618b58a4c050de560f03b7fb7afcc31d9fe7d58
| 1,638
|
py
|
Python
|
roboverse/envs/tasks.py
|
VentusYue/roboverse
|
bd19e0ef7bdcae1198aa768bfe9fc18c51878b6d
|
[
"MIT"
] | null | null | null |
roboverse/envs/tasks.py
|
VentusYue/roboverse
|
bd19e0ef7bdcae1198aa768bfe9fc18c51878b6d
|
[
"MIT"
] | null | null | null |
roboverse/envs/tasks.py
|
VentusYue/roboverse
|
bd19e0ef7bdcae1198aa768bfe9fc18c51878b6d
|
[
"MIT"
] | null | null | null |
class Task:
    """Base interface for subtasks: concrete tasks define success via `done`."""

    # Reward granted when the task is completed.
    REWARD = 1.0

    def done(self, info):
        """Return True when the env's `info` dict signals success."""
        raise NotImplementedError("Task classes need to define their success condition.")
class PickPlaceTask(Task):
    """Subtask: pick up an object and place it at a target position."""

    def __init__(self, object, target_object, pos, target_pos):
        # `object` shadows the builtin but is kept for caller compatibility.
        self._object, self._target_object = object, target_object
        self._pos, self._target_pos = pos, target_pos

    def done(self, info):
        """Success once the env reports a completed place."""
        return info['place_success']

    @property
    def object(self):
        """Object to be picked and placed."""
        return self._object

    @property
    def target_pos(self):
        """Target position for the placement."""
        return self._target_pos
class PickTask(Task):
    """Subtask: grasp an object."""

    def __init__(self, object, target_object, pos, target_pos):
        # `object` shadows the builtin but is kept for caller compatibility.
        self._object, self._target_object = object, target_object
        self._pos, self._target_pos = pos, target_pos

    def done(self, info):
        """Success once the env reports a completed grasp."""
        return info['grasp_success']

    @property
    def object(self):
        """Object to be grasped."""
        return self._object
class PlaceTask(Task):
    """Subtask: place an (already held) object at a target position."""

    def __init__(self, object, target_object, pos, target_pos):
        # `object` shadows the builtin but is kept for caller compatibility.
        self._object, self._target_object = object, target_object
        self._pos, self._target_pos = pos, target_pos

    def done(self, info):
        """Success once the env reports a completed place."""
        return info['place_success']

    @property
    def object(self):
        """Object to be placed."""
        return self._object

    @property
    def target_pos(self):
        """Target position for the placement."""
        return self._target_pos
class DrawerOpenTask(Task):
    """Subtask: open the drawer."""

    def done(self, info):
        """Success once the env reports the drawer as opened."""
        return info['drawer_opened']
class DrawerClosedTask(Task):
    """Subtask: close the drawer."""

    def done(self, info):
        """Success once the env reports the drawer as closed."""
        return info['drawer_closed']
| 23.070423
| 89
| 0.644689
| 198
| 1,638
| 5.050505
| 0.217172
| 0.117
| 0.066
| 0.09
| 0.746
| 0.746
| 0.746
| 0.746
| 0.632
| 0.632
| 0
| 0.001664
| 0.266178
| 1,638
| 70
| 90
| 23.4
| 0.830283
| 0.044567
| 0
| 0.77551
| 0
| 0
| 0.075193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.204082
| 0.632653
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
76213c77cea6a26a7f4b37a34070420e3e97e70b
| 49,982
|
py
|
Python
|
resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/MolKit/data/unict_dat.py
|
J-E-J-S/aaRS-Pipeline
|
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
|
[
"MIT"
] | 9
|
2021-03-06T04:24:28.000Z
|
2022-01-03T09:53:07.000Z
|
MolKit/data/unict_dat.py
|
e-mayo/autodocktools-prepare-py3k
|
2dd2316837bcb7c19384294443b2855e5ccd3e01
|
[
"BSD-3-Clause"
] | 3
|
2021-03-07T05:37:16.000Z
|
2021-09-19T15:06:54.000Z
|
MolKit/data/unict_dat.py
|
e-mayo/autodocktools-prepare-py3k
|
2dd2316837bcb7c19384294443b2855e5ccd3e01
|
[
"BSD-3-Clause"
] | 4
|
2019-08-28T23:11:39.000Z
|
2021-11-27T08:43:36.000Z
|
unict_dat = {
"TYR": { "impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CA', 'OXT', 'C', 'O']],
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"OH":{'torsion': 180.0, 'tree': 'S', 'NC': 9, 'NB': 10, 'NA': 11, 'I': 12, 'angle': 120.0, 'blen': 1.36, 'charge': -0.368, 'type': 'OH'},
"loopList":[['CG', 'CD2']],
"CD2":{'torsion': 0.0, 'tree': 'E', 'NC': 10, 'NB': 11, 'NA': 14, 'I': 15, 'angle': 120.0, 'blen': 1.4, 'charge': -0.035, 'type': 'CD'},
"NAMRES":'TYROSINE COO- ANION',
"atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'CD1', 'CE1', 'CZ', 'OH', 'HOH', 'CE2', 'CD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HOH":{'torsion': 0.0, 'tree': 'E', 'NC': 10, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 113.0, 'blen': 0.96, 'charge': 0.339, 'type': 'HO'},
"CE1":{'torsion': 180.0, 'tree': 'S', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 10, 'angle': 120.0, 'blen': 1.4, 'charge': 0.1, 'type': 'CD'},
"CD1":{'torsion': 180.0, 'tree': 'S', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 120.0, 'blen': 1.4, 'charge': -0.035, 'type': 'CD'},
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"CZ":{'torsion': 0.0, 'tree': 'B', 'NC': 8, 'NB': 9, 'NA': 10, 'I': 11, 'angle': 120.0, 'blen': 1.4, 'charge': -0.121, 'type': 'C'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 16, 'I': 17, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 109.47, 'blen': 1.51, 'charge': -0.001, 'type': 'CA'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.239, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.022, 'type': 'C2'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 16, 'I': 18, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CE2":{'torsion': 0.0, 'tree': 'S', 'NC': 9, 'NB': 10, 'NA': 11, 'I': 14, 'angle': 120.0, 'blen': 1.4, 'charge': 0.1, 'type': 'CD'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 16, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
},
"ASN": { "ND2":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 10, 'angle': 116.6, 'blen': 1.335, 'charge': -0.867, 'type': 'N'},
"atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'OD1', 'ND2', 'HND1', 'HND2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 13, 'I': 14, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"OD1":{'torsion': 0.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 120.5, 'blen': 1.229, 'charge': -0.47, 'type': 'O'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CB', 'ND2', 'CG', 'OD1'], ['CG', 'HND1', 'ND2', 'HND2'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'B', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 111.1, 'blen': 1.522, 'charge': 0.675, 'type': 'C'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.211, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.003, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HND1":{'torsion': 0.0, 'tree': 'E', 'NC': 7, 'NB': 8, 'NA': 10, 'I': 11, 'angle': 119.8, 'blen': 1.01, 'charge': 0.344, 'type': 'H'},
"HND2":{'torsion': 180.0, 'tree': 'E', 'NC': 7, 'NB': 8, 'NA': 10, 'I': 12, 'angle': 119.8, 'blen': 1.01, 'charge': 0.344, 'type': 'H'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 13, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 13, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'ASPARAGINE COO- ANION',
},
"CYS": { "atNameList":['N', 'HN', 'CA', 'CB', 'SG', 'HSG', 'LP1', 'LP2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"SG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 116.0, 'blen': 1.81, 'charge': 0.827, 'type': 'SH'},
"LP1":{'torsion': 80.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 10, 'angle': 96.7, 'blen': 0.679, 'charge': -0.481, 'type': 'LP'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 12, 'I': 13, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"LP2":{'torsion': 280.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 11, 'angle': 96.7, 'blen': 0.679, 'charge': -0.481, 'type': 'LP'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.14, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.1, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HSG":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 96.0, 'blen': 1.33, 'charge': 0.135, 'type': 'HS'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 12, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 12, 'I': 14, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'CYSTEINE COO- ANION',
},
"ARG": { "impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['NE', 'NH1', 'CZ', 'NH2'], ['CA', 'OXT', 'C', 'O']],
"NH1":{'torsion': 0.0, 'tree': 'B', 'NC': 9, 'NB': 10, 'NA': 12, 'I': 13, 'angle': 122.0, 'blen': 1.33, 'charge': -0.6345, 'type': 'N2'},
"HN12":{'torsion': 180.0, 'tree': 'E', 'NC': 10, 'NB': 12, 'NA': 13, 'I': 15, 'angle': 119.8, 'blen': 1.01, 'charge': 0.3615, 'type': 'H3'},
"INTX,KFORM":['INT', '1'],
"NE":{'torsion': 180.0, 'tree': 'B', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 10, 'angle': 111.0, 'blen': 1.48, 'charge': -0.493, 'type': 'N2'},
"HN11":{'torsion': 0.0, 'tree': 'E', 'NC': 10, 'NB': 12, 'NA': 13, 'I': 14, 'angle': 119.8, 'blen': 1.01, 'charge': 0.3615, 'type': 'H3'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HN22":{'torsion': 180.0, 'tree': 'E', 'NC': 10, 'NB': 12, 'NA': 16, 'I': 18, 'angle': 119.8, 'blen': 1.01, 'charge': 0.3615, 'type': 'H3'},
"HN21":{'torsion': 0.0, 'tree': 'E', 'NC': 10, 'NB': 12, 'NA': 16, 'I': 17, 'angle': 119.8, 'blen': 1.01, 'charge': 0.3615, 'type': 'H3'},
"NAMRES":'ARGININE',
"atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'CD', 'NE', 'HNE', 'CZ', 'NH1', 'HN11', 'HN12', 'NH2', 'HN21', 'HN22', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"NH2":{'torsion': 180.0, 'tree': 'B', 'NC': 9, 'NB': 10, 'NA': 12, 'I': 16, 'angle': 118.0, 'blen': 1.33, 'charge': -0.6345, 'type': 'N2'},
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"CZ":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 9, 'NA': 10, 'I': 12, 'angle': 123.0, 'blen': 1.33, 'charge': 0.813, 'type': 'CA'},
"CD":{'torsion': 180.0, 'tree': 'S', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 109.47, 'blen': 1.525, 'charge': 0.111, 'type': 'C2'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 20, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 109.47, 'blen': 1.525, 'charge': 0.058, 'type': 'C2'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.231, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.049, 'type': 'C2'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 21, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"HNE":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 9, 'NA': 10, 'I': 11, 'angle': 118.5, 'blen': 1.01, 'charge': 0.294, 'type': 'H3'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 19, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
},
"LEU": { "atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CD1', 'CG', 'CB', 'CD2'], ['CA', 'OXT', 'C', 'O']],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 11, 'I': 12, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CD2":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 10, 'angle': 109.47, 'blen': 1.525, 'charge': -0.014, 'type': 'C3'},
"CD1":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 109.47, 'blen': 1.525, 'charge': -0.014, 'type': 'C3'},
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'B', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 109.47, 'blen': 1.525, 'charge': 0.054, 'type': 'CH'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.198, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.016, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 11, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 11, 'I': 13, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'LEUCINE COO- ANION',
},
"HID": { "atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'ND1', 'HND', 'CE1', 'NE2', 'CD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"NE2":{'torsion': 0.0, 'tree': 'S', 'NC': 8, 'NB': 9, 'NA': 11, 'I': 12, 'angle': 109.0, 'blen': 1.31, 'charge': -0.527, 'type': 'NB'},
"ND1":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 122.0, 'blen': 1.39, 'charge': -0.444, 'type': 'NA'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CE1":{'torsion': 180.0, 'tree': 'S', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 11, 'angle': 108.0, 'blen': 1.32, 'charge': 0.384, 'type': 'CP'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 14, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 115.0, 'blen': 1.51, 'charge': 0.089, 'type': 'CC'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.213, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.06, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"loopList":[['CG', 'CD2']],
"HND":{'torsion': 0.0, 'tree': 'E', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 10, 'angle': 126.0, 'blen': 1.01, 'charge': 0.32, 'type': 'H'},
"CUT":['0.00000'],
"CD2":{'torsion': 0.0, 'tree': 'E', 'NC': 9, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 110.0, 'blen': 1.36, 'charge': 0.145, 'type': 'CF'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 16, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'HISTIDINE DELTAH COO- ANION',
},
"HIE": { "atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'ND1', 'CE1', 'NE2', 'HNE', 'CD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"NE2":{'torsion': 0.0, 'tree': 'B', 'NC': 8, 'NB': 9, 'NA': 10, 'I': 11, 'angle': 109.0, 'blen': 1.31, 'charge': -0.444, 'type': 'NA'},
"ND1":{'torsion': 180.0, 'tree': 'S', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 122.0, 'blen': 1.39, 'charge': -0.527, 'type': 'NB'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CE1":{'torsion': 180.0, 'tree': 'S', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 10, 'angle': 108.0, 'blen': 1.32, 'charge': 0.384, 'type': 'CP'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 14, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 115.0, 'blen': 1.51, 'charge': 0.112, 'type': 'CC'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.213, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.06, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"loopList":[['CG', 'CD2']],
"HNE":{'torsion': 180.0, 'tree': 'E', 'NC': 9, 'NB': 10, 'NA': 11, 'I': 12, 'angle': 125.0, 'blen': 1.01, 'charge': 0.32, 'type': 'H'},
"CUT":['0.00000'],
"CD2":{'torsion': 0.0, 'tree': 'E', 'NC': 9, 'NB': 10, 'NA': 11, 'I': 13, 'angle': 110.0, 'blen': 1.36, 'charge': 0.122, 'type': 'CG'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 16, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'HISTIDINE EPSILONH COO- ANION',
},
"MET": { "SD":{'torsion': 180.0, 'tree': '3', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 110.0, 'blen': 1.81, 'charge': 0.737, 'type': 'S'},
"atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'SD', 'CE', 'LP1', 'LP2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"LP1":{'torsion': 80.0, 'tree': 'E', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 11, 'angle': 96.7, 'blen': 0.679, 'charge': -0.381, 'type': 'LP'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 13, 'I': 14, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"LP2":{'torsion': 280.0, 'tree': 'E', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 12, 'angle': 96.7, 'blen': 0.679, 'charge': -0.381, 'type': 'LP'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 109.47, 'blen': 1.525, 'charge': 0.09, 'type': 'C2'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.131, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.037, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CE":{'torsion': 180.0, 'tree': 'E', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 10, 'angle': 100.0, 'blen': 1.78, 'charge': 0.007, 'type': 'C3'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 13, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 13, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'METHIONINE COO- ANION',
},
"IDBGEN,IREST,ITYPF":['1', '1', '101'],
"ALA": { "atNameList":['N', 'HN', 'CA', 'CB', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CA', 'OXT', 'C', 'O'], ['CB', 'CA', 'N', 'C']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.209, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.031, 'type': 'C3'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'ALANINE COO- ANION',
},
"PHE": { "atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'CD1', 'CE1', 'CZ', 'CE2', 'CD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CA', 'OXT', 'C', 'O']],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CE1":{'torsion': 180.0, 'tree': 'S', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 10, 'angle': 120.0, 'blen': 1.4, 'charge': 0.004, 'type': 'CD'},
"CD1":{'torsion': 180.0, 'tree': 'S', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 120.0, 'blen': 1.4, 'charge': -0.011, 'type': 'CD'},
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"CZ":{'torsion': 0.0, 'tree': 'S', 'NC': 8, 'NB': 9, 'NA': 10, 'I': 11, 'angle': 120.0, 'blen': 1.4, 'charge': -0.003, 'type': 'CD'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 115.0, 'blen': 1.51, 'charge': 0.011, 'type': 'CA'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.208, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.038, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CE2":{'torsion': 0.0, 'tree': 'S', 'NC': 9, 'NB': 10, 'NA': 11, 'I': 12, 'angle': 120.0, 'blen': 1.4, 'charge': 0.004, 'type': 'CD'},
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 14, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"loopList":[['CG', 'CD2']],
"CUT":['0.00000'],
"CD2":{'torsion': 0.0, 'tree': 'E', 'NC': 10, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 120.0, 'blen': 1.4, 'charge': -0.011, 'type': 'CD'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 16, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'PHENYLALANINE COO- ANION',
},
"CYX": { "atNameList":['N', 'HN', 'CA', 'CB', 'SG', 'LP1', 'LP2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"SG":{'torsion': 180.0, 'tree': 'B', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 116.0, 'blen': 1.81, 'charge': 0.824, 'type': 'S'},
"LP1":{'torsion': 80.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 96.7, 'blen': 0.679, 'charge': -0.4045, 'type': 'LP'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 11, 'I': 12, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"LP2":{'torsion': 280.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 10, 'angle': 96.7, 'blen': 0.679, 'charge': -0.4045, 'type': 'LP'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.082, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.143, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 11, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 11, 'I': 13, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'CYSTINE(S-S BRIDGE) COO- ANION',
},
"PRO": { "atNameList":['N', 'CD', 'CG', 'CB', 'CA', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CUT":['0.00000'],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 8, 'NA': 9, 'I': 10, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['CB', 'CA', 'N', 'C'], ['-M', 'CA', 'N', 'CD'], ['CA', 'OXT', 'C', 'O']],
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 117.0, 'blen': 1.337, 'charge': -0.257, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 200.1, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 5, 'I': 6, 'angle': 103.2, 'blen': 1.5, 'charge': 0.03, 'type': 'C2'},
"CA":{'torsion': 175.2, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 8, 'angle': 120.6, 'blen': 1.451, 'charge': 0.112, 'type': 'CH'},
"CB":{'torsion': 338.3, 'tree': 'E', 'NC': 4, 'NB': 5, 'NA': 6, 'I': 7, 'angle': 106.0, 'blen': 1.51, 'charge': -0.001, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"loopList":[['CB', 'CA']],
"CD":{'torsion': 356.1, 'tree': 'S', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 126.1, 'blen': 1.458, 'charge': 0.084, 'type': 'C2'},
"C":{'torsion': 0.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 8, 'I': 9, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 8, 'NA': 9, 'I': 11, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'PROLINE COO- ANION',
},
"LYS": { "HNZ3":{'torsion': 300.0, 'tree': 'E', 'NC': 9, 'NB': 10, 'NA': 11, 'I': 14, 'angle': 109.47, 'blen': 1.01, 'charge': 0.311, 'type': 'H3'},
"HNZ2":{'torsion': 180.0, 'tree': 'E', 'NC': 9, 'NB': 10, 'NA': 11, 'I': 13, 'angle': 109.47, 'blen': 1.01, 'charge': 0.311, 'type': 'H3'},
"HNZ1":{'torsion': 60.0, 'tree': 'E', 'NC': 9, 'NB': 10, 'NA': 11, 'I': 12, 'angle': 109.47, 'blen': 1.01, 'charge': 0.311, 'type': 'H3'},
"atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'CD', 'CE', 'NZ', 'HNZ1', 'HNZ2', 'HNZ3', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 15, 'I': 16, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CD":{'torsion': 180.0, 'tree': 'S', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 109.47, 'blen': 1.525, 'charge': 0.048, 'type': 'C2'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"NZ":{'torsion': 180.0, 'tree': '3', 'NC': 8, 'NB': 9, 'NA': 10, 'I': 11, 'angle': 109.47, 'blen': 1.47, 'charge': -0.272, 'type': 'N3'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 109.47, 'blen': 1.525, 'charge': 0.053, 'type': 'C2'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.221, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.039, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CE":{'torsion': 180.0, 'tree': 'S', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 10, 'angle': 109.47, 'blen': 1.525, 'charge': 0.218, 'type': 'C2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 15, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 15, 'I': 17, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'LYSINE COO- ANION',
},
"NAMDBF":'db4.dat',
"SER": { "OG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 109.47, 'blen': 1.43, 'charge': -0.55, 'type': 'OH'},
"atNameList":['N', 'HN', 'CA', 'CB', 'OG', 'HOG', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 10, 'I': 12, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 10, 'I': 11, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.286, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.194, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 10, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"HOG":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 109.47, 'blen': 0.96, 'charge': 0.31, 'type': 'HO'},
"NAMRES":'SERINE COO- ANION',
},
"ASP": { "atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'OD1', 'OD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 11, 'I': 12, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"OD1":{'torsion': 90.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 117.2, 'blen': 1.26, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CB', 'OD1', 'CG', 'OD2'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'B', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 109.47, 'blen': 1.527, 'charge': 0.62, 'type': 'C'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.24, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': -0.208, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"OD2":{'torsion': 270.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 10, 'angle': 117.2, 'blen': 1.26, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 11, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 11, 'I': 13, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'ASPARTIC ACID COO- ANION',
},
"GLN": { "OE1":{'torsion': 0.0, 'tree': 'E', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 10, 'angle': 120.5, 'blen': 1.229, 'charge': -0.47, 'type': 'O'},
"atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'CD', 'OE1', 'NE2', 'HNE1', 'HNE2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"NE2":{'torsion': 180.0, 'tree': 'B', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 11, 'angle': 116.6, 'blen': 1.335, 'charge': -0.867, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CD":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 111.1, 'blen': 1.522, 'charge': 0.675, 'type': 'C'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CG', 'NE2', 'CD', 'OE1'], ['CD', 'HNE1', 'NE2', 'HNE2'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"HNE2":{'torsion': 180.0, 'tree': 'E', 'NC': 8, 'NB': 9, 'NA': 11, 'I': 13, 'angle': 119.8, 'blen': 1.01, 'charge': 0.344, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 109.47, 'blen': 1.525, 'charge': -0.043, 'type': 'C2'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.204, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.053, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HNE1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 9, 'NA': 11, 'I': 12, 'angle': 119.8, 'blen': 1.01, 'charge': 0.344, 'type': 'H'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 14, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 16, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'GLUTAMINE COO- ANION',
},
"GLU": { "OE1":{'torsion': 90.0, 'tree': 'E', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 10, 'angle': 117.2, 'blen': 1.26, 'charge': -0.706, 'type': 'O2'},
"atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'CD', 'OE1', 'OE2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 12, 'I': 14, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 12, 'I': 13, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CD":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 109.47, 'blen': 1.527, 'charge': 0.62, 'type': 'C'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CG', 'OE1', 'CD', 'OE2'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 109.47, 'blen': 1.51, 'charge': -0.208, 'type': 'C2'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.24, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.0, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 12, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OE2":{'torsion': 270.0, 'tree': 'E', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 11, 'angle': 117.2, 'blen': 1.26, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'GLUTAMIC ACID COO- ANION',
},
"TRP": { "NE1":{'torsion': 180.0, 'tree': 'B', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 10, 'angle': 107.0, 'blen': 1.43, 'charge': -0.33, 'type': 'NA'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 18, 'I': 20, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CA', 'OXT', 'C', 'O']],
"CH2":{'torsion': 180.0, 'tree': 'S', 'NC': 10, 'NB': 12, 'NA': 13, 'I': 14, 'angle': 116.0, 'blen': 1.39, 'charge': 0.034, 'type': 'CD'},
"CZ2":{'torsion': 180.0, 'tree': 'S', 'NC': 9, 'NB': 10, 'NA': 12, 'I': 13, 'angle': 128.0, 'blen': 1.4, 'charge': 0.029, 'type': 'CD'},
"INTX,KFORM":['INT', '1'],
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.02, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"loopList":[['CG', 'CD2'], ['CE2', 'CD2']],
"NAMRES":'TRYPTOPHAN COO- ANION',
"atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'CD1', 'NE1', 'HNE', 'CE2', 'CZ2', 'CH2', 'CZ3', 'CE3', 'CD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CD2":{'torsion': 0.0, 'tree': 'E', 'NC': 14, 'NB': 15, 'NA': 16, 'I': 17, 'angle': 117.0, 'blen': 1.4, 'charge': -0.275, 'type': 'CB'},
"CD1":{'torsion': 180.0, 'tree': 'S', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 127.0, 'blen': 1.34, 'charge': 0.117, 'type': 'CG'},
"CE3":{'torsion': 0.0, 'tree': 'S', 'NC': 13, 'NB': 14, 'NA': 15, 'I': 16, 'angle': 122.0, 'blen': 1.41, 'charge': 0.145, 'type': 'CD'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 18, 'I': 19, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 115.0, 'blen': 1.51, 'charge': 0.046, 'type': 'C*'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.242, 'type': 'CH'},
"CZ3":{'torsion': 0.0, 'tree': 'S', 'NC': 12, 'NB': 13, 'NA': 14, 'I': 15, 'angle': 121.0, 'blen': 1.35, 'charge': -0.082, 'type': 'CD'},
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"CE2":{'torsion': 0.0, 'tree': 'S', 'NC': 8, 'NB': 9, 'NA': 10, 'I': 12, 'angle': 109.0, 'blen': 1.31, 'charge': 0.0, 'type': 'CN'},
"HNE":{'torsion': 180.0, 'tree': 'E', 'NC': 8, 'NB': 9, 'NA': 10, 'I': 11, 'angle': 125.5, 'blen': 1.01, 'charge': 0.294, 'type': 'H'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 18, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
},
"GLY": { "atNameList":['N', 'HN', 'CA', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.24, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 110.4, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 9, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'GLYCINE COO- ANION',
},
"THR": { "atNameList":['N', 'HN', 'CA', 'CB', 'CG2', 'OG1', 'HOG', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 11, 'I': 13, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 11, 'I': 12, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CG2', 'CB', 'CA', 'OG1'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"OG1":{'torsion': 60.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 9, 'angle': 109.47, 'blen': 1.43, 'charge': -0.55, 'type': 'OH'},
"CG2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 109.47, 'blen': 1.525, 'charge': 0.007, 'type': 'C3'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.262, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'B', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.211, 'type': 'CH'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 11, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"HOG":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 9, 'I': 10, 'angle': 109.47, 'blen': 0.96, 'charge': 0.31, 'type': 'HO'},
"NAMRES":'THREONINE COO- ANION',
},
"HIP": { "atNameList":['N', 'HN', 'CA', 'CB', 'CG', 'ND1', 'HND', 'CE1', 'NE2', 'HNE', 'CD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"NE2":{'torsion': 0.0, 'tree': 'B', 'NC': 8, 'NB': 9, 'NA': 11, 'I': 12, 'angle': 109.0, 'blen': 1.31, 'charge': -0.686, 'type': 'NA'},
"ND1":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 7, 'NA': 8, 'I': 9, 'angle': 122.0, 'blen': 1.39, 'charge': -0.613, 'type': 'NA'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 15, 'I': 16, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CE1":{'torsion': 180.0, 'tree': 'S', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 11, 'angle': 108.0, 'blen': 1.32, 'charge': 0.719, 'type': 'CP'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 15, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 115.0, 'blen': 1.51, 'charge': 0.103, 'type': 'CC'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.189, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'S', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.211, 'type': 'C2'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"loopList":[['CG', 'CD2']],
"HND":{'torsion': 0.0, 'tree': 'E', 'NC': 7, 'NB': 8, 'NA': 9, 'I': 10, 'angle': 126.0, 'blen': 1.01, 'charge': 0.478, 'type': 'H'},
"HNE":{'torsion': 180.0, 'tree': 'E', 'NC': 9, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 125.0, 'blen': 1.01, 'charge': 0.486, 'type': 'H'},
"CUT":['0.00000'],
"CD2":{'torsion': 0.0, 'tree': 'E', 'NC': 9, 'NB': 11, 'NA': 12, 'I': 14, 'angle': 110.0, 'blen': 1.36, 'charge': 0.353, 'type': 'CG'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 15, 'I': 17, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'HISTIDINE PLUS ZWITTERION',
},
"VAL": { "atNameList":['N', 'HN', 'CA', 'CB', 'CG1', 'CG2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CG1":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 109.47, 'blen': 1.525, 'charge': 0.006, 'type': 'C3'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 10, 'I': 11, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CG1', 'CB', 'CA', 'CG2'], ['CA', 'OXT', 'C', 'O']],
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG2":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 9, 'angle': 109.47, 'blen': 1.525, 'charge': 0.006, 'type': 'C3'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.195, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'B', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 111.1, 'blen': 1.525, 'charge': 0.033, 'type': 'CH'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 10, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 10, 'I': 12, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'VALINE',
},
"ILE": { "atNameList":['N', 'HN', 'CA', 'CB', 'CG2', 'CG1', 'CD1', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"impropTors":[['-M', 'CA', 'N', 'HN'], ['CB', 'CA', 'N', 'C'], ['CG2', 'CB', 'CA', 'CG1'], ['CA', 'OXT', 'C', 'O']],
"CG1":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 9, 'angle': 109.47, 'blen': 1.525, 'charge': 0.017, 'type': 'C2'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 11, 'I': 12, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CD1":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 7, 'NA': 9, 'I': 10, 'angle': 109.47, 'blen': 1.525, 'charge': -0.001, 'type': 'C3'},
"HN":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.248, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.52, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG2":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 7, 'I': 8, 'angle': 109.47, 'blen': 1.525, 'charge': 0.001, 'type': 'C3'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.193, 'type': 'CH'},
"CB":{'torsion': 60.0, 'tree': 'B', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.47, 'blen': 1.525, 'charge': 0.03, 'type': 'CH'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 11, 'angle': 111.1, 'blen': 1.522, 'charge': 0.444, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 11, 'I': 13, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'ISOLEUCINE COO- ANION',
},
"filename":'unict.in',
}
| 108.420824
| 251
| 0.441079
| 8,810
| 49,982
| 2.50227
| 0.038252
| 0.084146
| 0.074847
| 0.102064
| 0.926287
| 0.915355
| 0.897755
| 0.887866
| 0.875255
| 0.861374
| 0
| 0.16978
| 0.147533
| 49,982
| 461
| 252
| 108.420824
| 0.347611
| 0
| 0
| 0.451193
| 0
| 0
| 0.347398
| 0.010584
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
526016193fd53885ebba0769c6af5d72a718baed
| 1,595
|
py
|
Python
|
Testing/fixture/login.py
|
redjoke01/My_prac_QA
|
7060a019a3efb0fdea7a452e3bc05938e69e2945
|
[
"Apache-2.0"
] | null | null | null |
Testing/fixture/login.py
|
redjoke01/My_prac_QA
|
7060a019a3efb0fdea7a452e3bc05938e69e2945
|
[
"Apache-2.0"
] | null | null | null |
Testing/fixture/login.py
|
redjoke01/My_prac_QA
|
7060a019a3efb0fdea7a452e3bc05938e69e2945
|
[
"Apache-2.0"
] | null | null | null |
# Fixture for opening the browser and driving the Moodle login form.
class OpenBrowser():
    """Login-page fixture for http://open.kbsu.ru/moodle/.

    Provides three scenarios: a positive login (valid credentials, open a
    course, log out), a negative login (wrong password) and a login with
    empty credentials. ``app`` is expected to expose a Selenium WebDriver
    as ``app.driver`` — TODO confirm against the fixture's constructor site.
    """

    def __init__(self, app):
        self.app = app

    def _submit_login(self, user, passw):
        """Open the login page, fill in credentials and submit the form.

        Returns the WebDriver so callers can continue on the result page.
        """
        wd = self.app.driver
        wd.get("http://open.kbsu.ru/moodle/")
        wd.find_element_by_name("username").send_keys(user)
        wd.find_element_by_name("password").send_keys(passw)
        wd.find_element_by_xpath("//input[@value='LOG IN']").click()
        return wd

    def _assert_not_logged_in(self, wd):
        """Check that the page still shows the 'not logged in' banner."""
        # The literal is the Russian phrase 'You are not logged in'; it is a
        # runtime string matched against the page and must stay as-is.
        elem = wd.find_element_by_xpath("//span[contains(.,'Вы не вошли в систему')]")
        assert elem.text == "Вы не вошли в систему"

    def login_pos(self, user="iieikt266", passw="Stud?133"):
        """Positive scenario: log in, open a course page, then log out."""
        wd = self._submit_login(user, passw)
        wd.find_element_by_xpath("//a[contains(text(),'КУРСЫ')]").click()
        wd.find_element_by_xpath("//a[@id='label_2_2']/span").click()
        wd.find_element_by_xpath("//a[contains(text(),'Выход')]").click()

    def login_neg(self, user="iieikt266", passw="Stud?135"):
        """Negative scenario: wrong password should leave the user logged out."""
        wd = self._submit_login(user, passw)
        self._assert_not_logged_in(wd)

    def login_empty(self, user="", passw=""):
        """Empty-credentials scenario: blank form should be rejected."""
        wd = self._submit_login(user, passw)
        self._assert_not_logged_in(wd)
| 37.093023
| 86
| 0.626959
| 230
| 1,595
| 4.1
| 0.252174
| 0.089077
| 0.193001
| 0.222694
| 0.845175
| 0.790032
| 0.790032
| 0.76246
| 0.76246
| 0.687169
| 0
| 0.010811
| 0.188088
| 1,595
| 42
| 87
| 37.97619
| 0.717375
| 0.016301
| 0
| 0.678571
| 0
| 0
| 0.290302
| 0.080283
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.142857
| false
| 0.214286
| 0
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
bfe8e787213cc360b17c6d2d7036534160740e6a
| 168
|
py
|
Python
|
project/all.py
|
danielbraga/hcap
|
a3ca0d6963cff19ed6ec0436cce84e2b41615454
|
[
"MIT"
] | null | null | null |
project/all.py
|
danielbraga/hcap
|
a3ca0d6963cff19ed6ec0436cce84e2b41615454
|
[
"MIT"
] | null | null | null |
project/all.py
|
danielbraga/hcap
|
a3ca0d6963cff19ed6ec0436cce84e2b41615454
|
[
"MIT"
] | null | null | null |
# Aggregation script: bootstrap the project, then pull every model into
# this module's namespace (convenient for interactive shells).
# NOTE(review): ImproperlyConfigured is imported but unused here —
# presumably kept for interactive error handling; confirm before removing.
from django.core.exceptions import ImproperlyConfigured
# start() comes from .run; it presumably configures Django settings —
# model imports below would otherwise raise ImproperlyConfigured.
# TODO confirm against .run's implementation.
from .run import *
start()
from locations.models import *
from users.models import *
from app.models import *
| 18.666667
| 55
| 0.791667
| 22
| 168
| 6.045455
| 0.545455
| 0.270677
| 0.240602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136905
| 168
| 8
| 56
| 21
| 0.917241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.833333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
871788830e6e96dd199f50d68d7336d60b8fa5c0
| 136,482
|
py
|
Python
|
fit_functions.py
|
adambrzosko/ml-htt-methods
|
5378b4a9747ea21702c7a48e5f3cbb4fc75a71fc
|
[
"MIT"
] | null | null | null |
fit_functions.py
|
adambrzosko/ml-htt-methods
|
5378b4a9747ea21702c7a48e5f3cbb4fc75a71fc
|
[
"MIT"
] | null | null | null |
fit_functions.py
|
adambrzosko/ml-htt-methods
|
5378b4a9747ea21702c7a48e5f3cbb4fc75a71fc
|
[
"MIT"
] | 1
|
2022-01-31T14:54:33.000Z
|
2022-01-31T14:54:33.000Z
|
import xgboost as xgb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
import plot_functions as pf
from scipy import interp
# from root_numpy import array2root
import json
import operator
import gc
# from eli5 import explain_prediction_xgboost
from keras.models import Sequential
from keras.initializers import RandomNormal
from keras.layers import Dense
from keras.layers import Activation
from keras.layers import *
from keras.optimizers import Nadam
from keras.optimizers import adam
from keras.regularizers import l2
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from sklearn.model_selection import KFold
from sklearn.utils import class_weight
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from pandas.plotting import scatter_matrix
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import mean_squared_error
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectFromModel
from sklearn.neural_network import MLPClassifier
# from bayes_opt import BayesianOptimization
def fit_ttsplit(X, channel, fold):
    """Train a binary ggh-vs-qqh XGBoost classifier on a train/test split.

    Engineered kinematic features are added to ``X`` in place, the trained
    classifier is pickled to ``binary_<channel>_fold<fold>_xgb.pkl``, and
    ROC / feature-importance / output / confusion-matrix plots are written
    via ``plot_functions`` (``pf``).

    Args:
        X: pandas DataFrame of event-level variables; must contain the
           'class', 'wt_xs' and 'multi_class' columns used below.
        channel: channel label used in output file names.
        fold: fold label used in output file names.

    Returns:
        None.
    """
    # Engineered kinematic features (Zeppenfeld variable, centrality, log pTs).
    X["zfeld"] = np.fabs(X.eta_h - (X.jeta_1 + X.jeta_2)/2.)
    X["centrality"] = np.exp(-4*(X.zfeld/np.fabs(X.jdeta))**2)
    X["logPt1"] = np.log(X.pt_1)
    X["logPt2"] = np.log(X.pt_2)

    X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(
        X,
        X['class'],
        X['wt_xs'],
        test_size=0.33,
        random_state=123456,
        # .values replaces DataFrame.as_matrix(), removed in pandas >= 1.0.
        stratify=X['class'].values,
        )

    print(X.shape)
    print(X_train[(X_train['class'] == 1)].shape)
    print(X_test[(X_test['class'] == 1)].shape)

    # Per-category weight: total weight over per-category total.
    # NOTE(review): y_train holds 'class' labels but the groupby is over
    # 'multi_class' — if the two columns use different label sets, the
    # re-weighting loop below never matches anything; confirm the inputs.
    sum_w = X_train['wt_xs'].sum()
    sum_w_cat = X_train.groupby('multi_class')['wt_xs'].sum()
    class_weights = sum_w / sum_w_cat
    class_weight_dict = dict(class_weights)
    print(class_weight_dict)

    # Fold the class weights into the per-event training weights.
    for i in w_train.index:
        for key, value in class_weight_dict.items():
            if y_train[i] == key:
                w_train.at[i] *= value

    # Drop bookkeeping columns and raw inputs already encoded in the
    # engineered features above (same list for train and test).
    drop_cols = [
        'event', 'wt', 'wt_xs', 'multi_class', 'process', 'class',
        'jeta_1', 'jeta_2', 'eta_h', 'zfeld',
        'pt_1', 'pt_2',
        ]
    X_train = X_train.drop(drop_cols, axis=1).reset_index(drop=True)
    X_test = X_test.drop(drop_cols, axis=1).reset_index(drop=True)

    # Rename columns to f0..fN (XGBoost restricts feature-name characters).
    orig_columns = X_train.columns
    X_train.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    X_test.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    print(orig_columns)
    print(X_train.columns)

    params = {
        'objective': 'binary:logistic',
        'max_depth': 4,
        'learning_rate': 0.01,
        'silent': 1,
        'n_estimators': 10000,
        'gamma': 0.1,
        'subsample': 0.9,
        'nthread': -1,
        'seed': 123456
        }

    xgb_clf = xgb.XGBClassifier(**params)
    xgb_clf.fit(
        X_train,
        y_train,
        sample_weight=w_train,
        early_stopping_rounds=50,
        eval_set=[(X_train, y_train, w_train), (X_test, y_test, w_test)],
        eval_metric=['auc', 'logloss'],
        verbose=True
        )

    y_predict = xgb_clf.predict(X_test)
    print(y_predict)
    print(classification_report(
        y_test,
        y_predict,
        target_names=["ggh", "qqh"],
        sample_weight=w_test
        ))

    y_pred = xgb_clf.predict_proba(X_test)
    print(y_pred)

    # Persist the trained model; pickle requires a binary-mode handle
    # (the previous text-mode 'w' fails under Python 3).
    with open('binary_{}_fold{}_xgb.pkl'.format(channel, fold), 'wb') as f:
        pickle.dump(xgb_clf, f)

    print(xgb_clf.feature_importances_)

    # ROC from the signal-class probabilities.  The local name avoids
    # shadowing sklearn's auc() imported at module level.
    auc_score = roc_auc_score(y_test, y_pred[:, 1])
    print(auc_score)
    fpr, tpr, _ = roc_curve(y_test, y_pred[:, 1])
    pf.plot_roc_curve(
        fpr, tpr, auc_score,
        '{}_fold{}_roc.pdf'.format(channel, fold))

    # DMatrix wrappers are needed by pf.plot_output().
    xg_train = xgb.DMatrix(X_train, label=y_train, weight=w_train)
    xg_test = xgb.DMatrix(X_test, label=y_test, weight=w_test)

    pf.plot_features(
        xgb_clf,
        'weight',
        'binary_{}_fold{}_features_weight.pdf'.format(channel, fold))
    pf.plot_features(
        xgb_clf,
        'gain',
        'binary_{}_fold{}_features_gain.pdf'.format(channel, fold))
    pf.plot_output(
        xgb_clf,
        xg_train, xg_test,
        y_train, y_test,
        'binary_{}_fold{}_output.pdf'.format(channel, fold))

    y_prediction = xgb_clf.predict(X_test)
    pf.plot_confusion_matrix(
        y_test, y_prediction, w_test,
        classes=["qqh", "ggh"],
        figname='binary_{}_fold{}_non-normalised_weights_cm.pdf'.format(channel, fold))
    pf.plot_confusion_matrix(
        y_test, y_prediction, w_test,
        classes=["qqh", "ggh"],
        figname='binary_{}_fold{}_normalised_weights_cm.pdf'.format(channel, fold),
        normalise_by_row=True)

    return None
def fit_rhottsplit(X, channel, fold):
    """Train a binary ggh_rho-vs-ggh_bkg XGBoost classifier (rho-decay ID).

    The trained classifier is pickled under ``RhoID/`` and ROC /
    feature-importance / output / confusion-matrix plots are written via
    ``plot_functions`` (``pf``).

    Args:
        X: pandas DataFrame of event-level variables; must contain the
           'class' and 'wt_xs' columns used below.
        channel: channel label used in output file names.
        fold: fold label used in output file names.

    Returns:
        None.
    """
    # (Photon-energy ratio features Egamma*/E, Egamma*/Epi0 etc. were
    # experimented with here and removed; see repository history.)
    print((X.Mrho))

    X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(
        X,
        X['class'],
        X['wt_xs'],
        test_size=0.33,
        random_state=123456,
        # .values replaces DataFrame.as_matrix(), removed in pandas >= 1.0.
        stratify=X['class'].values,
        )

    print(X.shape)
    print(X_train[(X_train['class'] == 1)].shape)
    print(X_test[(X_test['class'] == 1)].shape)

    # Per-category weights are computed and printed for inspection; the
    # per-event re-weighting itself is intentionally disabled here.
    sum_w = X_train['wt_xs'].sum()
    sum_w_cat = X_train.groupby('class')['wt_xs'].sum()
    class_weights = sum_w / sum_w_cat
    class_weight_dict = dict(class_weights)
    print(class_weight_dict)

    # Drop bookkeeping columns and truth/raw-photon inputs.
    drop_cols = [
        'event', 'wt', 'wt_xs', 'multi_class', 'process', 'class',
        'tauFlag1', 'tauFlag2',
        'Egamma1', 'Egamma2', 'Egamma3', 'Egamma4',
        ]
    X_train = X_train.drop(drop_cols, axis=1).reset_index(drop=True)
    X_test = X_test.drop(drop_cols, axis=1).reset_index(drop=True)

    # Rename columns to f0..fN (XGBoost restricts feature-name characters).
    orig_columns = X_train.columns
    X_train.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    X_test.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    print(orig_columns)
    print(X_train.columns)

    params = {
        'objective': 'binary:logistic',
        'max_depth': 4,
        'learning_rate': 0.01,
        'silent': 1,
        'n_estimators': 10000,
        'gamma': 0.1,
        'subsample': 0.9,
        'nthread': -1,
        'seed': 123456
        }

    xgb_clf = xgb.XGBClassifier(**params)
    xgb_clf.fit(
        X_train,
        y_train,
        sample_weight=w_train,
        early_stopping_rounds=50,
        eval_set=[(X_train, y_train, w_train), (X_test, y_test, w_test)],
        eval_metric=['auc', 'logloss'],
        verbose=True
        )

    y_predict = xgb_clf.predict(X_test)
    print(y_predict)
    print(classification_report(
        y_test,
        y_predict,
        target_names=["ggh_rho", "ggh_bkg"],
        sample_weight=w_test
        ))

    y_pred = xgb_clf.predict_proba(X_test)

    # Persist the trained model; pickle requires a binary-mode handle
    # (the previous text-mode 'w' fails under Python 3).
    with open('RhoID/binary_{}_fold{}_xgb.pkl'.format(channel, fold), 'wb') as f:
        pickle.dump(xgb_clf, f)

    # ROC from the signal-class probabilities.  The local name avoids
    # shadowing sklearn's auc() imported at module level.
    auc_score = roc_auc_score(y_test, y_pred[:, 1])
    print(auc_score)
    fpr, tpr, _ = roc_curve(y_test, y_pred[:, 1])
    pf.plot_roc_curve(
        fpr, tpr, auc_score,
        'RhoID/{}_fold{}_roc.pdf'.format(channel, fold))

    # DMatrix wrappers are needed by pf.plot_output().
    xg_train = xgb.DMatrix(X_train, label=y_train, weight=w_train)
    xg_test = xgb.DMatrix(X_test, label=y_test, weight=w_test)

    pf.plot_features(
        xgb_clf,
        'weight',
        'RhoID/binary_{}_fold{}_features_weight.pdf'.format(channel, fold))
    pf.plot_features(
        xgb_clf,
        'gain',
        'RhoID/binary_{}_fold{}_features_gain.pdf'.format(channel, fold))
    pf.plot_output(
        xgb_clf,
        xg_train, xg_test,
        y_train, y_test,
        'RhoID/binary_{}_fold{}_output.pdf'.format(channel, fold))

    y_prediction = xgb_clf.predict(X_test)
    pf.plot_confusion_matrix(
        y_test, y_prediction, w_test,
        classes=["ggh_bkg", "ggh_rho"],
        figname='RhoID/binary_{}_fold{}_non-normalised_weights_cm.pdf'.format(channel, fold))
    pf.plot_confusion_matrix(
        y_test, y_prediction, w_test,
        classes=["ggh_bkg", "ggh_rho"],
        figname='RhoID/binary_{}_fold{}_normalised_weights_cm.pdf'.format(channel, fold),
        normalise_by_row=True)

    return None
def fit_noisejets_ttsplit(X, channel, fold):
    """Train a binary genuine-vs-noise jet XGBoost classifier.

    The model is pickled and also saved in native XGBoost format under
    ``noisejetID/``; ROC / feature-importance / output / confusion-matrix
    plots are written via ``plot_functions`` (``pf``).

    Args:
        X: pandas DataFrame of jet-level variables; must contain the
           'class' and 'wt' columns used below.
        channel: channel label used in output file names.
        fold: fold label used in output file names.

    Returns:
        None.
    """
    X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(
        X,
        X['class'],
        X['wt'],
        test_size=0.2,
        random_state=123456,
        # .values replaces DataFrame.as_matrix(), removed in pandas >= 1.0.
        stratify=X['class'].values,
        )

    print(X.shape)

    # Per-category weights are computed and printed for inspection; the
    # per-event re-weighting itself is intentionally disabled here.
    sum_w = X_train['wt'].sum()
    sum_w_cat = X_train.groupby('class')['wt'].sum()
    class_weights = sum_w / sum_w_cat
    class_weight_dict = dict(class_weights)
    print(class_weight_dict)

    # Drop bookkeeping columns and angles correlated with the target.
    drop_cols = [
        'event', 'wt', 'class',
        'dphi_jtt',
        'jphi_1', 'jpt_1'
        ]
    X_train = X_train.drop(drop_cols, axis=1).reset_index(drop=True)
    X_test = X_test.drop(drop_cols, axis=1).reset_index(drop=True)
    print(X_train.columns)

    params = {
        'objective': 'binary:logistic',
        'max_depth': 4,
        'learning_rate': 0.01,
        'silent': 1,
        'n_estimators': 10000,
        'nthread': -1,
        'seed': 123456
        }

    xgb_clf = xgb.XGBClassifier(**params)
    # Trained unweighted on purpose; weights are only used for plots below.
    xgb_clf.fit(
        X_train,
        y_train,
        early_stopping_rounds=20,
        eval_set=[(X_train, y_train), (X_test, y_test)],
        eval_metric='auc',
        verbose=True
        )

    y_predict = xgb_clf.predict(X_test)
    print(y_predict)
    print('true label: {},{},{}'.format(y_test.values[0], y_test.values[1], y_test.values[2]))
    print('predicted label: {},{},{}'.format(y_predict[0], y_predict[1], y_predict[2]))
    print(classification_report(
        y_test,
        y_predict,
        target_names=["data_genuine", "data_noise"],
        ))

    y_pred = xgb_clf.predict_proba(X_test)

    # Persist the trained model; pickle requires a binary-mode handle
    # (the previous text-mode 'w' fails under Python 3).
    with open('noisejetID/binary_{}_fold{}_xgb.pkl'.format(channel, fold), 'wb') as f:
        pickle.dump(xgb_clf, f)

    # ROC from the noise-class probabilities.  The local name avoids
    # shadowing sklearn's auc() imported at module level.
    auc_score = roc_auc_score(y_test, y_pred[:, 1])
    print(auc_score)
    fpr, tpr, _ = roc_curve(y_test, y_pred[:, 1])
    pf.plot_roc_curve(
        fpr, tpr, auc_score,
        'noisejetID/{}_fold{}_roc.pdf'.format(channel, fold))

    # Also save in native XGBoost format for non-pickle consumers.
    xgb_clf.save_model("noisejetID/binary_{}_fold{}_xgb.model".format(channel, fold))

    y_prediction = xgb_clf.predict(X_test)
    pf.plot_confusion_matrix(
        y_test, y_prediction, w_test,
        classes=["data_noise", "data_genuine"],
        figname='noisejetID/binary_{}_fold{}_non-normalised_weights_cm.pdf'.format(channel, fold))
    pf.plot_confusion_matrix(
        y_test, y_prediction, w_test,
        classes=["data_noise", "data_genuine"],
        figname='noisejetID/binary_{}_fold{}_normalised_weights_cm.pdf'.format(channel, fold),
        normalise_by_row=True)

    # DMatrix wrappers are needed by pf.plot_output().
    xg_train = xgb.DMatrix(X_train.values, label=y_train.values, weight=w_train.values)
    xg_test = xgb.DMatrix(X_test.values, label=y_test.values, weight=w_test.values)

    print("bla bla")
    pf.plot_features(
        xgb_clf,
        'weight',
        'noisejetID/binary_{}_fold{}_features_weight.pdf'.format(channel, fold))
    pf.plot_features(
        xgb_clf,
        'gain',
        'noisejetID/binary_{}_fold{}_features_gain.pdf'.format(channel, fold))
    pf.plot_output(
        xgb_clf,
        xg_train, xg_test,
        y_train.values, y_test.values,
        'noisejetID/binary_{}_fold{}_output.pdf'.format(channel, fold))

    return None
def fit_sssplit(X, folds, channel, sig_sample):
    """K-fold training via StratifiedShuffleSplit with an averaged ROC plot.

    For each of ``folds`` splits an XGBoost binary classifier is trained,
    its ROC curve interpolated onto a common FPR grid and accumulated; the
    mean ROC with a +/- 1 std band is saved to
    ``<folds>fold_roc_<channel>_<sig_sample>.pdf``.

    Args:
        X: pandas DataFrame with a 'class' target column and 'wt' weights.
        folds: number of stratified shuffle splits.
        channel, sig_sample: labels used in the output file name.

    Returns:
        None.
    """
    sss = StratifiedShuffleSplit(n_splits=folds, test_size=0.3, random_state=123456)
    # Shuffle once so the positional indices from sss line up with .loc.
    X = X.sample(frac=1).reset_index(drop=True)
    y = X['class']

    tprs = []
    aucs = []
    mean_fpr = np.linspace(0, 1, 100)

    for i, (train_index, test_index) in enumerate(sss.split(X, y)):
        print('Fold {}/{}'.format(i+1, folds))
        X_train, X_test = X.loc[train_index, :], X.loc[test_index, :]
        y_train, y_test = y[train_index], y[test_index]
        w_train, w_test = X_train['wt'], X_test['wt']
        X_train = X_train.drop(['wt', 'class'], axis=1).reset_index(drop=True)
        X_test = X_test.drop(['wt', 'class'], axis=1).reset_index(drop=True)

        # Balance total positive vs negative event weight.
        sum_wpos = np.sum(w_train[y_train == 1])
        sum_wneg = np.sum(w_train[y_train == 0])
        ratio = sum_wneg / sum_wpos

        params = {
            'objective': 'binary:logistic',
            'max_depth': 3,
            'min_child_weight': 10,
            'learning_rate': 0.01,
            'silent': 1,
            'scale_pos_weight': ratio,
            'n_estimators': 2000,
            'subsample': 0.9,
            'colsample_bytree': 0.9,
            'nthread': -1,
            'seed': 123456
            }

        xgb_clf = xgb.XGBClassifier(**params)
        xgb_clf.fit(
            X_train,
            y_train,
            sample_weight=w_train,
            early_stopping_rounds=50,
            eval_set=[(X_train, y_train), (X_test, y_test)],
            eval_metric=['mae', 'auc'],
            verbose=True
            )

        # Interpolate this fold's ROC onto the common FPR grid.
        probas_ = xgb_clf.predict_proba(X_test)
        fpr, tpr, _ = roc_curve(y_test.ravel(), probas_[:, 1])
        tprs.append(interp(mean_fpr, fpr, tpr))
        tprs[-1][0] = 0.0
        roc_auc = auc(fpr, tpr)
        aucs.append(roc_auc)

        # NOTE(review): a fresh figure is created on every fold, so only the
        # last fold's thin curve survives into the saved plot — confirm
        # whether plt.subplots() was meant to sit before the loop.
        fig, ax = plt.subplots()
        ax.plot(fpr, tpr, lw=1, alpha=0.3)
        # (The original dead `i += 1` after this point was removed:
        # enumerate already advances i each iteration.)

    # Diagonal reference line for a random classifier.
    ax.plot([0, 1], [0, 1], 'k--')

    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(aucs)
    ax.plot(
        mean_fpr,
        mean_tpr,
        'b',
        label=r'Mean ROC (AUC = {:.2f} $\pm$ {:.2f})'.format(mean_auc, std_auc))

    # Shade the +/- 1 standard-deviation band around the mean curve.
    std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
    ax.fill_between(
        mean_fpr,
        tprs_lower,
        tprs_upper,
        color='grey',
        alpha=.2,
        label=r'$\pm$ 1 std deviation'
        )

    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.grid()
    ax.legend(loc='lower right')
    fig.savefig('{}fold_roc_{}_{}.pdf'.format(folds, channel, sig_sample))

    return None
def fit_gbc_ttsplit(X, channel, sig_sample):
    """Train a sklearn GradientBoostingClassifier on a train/test split.

    Prints a classification report and ROC AUC (from the decision
    function), then pickles the model to
    ``skl_<channel>_<sig_sample>_gbc.pkl``.

    Args:
        X: pandas DataFrame of event-level variables; must contain the
           'class', 'wt', 'eta_1' and 'eta_2' columns used below.
        channel: channel label used in the output file name.
        sig_sample: signal sample tag ('powheg' or 'JHU') selecting the
            hyper-parameters.

    Raises:
        ValueError: if ``sig_sample`` has no hyper-parameter set
            (previously this fell through to a NameError on ``params``).

    Returns:
        None.
    """
    X = X.sample(frac=1).reset_index(drop=True)

    X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(
        X,
        X['class'],
        X['wt'],
        test_size=0.30,
        random_state=123456,
        )

    # Drop the target, weights and eta columns before training.
    X_train = X_train.drop(['wt', 'class', 'eta_1', 'eta_2'], axis=1).reset_index(drop=True)
    X_test = X_test.drop(['wt', 'class', 'eta_1', 'eta_2'], axis=1).reset_index(drop=True)

    if sig_sample in ['powheg', 'JHU']:
        params = {
            'loss': 'deviance',
            'max_depth': 3,
            'learning_rate': 0.1,
            'verbose': 1,
            'n_estimators': 100,
            'subsample': 0.7,
            'random_state': 123456
            }
    else:
        raise ValueError(
            'no hyper-parameters defined for sig_sample {!r}'.format(sig_sample))

    gbc_clf = GradientBoostingClassifier(**params)
    gbc_clf.fit(
        X_train,
        y_train,
        sample_weight=w_train,
        )

    y_predict = gbc_clf.predict(X_test)
    print(y_predict)
    print(classification_report(
        y_test,
        y_predict,
        target_names=["background", "signal"],
        sample_weight=w_test
        ))

    # ROC from the decision function rather than class probabilities.
    decisions = gbc_clf.decision_function(X_test)
    fpr, tpr, _ = roc_curve(y_test, decisions)
    roc_auc = auc(fpr, tpr)
    print(roc_auc)

    # Persist the trained model; pickle requires a binary-mode handle
    # (the previous text-mode 'w' fails under Python 3).
    with open('skl_{}_{}_gbc.pkl'.format(channel, sig_sample), 'wb') as f:
        pickle.dump(gbc_clf, f)

    return None
def custom_mean_squared_error(y_predicted, y_true):
    """Mean squared error between argmax-style predicted labels and truth.

    Each row of ``y_predicted`` is reduced to the index of its maximum
    score; ties resolve to the highest index, matching tuple comparison of
    (score, index) pairs.  Returns ``(metric_name, value)`` as expected by
    XGBoost custom eval functions.
    """
    labels = y_true.get_label()
    assert len(y_predicted) == len(labels)
    winners = [max((score, idx) for idx, score in enumerate(row))
               for row in y_predicted]
    pred_labels = np.array(winners)[:, 1]
    residuals = np.subtract(pred_labels, labels)
    return 'custom_mean_squared_error', np.mean(np.square(residuals))
def custom_exponential_loss(y_predicted, y_true):
    """Exponential loss of argmax-style predicted labels against truth.

    Returns ``(metric_name, -exp(mean(label * pred_label) / n))`` as
    expected by XGBoost custom eval functions; ties in a score row resolve
    to the highest index.
    """
    labels = y_true.get_label()
    assert len(y_predicted) == len(labels)
    winners = [max((score, idx) for idx, score in enumerate(row))
               for row in y_predicted]
    pred_labels = np.array(winners)[:, 1]
    overlap = labels * pred_labels
    return 'custom_exponential_loss', -np.exp(np.mean(overlap) / len(labels))
def custom_f1_score(y_predicted, y_true):
    """Inverse weighted F1 of argmax-style predictions (lower is better).

    Each row of ``y_predicted`` is reduced to the index of its maximum
    score (ties resolve to the highest index), then scored against the
    true labels with sklearn's weighted-average F1.  Returns
    ``(metric_name, 1/f1)`` as expected by XGBoost custom eval functions.
    """
    labels = y_true.get_label()
    assert len(y_predicted) == len(labels)
    winners = [max((score, idx) for idx, score in enumerate(row))
               for row in y_predicted]
    pred_labels = np.array(winners)[:, 1]
    f1 = f1_score(labels, pred_labels, average='weighted')
    return 'custom_f1_score', 1./f1
def custom_fbeta_score(y_predicted, y_true):
    """Inverse weighted F-beta (beta=5) of argmax-style predictions.

    Each row of ``y_predicted`` is reduced to the index of its maximum
    score (ties resolve to the highest index), then scored against the
    true labels with sklearn's weighted-average F-beta.  Returns
    ``(metric_name, 1/fbeta)`` as expected by XGBoost custom eval
    functions; lower is better.
    """
    labels = y_true.get_label()
    assert len(y_predicted) == len(labels)
    winners = [max((score, idx) for idx, score in enumerate(row))
               for row in y_predicted]
    pred_labels = np.array(winners)[:, 1]
    fbeta = fbeta_score(labels, pred_labels, beta=5, average='weighted')
    return 'custom_fbeta_score', 1./fbeta
def fit_multiclass_ttsplit(X, analysis, channel, sig_sample):
    """Train a multiclass (multi:softprob) XGBoost classifier on a 50/50 split.

    Per-event weights ('wt_xs') are re-scaled so every category carries the
    same total weight, string labels are integer-encoded, and the trained
    model is pickled as ``multi_<analysis>_<channel>_<sig_sample>_xgb.pkl``.
    Feature-importance and confusion-matrix plots are written via
    ``plot_functions`` (``pf``).

    Args:
        X: pandas DataFrame of event-level variables; must contain the
           'multi_class' and 'wt_xs' columns used below.
        analysis, channel: labels used in output file names.
        sig_sample: 'powheg' or 'JHU'; selects the hyper-parameter set.

    Raises:
        ValueError: if no hyper-parameter set matches sig_sample/channel
            (previously this fell through to a NameError on ``params``).

    Returns:
        None.
    """
    X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(
        X,
        X['multi_class'],
        X['wt_xs'],
        test_size=0.5,
        random_state=123456,
        )

    # Per-category weight: total weight over per-category total, so each
    # class contributes equally to the training loss.
    sum_w = X_train['wt_xs'].sum()
    sum_w_cat = X_train.groupby('multi_class')['wt_xs'].sum()
    class_weights = sum_w / sum_w_cat
    class_weight_dict = dict(class_weights)
    print(class_weight_dict)

    # Fold the class weights into the per-event training weights.
    # (The original special-cased 'ggh' but applied the identical
    # multiplication in both branches, so the branch was dropped.)
    for i in w_train.index:
        for key, value in class_weight_dict.items():
            if y_train[i] == key:
                w_train.at[i] *= value

    # Integer-encode the string class labels for XGBoost.
    encoder_train = LabelEncoder()
    encoder_test = LabelEncoder()
    encoder_train.fit(y_train)
    y_train = encoder_train.transform(y_train)
    encoder_test.fit(y_test)
    y_test = encoder_test.transform(y_test)

    # Drop bookkeeping and truth columns before training.
    drop_cols = [
        'wt', 'wt_xs', 'process', 'multi_class', 'class', 'event',
        'gen_match_1', 'gen_match_2'
        ]
    X_train = X_train.drop(drop_cols, axis=1).reset_index(drop=True)
    X_test = X_test.drop(drop_cols, axis=1).reset_index(drop=True)
    print(X_train.shape)
    print(X_test.shape)

    # Hyper-parameters per signal sample (same set for all four channels).
    params = None
    if sig_sample in ['powheg']:
        if channel in ['tt', 'mt', 'et', 'em']:
            params = {
                'objective': 'multi:softprob',
                'max_depth': 8,
                'learning_rate': 0.005,
                'silent': 1,
                'n_estimators': 500,
                'gamma': 0,
                'subsample': 0.8,
                'colsample_bytree': 0.8,
                'nthread': -1,
                'missing': -9999,
                'seed': 123456
                }
    if sig_sample in ['JHU']:
        if channel in ['tt', 'mt', 'et', 'em']:
            params = {
                'objective': 'multi:softprob',
                'max_depth': 5,
                'learning_rate': 0.025,
                'silent': 1,
                'n_estimators': 300,
                'gamma': 0,
                'subsample': 0.8,
                'colsample_bytree': 0.8,
                'nthread': -1,
                'missing': -9999,
                'seed': 123456
                }
    if params is None:
        raise ValueError(
            'no hyper-parameters for sig_sample {!r} / channel {!r}'.format(
                sig_sample, channel))

    xgb_clf = xgb.XGBClassifier(**params)
    xgb_clf.fit(
        X_train,
        y_train,
        sample_weight=w_train,
        early_stopping_rounds=100,
        eval_set=[(X_train, y_train, w_train), (X_test, y_test, w_test)],
        eval_metric=['merror'],
        verbose=True
        )

    y_predict = xgb_clf.predict(X_test)
    print('true label: {},{},{}'.format(y_test[0], y_test[1], y_test[2]))
    print('predicted label: {},{},{}'.format(y_predict[0], y_predict[1], y_predict[2]))
    print('\n Mean Square Error: {}'.format(mean_squared_error(y_test, y_predict)))
    print(classification_report(
        y_test,
        y_predict,
        target_names=list(encoder_test.classes_),
        sample_weight=w_test
        ))

    y_pred = xgb_clf.predict_proba(X_test)
    print('highest proba: {},{},{}'.format(max(y_pred[0]), max(y_pred[1]), max(y_pred[2])))

    # Persist the trained model; pickle requires a binary-mode handle
    # (the previous text-mode 'w' fails under Python 3).
    with open('multi_{}_{}_{}_xgb.pkl'.format(analysis, channel, sig_sample), 'wb') as f:
        pickle.dump(xgb_clf, f)

    # DMatrix wrappers are needed by pf.plot_output().
    xg_train = xgb.DMatrix(X_train, label=y_train, weight=w_train)
    xg_test = xgb.DMatrix(X_test, label=y_test, weight=w_test)

    # Pass the classifier itself, for consistency with the other fit_*
    # functions in this module (their `.booster()` calls are commented out
    # and the classifier is passed directly).
    pf.plot_features(
        xgb_clf,
        'weight',
        'multi_{}_{}_{}_features_weight.pdf'.format(analysis, channel, sig_sample))
    pf.plot_features(
        xgb_clf,
        'gain',
        'multi_{}_{}_{}_features_gain.pdf'.format(analysis, channel, sig_sample))

    y_prediction = xgb_clf.predict(X_test)
    pf.plot_confusion_matrix(
        y_test, y_prediction, w_test,
        classes=list(encoder_test.classes_),
        figname='multi_{}_{}_{}_non-normalised_weights_cm.pdf'.format(analysis, channel, sig_sample),
        normalise=False)
    pf.plot_confusion_matrix(
        y_test, y_prediction, w_test,
        classes=list(encoder_test.classes_),
        figname='multi_{}_{}_{}_normalised_weights_cm.pdf'.format(analysis, channel, sig_sample),
        normalise=True)

    return None
def _multiclass_kfold_params(analysis, channel, sig_sample, mjj_training):
    """Return the XGBClassifier hyperparameters for one training configuration.

    The lookup preserves the original selection order: generic settings for
    the 'low'/'high' mjj regimes are chosen first, then 'madgraph'
    signal-sample settings override them.  Raises ValueError for a
    combination that had no parameters defined (the original code would
    have crashed later with an opaque NameError).
    """
    # Settings shared by every configuration.
    base = {
        'objective': 'multi:softprob',
        'silent': 1,
        'nthread': -1,
        'seed': 123456,
    }
    params = None
    if mjj_training == 'low':
        if analysis == 'sm' and channel in ('tt', 'mt', 'et', 'em'):
            params = dict(base, max_depth=8, learning_rate=0.05,
                          n_estimators=500, gamma=0,
                          subsample=0.8, colsample_bytree=0.8)
        if analysis == 'cpsm':
            # mt/et, tt and em differ only in the number of estimators.
            n_est = {'mt': 3000, 'et': 3000, 'tt': 200, 'em': 1000}.get(channel)
            if n_est is not None:
                params = dict(base, max_depth=7, learning_rate=0.025,
                              n_estimators=n_est, gamma=5,
                              subsample=0.9, colsample_bytree=0.6)
    if mjj_training == 'high':
        if channel == 'tt':
            params = dict(base, max_depth=5, min_child_weight=0,
                          learning_rate=0.025, n_estimators=600,
                          subsample=0.9)
        if channel in ('mt', 'et', 'em'):
            params = dict(base, max_depth=4, learning_rate=0.025,
                          n_estimators=1500, gamma=5,
                          subsample=0.9, colsample_bytree=0.6)
    # madgraph settings take precedence over the generic ones above.
    if sig_sample == 'madgraph':
        if mjj_training in ('high', 'high_tight'):
            if channel in ('tt', 'mt', 'et', 'em'):
                params = dict(base,
                              max_depth=5 if channel == 'tt' else 6,
                              learning_rate=0.025, n_estimators=10000,
                              gamma=0.1, reg_lambda=0.3, subsample=0.8)
        elif mjj_training == 'low':
            if channel == 'tt':
                params = dict(base, max_depth=6, min_child_weight=1,
                              learning_rate=0.01, n_estimators=10000,
                              gamma=2, subsample=0.9, colsample_bytree=0.6)
            if channel in ('mt', 'et', 'em'):
                params = dict(base, max_depth=6, min_child_weight=1,
                              learning_rate=0.05, n_estimators=10000,
                              gamma=0.1, reg_lambda=0.3, subsample=0.8)
    if params is None:
        raise ValueError(
            'no hyperparameters defined for analysis={}, channel={}, '
            'sig_sample={}, mjj_training={}'.format(
                analysis, channel, sig_sample, mjj_training))
    return params


def fit_multiclass_kfold(X, fold, analysis, channel, sig_sample, mjj_training):
    """Train one k-fold XGBoost multiclass model for the mjj-split analysis.

    Parameters
    ----------
    X : pandas.DataFrame
        Events with feature columns plus the bookkeeping columns
        'multi_class' (target label), 'wt_xs' (training weight), 'wt',
        'process', 'event', 'gen_match_1' and 'gen_match_2'.
    fold : int or str
        Fold identifier; used only in printouts and output file names.
    analysis, channel, sig_sample, mjj_training : str
        Configuration keys selecting classes, engineered features and
        hyperparameters.

    Returns
    -------
    None.  Side effects: pickles the fitted classifier to
    ``multi_fold{fold}_{analysis}_{channel}_{sig_sample}_{mjj}_xgb.pkl``
    and writes learning-curve / feature-importance / confusion-matrix
    plots via ``pf``.
    """
    print('Training XGBoost model fold{}'.format(fold))
    print(X.columns)
    print(X[X.multi_class == "ggh"].wt_xs)

    if mjj_training == "high":
        X = X[X["multi_class"] != "misc"]
        if channel == "em":
            X = X[X["multi_class"] != "qcd"]
        # drop ggh entirely and train for qqh
        X = X[X["multi_class"] != "ggh"]
        # Custom angular variables from the lepton kinematics.
        X["dphi_custom"] = np.arccos(1 - X.mt_lep**2 / (2. * X.pt_1 * X.pt_2))
        X["dR_custom"] = np.sqrt((X.eta_1 - X.eta_2)**2 + (X.dphi_custom)**2)
        # Zeppenfeld variable and dijet centrality (VBF-sensitive).
        X["zfeld"] = np.fabs(X.eta_h - (X.jeta_1 + X.jeta_2) / 2.)
        X["centrality"] = np.exp(-4 * (X.zfeld / np.fabs(X.jdeta))**2)
    if mjj_training == "low":
        X = X[X["multi_class"] != "misc"]
        X["dphi_custom"] = np.arccos(1 - X.mt_lep**2 / (2. * X.pt_1 * X.pt_2))
        X["dR_custom"] = np.sqrt((X.eta_1 - X.eta_2)**2 + (X.dphi_custom)**2)

    X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(
        X,
        X['multi_class'],
        X['wt_xs'],
        test_size=0.25,
        random_state=123456,
        # .values instead of the long-deprecated DataFrame.as_matrix().
        stratify=X['multi_class'].values,
    )
    print(X_train[(X_train.multi_class == 'ggh')].shape)
    del X
    gc.collect()

    # Balance classes: scale each event weight so that every class carries
    # the same total weight.
    sum_w = X_train['wt_xs'].sum()
    sum_w_cat = X_train.groupby('multi_class')['wt_xs'].sum()
    class_weight_dict = dict(sum_w / sum_w_cat)
    print(class_weight_dict)
    # Vectorised replacement for the original O(n_events * n_classes)
    # python loop; numerically identical (the special-cased ggh/high branch
    # multiplied by value * 1.0, i.e. the same as the generic branch).
    w_train = w_train * y_train.map(class_weight_dict)

    # Encode the string class labels as integers.  NOTE(review): train and
    # test use independently fitted encoders; with stratified splitting both
    # folds contain every class so the mappings agree — confirm if a class
    # can ever be absent from one fold.
    encoder_train = LabelEncoder()
    encoder_test = LabelEncoder()
    y_train = encoder_train.fit_transform(y_train)
    y_test = encoder_test.fit_transform(y_test)

    print(X_train.head(5))
    # Remove bookkeeping columns that must not be seen by the classifier.
    drop_cols = [
        'wt', 'wt_xs', 'process', 'multi_class', 'event',
        'gen_match_1', 'gen_match_2',
    ]
    X_train = X_train.drop(drop_cols, axis=1).reset_index(drop=True)
    X_test = X_test.drop(drop_cols, axis=1).reset_index(drop=True)
    if channel == "em":
        X_train = X_train.drop(["wt_em_qcd"], axis=1).reset_index(drop=True)
        X_test = X_test.drop(["wt_em_qcd"], axis=1).reset_index(drop=True)
    if mjj_training in ("high", "low"):
        # dphi_custom is only an intermediate for dR_custom (the original
        # dropped it in two identical branches).
        X_train = X_train.drop(["dphi_custom"], axis=1).reset_index(drop=True)
        X_test = X_test.drop(["dphi_custom"], axis=1).reset_index(drop=True)

    # Rename the features to xgboost-style names "f0", "f1", ...
    print(X_train.columns)
    X_train.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    X_test.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    print(X_train.columns)

    params = _multiclass_kfold_params(analysis, channel, sig_sample, mjj_training)
    xgb_clf = xgb.XGBClassifier(**params)
    xgb_clf.fit(
        X_train,
        y_train,
        sample_weight=w_train,
        early_stopping_rounds=50,
        eval_set=[(X_train, y_train, w_train), (X_test, y_test, w_test)],
        eval_metric=['merror', 'mlogloss'],
        verbose=True,
    )

    y_predict = xgb_clf.predict(X_test)
    print('true label: {},{},{},{},{},{}'.format(
        y_test[0], y_test[1], y_test[2], y_test[3], y_test[4], y_test[5]))
    print('predicted label: {},{},{},{},{},{}'.format(
        y_predict[0], y_predict[1], y_predict[2],
        y_predict[3], y_predict[4], y_predict[5]))
    print('\n Mean Square Error: {}'.format(mean_squared_error(y_test, y_predict)))
    print(classification_report(
        y_test,
        y_predict,
        target_names=list(encoder_test.classes_),
        sample_weight=w_test
    ))
    y_pred = xgb_clf.predict_proba(X_test)
    # BUG FIX: the original format string had three placeholders for six
    # arguments, silently dropping the last three probability rows.
    print('all probs: {} \n {} \n {} \n {} \n {} \n {}'.format(
        y_pred[0], y_pred[1], y_pred[2], y_pred[3], y_pred[4], y_pred[5]))
    print('highest proba: {},{},{}'.format(
        max(y_pred[0]), max(y_pred[1]), max(y_pred[2])))
    print(xgb_clf)

    # 'wb': pickle output is binary; text mode fails on Python 3 and is
    # harmless on Python 2.
    with open('multi_fold{}_{}_{}_{}_{}_xgb.pkl'.format(
            fold, analysis, channel, sig_sample, mjj_training), 'wb') as f:
        pickle.dump(xgb_clf, f)

    ## Plotting things.  (Unused xgb.DMatrix copies that only served the
    ## commented-out plot_output() were removed.)
    prefix = 'multi_fold{}_{}_{}_{}_{}_'.format(
        fold, analysis, channel, sig_sample, mjj_training)
    pf.plot_learning_curve(
        xgb_clf, "mlogloss", prefix + "learning_curve_logloss.pdf")
    pf.plot_learning_curve(
        xgb_clf, "merror", prefix + "learning_curve_error.pdf")
    pf.plot_features(xgb_clf, 'weight', prefix + 'features_weight.pdf')
    pf.plot_features(xgb_clf, 'gain', prefix + 'features_gain.pdf')
    # Reuse the prediction from above instead of predicting a second time.
    pf.plot_confusion_matrix(
        y_test, y_predict, w_test,
        classes=list(encoder_test.classes_),
        figname=prefix + 'non-normalised_weights_cm.pdf')
    pf.plot_confusion_matrix(
        y_test, y_predict, w_test,
        classes=list(encoder_test.classes_),
        figname=prefix + 'normalised_efficiency_weights_cm.pdf',
        normalise_by_col=True)
    pf.plot_confusion_matrix(
        y_test, y_predict, w_test,
        classes=list(encoder_test.classes_),
        figname=prefix + 'normalised_purity_weights_cm.pdf',
        normalise_by_row=True)
    return None
#### NEW FUNCTION FOR INCLUSIVE TRAINING (CP IN DECAYS)
def fit_multiclass_kfold_inc(X, fold, analysis, channel, sig_sample, era, splitByDM=None):
    """Train one k-fold XGBoost multiclass model for the inclusive
    (CP-in-decays) training.

    Parameters
    ----------
    X : pandas.DataFrame
        Events with feature columns plus the bookkeeping columns
        'multi_class', 'wt_xs', 'wt', 'process', 'event', 'gen_match_1',
        'gen_match_2' (and, depending on configuration, 'wt_cp_sm',
        'wt_cp_ps', 'wt_em_qcd').  Note: rows with NaNs are dropped
        in place, mutating the caller's frame (original behaviour).
    fold : int or str
        Fold identifier; used only in printouts and output file names.
    analysis, channel, sig_sample, era : str
        Configuration keys; era is used in output file names.
    splitByDM : int, optional
        HPS decay-mode selection: 1 keeps events with both tau decay
        modes == 1; 2 keeps mixed (1, 10) / (10, 1) events.

    Returns
    -------
    None.  Side effects: pickles the fitted classifier and writes
    correlation-matrix / learning-curve / feature-importance /
    confusion-matrix plots via ``pf``.
    """
    print('Training XGBoost model fold{}'.format(fold))
    print(X.columns)
    print(X["multi_class"])
    X.dropna(inplace=True)
    if channel == "em":
        X = X[X["multi_class"] != "qcd"]
    X = X[X["multi_class"] != "misc"]
    # Merge the Higgs production modes into a single "higgs" class.
    # Explicit assignment instead of inplace replace on a filtered copy
    # (avoids SettingWithCopy); the original two-step qqh->ggh->higgs
    # replace maps both labels to "higgs" exactly as done here.
    X["multi_class"] = X["multi_class"].replace(["qqh", "ggh"], "higgs")
    # Optional split by decay mode (HPS for now).
    if splitByDM is not None:
        # BUG FIX: the original used X.eval("<boolean>", inplace=True),
        # which does not select rows (and raises on modern pandas); the
        # evident intent is a row selection, i.e. DataFrame.query.
        if splitByDM == 1:
            X = X.query("tau_decay_mode_1==1 and tau_decay_mode_2==1")
        if splitByDM == 2:
            X = X.query(
                "(tau_decay_mode_1==1 and tau_decay_mode_2==10)"
                " or (tau_decay_mode_1==10 and tau_decay_mode_2==1)")

    X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(
        X,
        X['multi_class'],
        X['wt_xs'],
        test_size=0.25,
        random_state=123456,
        # .values instead of the long-deprecated DataFrame.as_matrix().
        stratify=X['multi_class'].values,
    )
    print(X_train[(X_train.multi_class == 'ggh')].shape)
    del X
    gc.collect()

    # Balance classes: scale each event weight so that every class carries
    # the same total weight.
    sum_w = X_train['wt_xs'].sum()
    sum_w_cat = X_train.groupby('multi_class')['wt_xs'].sum()
    class_weight_dict = dict(sum_w / sum_w_cat)
    print(class_weight_dict)
    # Vectorised replacement for the original O(n_events * n_classes)
    # python loop; numerically identical.
    w_train = w_train * y_train.map(class_weight_dict)
    # NOTE(review): this re-sums the (unmodified) X_train['wt_xs'] column,
    # so it prints the same totals as before reweighting; kept from the
    # original for log compatibility.
    print(X_train.groupby('multi_class')['wt_xs'].sum())

    # Encode the string class labels as integers.  NOTE(review): train and
    # test use independently fitted encoders; with stratified splitting
    # both folds contain every class so the mappings agree.
    encoder_train = LabelEncoder()
    encoder_test = LabelEncoder()
    y_train = encoder_train.fit_transform(y_train)
    y_test = encoder_test.fit_transform(y_test)
    print(X_train.head(5))

    # Remove bookkeeping columns that must not be seen by the classifier.
    drop_vars = ["wt", "wt_xs", "process", "multi_class", "event",
                 "gen_match_1", "gen_match_2"]
    if sig_sample in ("tauspinner", "powheg"):
        drop_vars += ["wt_cp_sm", "wt_cp_ps"]
    if channel == "em":
        drop_vars.append("wt_em_qcd")
    X_train = X_train.drop(drop_vars, axis=1).reset_index(drop=True)
    X_test = X_test.drop(drop_vars, axis=1).reset_index(drop=True)

    pf.plot_correlation_matrix(X_train, 'correlation_matrix.pdf')

    # Rename the features to xgboost-style names "f0", "f1", ...
    print(X_train.columns)
    X_train.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    X_test.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    print(X_train.columns)

    # Per-channel hyperparameters (values kept from the original).
    base = {
        'objective': 'multi:softprob',
        'silent': 1,
        'nthread': -1,
        'seed': 123456,
    }
    if channel == 'tt':
        params = dict(base, max_depth=4, min_child_weight=1,
                      learning_rate=0.1, n_estimators=10000, gamma=2,
                      subsample=0.9, colsample_bytree=0.6)
    elif channel in ('mt', 'et'):
        # NOTE(review): learning_rate=1 is unusually large — kept from the
        # original; confirm it is intentional.
        params = dict(base, max_depth=4, learning_rate=1,
                      n_estimators=10000,
                      subsample=0.9, colsample_bytree=0.6)
    elif channel == 'em':
        params = dict(base, max_depth=4, min_child_weight=1,
                      learning_rate=0.05, n_estimators=10000, gamma=0.1,
                      reg_lambda=0.3, subsample=0.8)
    else:
        raise ValueError(
            'no hyperparameters defined for channel {}'.format(channel))

    xgb_clf = xgb.XGBClassifier(**params)
    xgb_clf.fit(
        X_train,
        y_train,
        sample_weight=w_train,
        early_stopping_rounds=20,
        eval_set=[(X_train, y_train, w_train), (X_test, y_test, w_test)],
        eval_metric=['merror', 'mlogloss'],
        verbose=True,
    )

    y_predict = xgb_clf.predict(X_test)
    print('true label: {},{},{},{},{},{}'.format(
        y_test[0], y_test[1], y_test[2], y_test[3], y_test[4], y_test[5]))
    print('predicted label: {},{},{},{},{},{}'.format(
        y_predict[0], y_predict[1], y_predict[2],
        y_predict[3], y_predict[4], y_predict[5]))
    print('\n Mean Square Error: {}'.format(mean_squared_error(y_test, y_predict)))
    print(classification_report(
        y_test,
        y_predict,
        target_names=list(encoder_test.classes_),
        sample_weight=w_test
    ))
    y_pred = xgb_clf.predict_proba(X_test)
    # BUG FIX: the original format string had three placeholders for six
    # arguments, silently dropping the last three probability rows.
    print('all probs: {} \n {} \n {} \n {} \n {} \n {}'.format(
        y_pred[0], y_pred[1], y_pred[2], y_pred[3], y_pred[4], y_pred[5]))
    print('highest proba: {},{},{}'.format(
        max(y_pred[0]), max(y_pred[1]), max(y_pred[2])))
    print(xgb_clf)

    # 'wb': pickle output is binary; text mode fails on Python 3 and is
    # harmless on Python 2.
    with open('multi_fold{}_{}_{}_{}_{}_xgb.pkl'.format(
            fold, analysis, channel, sig_sample, era), 'wb') as f:
        pickle.dump(xgb_clf, f)

    ## Plotting things.  (Unused xgb.DMatrix copies that only served the
    ## commented-out plot_output() were removed.)
    prefix = 'multi_fold{}_{}_{}_{}_{}_'.format(
        fold, analysis, channel, sig_sample, era)
    pf.plot_learning_curve(
        xgb_clf, "mlogloss", prefix + "learning_curve_logloss.pdf")
    pf.plot_learning_curve(
        xgb_clf, "merror", prefix + "learning_curve_error.pdf")
    pf.plot_features(xgb_clf, 'weight', prefix + 'features_weight.pdf')
    pf.plot_features(xgb_clf, 'gain', prefix + 'features_gain.pdf')
    # Reuse the prediction from above instead of predicting a second time.
    pf.plot_confusion_matrix(
        y_test, y_predict, w_test,
        classes=list(encoder_test.classes_),
        figname=prefix + 'non-normalised_weights_cm.pdf')
    pf.plot_confusion_matrix(
        y_test, y_predict, w_test,
        classes=list(encoder_test.classes_),
        figname=prefix + 'normalised_efficiency_weights_cm.pdf',
        normalise_by_col=True)
    pf.plot_confusion_matrix(
        y_test, y_predict, w_test,
        classes=list(encoder_test.classes_),
        figname=prefix + 'normalised_purity_weights_cm.pdf',
        normalise_by_row=True)
    return None
######## TESTING CV
def fit_multiclass_cvkfold(X, fold, analysis, channel, sig_sample):
    """Train multiclass XGBoost classifiers with stratified k-fold CV.

    Splits X into 4 stratified folds on the 'multi_class' column, trains one
    XGBClassifier per fold with class-balanced 'wt_xs' sample weights, and
    accumulates a weighted micro-averaged F1 score over the held-out folds.
    Hyper-parameters come from hard-coded dictionaries keyed on
    (sig_sample, analysis, channel).

    Parameters (assumed from usage -- TODO confirm against callers):
        X          -- pandas DataFrame with feature columns plus 'multi_class',
                      'wt_xs', 'wt', 'process', 'event', 'gen_match_1',
                      'gen_match_2', 'eta_tt'
        fold       -- fold tag, used only in the log message
        analysis   -- 'sm' or 'cpsm'; selects the parameter set
        channel    -- e.g. 'tt', 'mt', 'et', 'em'
        sig_sample -- 'powheg' or 'JHU'; selects parameters and fit options

    Returns None; diagnostics are printed to stdout.
    """
    ## START EDITING THIS FOR ODD/EVEN SPLIT
    print('Training XGBoost model fold{}'.format(fold))
    numFolds = 4
    folds = StratifiedKFold(n_splits=numFolds, shuffle=True, random_state=123456)
    estimators = []
    # out-of-fold class predictions, one slot per row of X
    results = np.zeros(X.shape[0])
    score = 0.0
    X = X.reset_index(drop=True)
    for train_index, test_index in folds.split(X, X['multi_class']):
        print(train_index)
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        y_train, y_test = X['multi_class'][train_index], X['multi_class'][test_index]
        w_train, w_test = X['wt_xs'][train_index], X['wt_xs'][test_index]
        print(X_train[(X_train.multi_class == 'ggh')].shape)
        # class balancing: scale each class so its total weight equals the
        # overall total (weight factor = sum_w / sum_w_of_that_class)
        sum_w = X_train['wt_xs'].sum()
        sum_w_cat = X_train.groupby('multi_class')['wt_xs'].sum()
        class_weights = sum_w / sum_w_cat
        class_weight_dict = dict(class_weights)
        print(class_weight_dict)
        # multiply w_train by class_weight now
        for i in w_train.index:
            for key, value in class_weight_dict.items():
                if y_train[i] == key:
                    w_train.at[i] *= value
        ## use one-hot encoding
        # encode class values as integers
        # NOTE(review): the test encoder is fit on y_test independently of
        # y_train -- the integer<->label mapping only agrees when both sets
        # contain the same classes; verify for small folds.
        encoder_train = LabelEncoder()
        encoder_test = LabelEncoder()
        encoder_train.fit(y_train)
        y_train = encoder_train.transform(y_train)
        encoder_test.fit(y_test)
        y_test = encoder_test.transform(y_test)
        # drop bookkeeping columns so only features remain
        X_train = X_train.drop([
            'wt','wt_xs', 'process', 'multi_class','event',
            'gen_match_1', 'gen_match_2','eta_tt',
            ], axis=1).reset_index(drop=True)
        X_test = X_test.drop([
            'wt','wt_xs', 'process', 'multi_class','event',
            'gen_match_1', 'gen_match_2','eta_tt',
            ], axis=1).reset_index(drop=True)
        # to use names "f0" etcs
        print(X_train.columns)
        orig_columns = X_train.columns
        X_train.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
        X_test.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
        print(X_train.columns)
        # hyper-parameters per (sig_sample, analysis, channel).
        # NOTE(review): if no branch matches, 'params' is undefined and the
        # XGBClassifier construction below raises NameError.
        if sig_sample in ['powheg']:
            if analysis == 'sm':
                if channel in ['tt','mt','et','em']:
                    params = {
                        'objective':'multi:softprob',
                        'max_depth':8,
                        'learning_rate':0.05,
                        'silent':1,
                        'n_estimators':500,
                        'gamma':0,
                        'subsample':0.8,
                        'colsample_bytree':0.8,
                        'nthread':-1,
                        'seed':123456
                        }
            if analysis == 'cpsm':
                if channel in ['tt','mt','et']:
                    params = {
                        'objective':'multi:softprob',
                        'max_depth':7,
                        'learning_rate':0.05,
                        'silent':1,
                        'n_estimators':300,
                        'subsample':0.9,
                        'nthread':-1,
                        'seed':123456
                        }
                if channel in ['em']:
                    params = {
                        'objective':'multi:softprob',
                        'max_depth':7,
                        'learning_rate':0.025,
                        'silent':1,
                        'n_estimators':150,
                        'subsample':0.9,
                        'nthread':-1,
                        'seed':123456
                        }
        if sig_sample in ['JHU']:
            if channel in ['tt','mt','et','em']:
                params = {
                    'objective':'multi:softprob',
                    'max_depth':5,
                    'learning_rate':0.025,
                    'silent':1,
                    'n_estimators':3000,
                    'gamma':5,
                    'subsample':0.9,
                    'colsample_bylevel':0.6,
                    'nthread':-1,
                    'seed':123456
                    }
        print(params)
        xgb_clf = xgb.XGBClassifier(**params)
        # fit options differ by sample: JHU uses mlogloss with 50-round
        # early stopping, powheg uses a custom metric (defined elsewhere
        # in this file) with 20/30-round early stopping
        if sig_sample in ['JHU']:
            if channel in ['tt','mt','et','em']:
                xgb_clf.fit(
                    X_train,
                    y_train,
                    sample_weight = w_train,
                    early_stopping_rounds=50,
                    eval_set=[(X_train, y_train, w_train), (X_test, y_test, w_test)],
                    eval_metric = 'mlogloss',
                    verbose=True
                    )
        if sig_sample in ['powheg']:
            if channel in ['tt','mt','et']:
                xgb_clf.fit(
                    X_train,
                    y_train,
                    sample_weight = w_train,
                    early_stopping_rounds=20,
                    eval_set=[(X_train, y_train, w_train), (X_test, y_test, w_test)],
                    eval_metric = custom_mean_squared_error,
                    verbose=True
                    )
            if channel in ['em']:
                xgb_clf.fit(
                    X_train,
                    y_train,
                    sample_weight = w_train,
                    early_stopping_rounds=30,
                    eval_set=[(X_train, y_train, w_train), (X_test, y_test, w_test)],
                    eval_metric = custom_mean_squared_error,
                    verbose=True
                    )
        # per-fold diagnostics
        y_predict = xgb_clf.predict(X_test)
        print('true label: {},{},{}'.format(y_test[0],y_test[1],y_test[2]))
        print('predicted label: {},{},{}'.format(y_predict[0],y_predict[1],y_predict[2]))
        print('\n Mean Square Error: {}'.format(mean_squared_error(y_test,y_predict)))
        print(classification_report(
            y_test,
            y_predict,
            target_names=list(encoder_test.classes_),
            sample_weight=w_test
            ))
        y_pred = xgb_clf.predict_proba(X_test)
        print('all probs: {} \n {} \n {}'.format(y_pred[0],y_pred[1],y_pred[2]))
        print('highest proba: {},{},{}'.format(max(y_pred[0]),max(y_pred[1]),max(y_pred[2])))
        # record best iteration and accumulate weighted micro-F1 per fold
        estimators.append(xgb_clf.best_iteration)
        print(estimators)
        results[test_index] = xgb_clf.predict(X_test)
        score += f1_score(y_test, results[test_index],average='micro',sample_weight=w_test)
    score /= numFolds
    print(score)
    return None
########
def fit_sklearnNN(X, channel, fold, analysis, sig_sample, mjj_training):
    """Train a small sklearn MLPClassifier (4 hidden units) on the sample.

    Parameters mirror fit_keras; the trained model is not persisted (the
    save block at the end is commented out).  Returns None.
    """
    ### TEST A KERAS MODEL
    ## START EDITING THIS FOR ODD/EVEN SPLIT
    print('Training keras model fold{}'.format(fold))
    # NOTE(review): indentation of this selection block was reconstructed;
    # confirm the qqh->ggh merge is meant for the em channel only.
    if mjj_training == "high":
        X = X[X["multi_class"] != "misc"]
        if channel == "em":
            X = X[X["multi_class"] != "qcd"]
            X.multi_class.replace("qqh","ggh",inplace=True)
    X_train,X_test, y_train,y_test,w_train,w_test = train_test_split(
        X,
        X['multi_class'],
        X['wt_xs'],
        test_size=0.25,
        random_state=123456,
        stratify=X['multi_class'].as_matrix(),  # NOTE: as_matrix() removed in pandas>=1.0; .values
        )
    # class balancing: scale each class to the same total wt_xs
    sum_w = X_train['wt_xs'].sum()
    sum_w_cat = X_train.groupby('multi_class')['wt_xs'].sum()
    class_weights = sum_w / sum_w_cat
    class_weight_dict = dict(class_weights)
    print(class_weight_dict)
    # multiply w_train by class_weight now
    for i in w_train.index:
        for key, value in class_weight_dict.items():
            if y_train[i] == key:
                w_train.at[i] *= value
    ## use one-hot encoding
    # encode class values as integers
    encoder = LabelEncoder()
    encoder.fit(y_train)
    encoded_y_train = encoder.transform(y_train)
    # convert integers to dummy variables (i.e. one hot encoded)
    y_train = np_utils.to_categorical(encoded_y_train, num_classes=3)
    # NOTE(review): encoder is re-fit on y_test; label<->integer mapping only
    # matches training if both sets contain the same classes -- verify.
    encoder.fit(y_test)
    encoded_y_test = encoder.transform(y_test)
    # convert integers to dummy variables (i.e. one hot encoded)
    y_test = np_utils.to_categorical(encoded_y_test, num_classes=3)
    print('original Y: ', X_train['multi_class'].head())
    print('one-hot y: ', y_train[0])
    # drop bookkeeping columns so only features remain
    X_train = X_train.drop([
        'wt','wt_xs', 'process', 'multi_class','event',
        'gen_match_1', 'gen_match_2','opp_sides',
        ], axis=1).reset_index(drop=True)
    X_test = X_test.drop([
        'wt','wt_xs', 'process', 'multi_class','event',
        'gen_match_1', 'gen_match_2','opp_sides',
        ], axis=1).reset_index(drop=True)
    if channel == "em":
        X_train = X_train.drop(["wt_em_qcd"], axis=1).reset_index(drop=True)
        X_test = X_test.drop(["wt_em_qcd"], axis=1).reset_index(drop=True)
    # to use names "f0" etcs
    print(X_train.columns)
    orig_columns = X_train.columns
    X_train.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    X_test.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    print(X_train.columns)
    ## standard scaler
    columns = X_train.columns
    scaler = StandardScaler()
    X_train['wt'] = w_train.reset_index(drop=True)
    np_scaled_train = scaler.fit_transform(X_train.as_matrix())
    scaled_train = np_scaled_train
    X_test['wt'] = w_test.reset_index(drop=True)
    np_scaled_test = scaler.transform(X_test.as_matrix())
    scaled_test = np_scaled_test
    # NOTE(review): the two assignments below overwrite the scaled arrays
    # with the *unscaled* features (minus 'wt'), so the StandardScaler above
    # has no effect on what the MLP sees -- confirm whether this is intended.
    scaled_train = X_train.drop(["wt"], axis=1).reset_index(drop=True)
    scaled_test = X_test.drop(["wt"], axis=1).reset_index(drop=True)
    clf = MLPClassifier(solver='adam', alpha=1e-5,
        hidden_layer_sizes=(4,), random_state=123456,
        verbose=True, nesterovs_momentum=True)
    # NOTE(review): sample weights are computed above but not passed to fit
    clf.fit(scaled_train,
        y_train,
        )
    print(clf.score(scaled_test,y_test,w_test))
    print(clf.predict(scaled_test))
    return None
###
def fit_keras(X, channel, fold, analysis, sig_sample, mjj_training):
    """Train a 3-hidden-layer Keras classifier for one (channel, mjj) category.

    Engineers centrality/angular features, splits 75/25 stratified on
    'multi_class', class-balances the 'wt_xs' training weights, one-hot
    encodes the 3 classes, standardises features (scaler fit on train only,
    persisted to '<channel>_<mjj>_scaler.pkl'), min-max scales the sample
    weights, then fits a 200-200-200 tanh network with softmax output and
    early stopping.  The model is saved as an .h5 file.  Returns None.

    Fixes vs. previous revision:
      * third Dense layer used the removed Keras-1 arguments
        ``init=``/``W_regularizer=``; unified on the Keras-2 names used by
        the sibling layers.
      * scaler pickle is opened in binary mode ('wb'), as pickle requires.
      * the second weight-mean diagnostic printed the train mean twice;
        it now prints the test mean.
    """
    print('Training keras model fold{}'.format(fold))
    # category selection: high-mjj drops 'misc'; em additionally drops qcd
    # and merges qqh into ggh
    if mjj_training == "high":
        X = X[X["multi_class"] != "misc"]
        if channel == "em":
            X = X[X["multi_class"] != "qcd"]
            X.multi_class.replace("qqh","ggh",inplace=True)
    # engineered kinematic features (zfeld is an intermediate, dropped below)
    X["zfeld"] = np.fabs(X.eta_h - (X.jeta_1 + X.jeta_2)/2.)
    X["centrality"] = np.exp(-4*(X.zfeld/np.fabs(X.jdeta))**2)
    X["dphi_custom"] = np.arccos(1-X.mt_lep**2/(2.*X.pt_1*X.pt_2))
    X["dR_custom"] = np.sqrt((X.eta_1-X.eta_2)**2 + (X.dphi_custom)**2)
    X_train,X_test, y_train,y_test,w_train,w_test = train_test_split(
        X,
        X['multi_class'],
        X['wt_xs'],
        test_size=0.25,
        random_state=123456,
        stratify=X['multi_class'].values,
        )
    # class balancing: scale each class to the same total wt_xs
    sum_w = X_train['wt_xs'].sum()
    sum_w_cat = X_train.groupby('multi_class')['wt_xs'].sum()
    class_weights = sum_w / sum_w_cat
    class_weight_dict = dict(class_weights)
    print(class_weight_dict)
    for i in w_train.index:
        for key, value in class_weight_dict.items():
            if y_train[i] == key:
                w_train.at[i] *= value
    # one-hot encode the class labels
    encoder = LabelEncoder()
    encoder.fit(y_train)
    encoded_y_train = encoder.transform(y_train)
    y_train = np_utils.to_categorical(encoded_y_train, num_classes=3)
    encoder.fit(y_test)
    encoded_y_test = encoder.transform(y_test)
    y_test = np_utils.to_categorical(encoded_y_test, num_classes=3)
    print('original Y: ', X_train['multi_class'].head())
    print('one-hot y: ', y_train[0])
    # drop bookkeeping columns so only features remain
    X_train = X_train.drop([
        'wt','wt_xs', 'process', 'multi_class','event',
        'gen_match_1', 'gen_match_2','opp_sides','zfeld'
        ], axis=1).reset_index(drop=True)
    X_test = X_test.drop([
        'wt','wt_xs', 'process', 'multi_class','event',
        'gen_match_1', 'gen_match_2','opp_sides','zfeld'
        ], axis=1).reset_index(drop=True)
    if channel == "em":
        X_train = X_train.drop(["wt_em_qcd"], axis=1).reset_index(drop=True)
        X_test = X_test.drop(["wt_em_qcd"], axis=1).reset_index(drop=True)
    # rename features to "f0", "f1", ... for the model
    print(X_train.columns)
    orig_columns = X_train.columns
    X_train.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    X_test.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    print(X_train.columns)
    # feature scaling: fit on training data only, reuse for the test set
    columns = X_train.columns
    scaler = StandardScaler()
    np_scaled_train = scaler.fit_transform(X_train.values)
    # pickle writes bytes -- file must be opened in binary mode
    with open('{}_{}_scaler.pkl'.format(channel, mjj_training), 'wb') as f:
        pickle.dump(scaler, f)
    scaled_train = np_scaled_train
    np_scaled_test = scaler.transform(X_test.values)
    scaled_test = np_scaled_test
    # min-max scale sample weights into [0, 1] for keras
    min_maxscaler = MinMaxScaler()
    print(w_train)
    scaled_w_train = min_maxscaler.fit_transform(w_train.values.reshape(-1,1))
    print(scaled_w_train)
    scaled_w_test = min_maxscaler.transform(w_test.values.reshape(-1,1))
    print((scaled_w_train.mean()))
    print((scaled_w_test.mean()))  # was printing the train mean twice
    ## how many features
    num_inputs = scaled_train.shape[1]
    ## how many classes
    num_outputs = 3
    model = Sequential()
    model.add(
        Dense(
            200,
            kernel_initializer='glorot_normal',
            activation='tanh',
            kernel_regularizer=l2(1e-4),
            input_dim=num_inputs
            )
        )
    model.add(
        Dense(
            200,
            kernel_initializer='glorot_normal',
            activation='tanh',
            kernel_regularizer=l2(1e-4),
            )
        )
    model.add(
        Dense(
            200,
            # was Keras-1 ``init=``/``W_regularizer=``; same semantics
            kernel_initializer='glorot_normal',
            activation='tanh',
            kernel_regularizer=l2(1e-4),
            )
        )
    model.add(
        Dense(
            num_outputs,
            kernel_initializer=RandomNormal(),
            activation='softmax'
            )
        )
    model.compile(
        loss='categorical_crossentropy',
        optimizer=Nadam(),
        metrics=['mse']
        )
    ## add early stopping
    callbacks = []
    callbacks.append(
        EarlyStopping(patience=40)
        )
    model.summary()
    model.fit(
        scaled_train,
        y_train,
        sample_weight=scaled_w_train.squeeze(),
        validation_data=(scaled_test,y_test,scaled_w_test.squeeze()),
        batch_size=1000,
        epochs=10000,
        shuffle=True,
        callbacks=callbacks
        )
    model.save('keras_model_fold{}_{}_{}_{}_{}.h5'
        .format(fold, analysis, channel, sig_sample, mjj_training))
    return None
def fit_keras_inc(X, channel, fold, analysis, sig_sample):
    """Train an inclusive (no mjj split) Keras classifier and plot results.

    Drops the 'misc' class, maps sentinel missing values to -10, splits
    75/25 stratified on 'multi_class', class-balances the 'wt_xs' training
    weights, one-hot encodes labels with a single encoder fit on the
    training set, standardises features (scaler persisted to
    '<channel>_scaler.pkl'), trains a 200-200 tanh network with dropout and
    early stopping, saves the model as .h5 and draws three confusion
    matrices.  Returns None.

    Fixes vs. previous revision:
      * scaler pickle is opened in binary mode ('wb'), as pickle requires.
      * confusion matrices are computed from the *scaled* test features,
        matching what the network was trained on (previously the raw
        X_test was fed to predict_classes).
      * removed a no-op bare ``encoder.classes_`` expression.
    """
    print('Training keras model fold{}'.format(fold))
    X = X[X["multi_class"] != "misc"] # don't use misc
    # map sentinel missing values onto a single filler value
    X.replace(-999.,-10, inplace=True)
    X.replace(-9999.,-10, inplace=True)
    # split
    X_train,X_test, y_train,y_test,w_train,w_test = train_test_split(
        X,
        X['multi_class'],
        X['wt_xs'],
        test_size=0.25,
        random_state=123456,
        stratify=X['multi_class'].values,
        )
    print(X.head())
    print(X_train.head())
    print(w_train)
    # class balancing: scale each class to the same total wt_xs
    sum_w = X_train['wt_xs'].sum()
    sum_w_cat = X_train.groupby('multi_class')['wt_xs'].sum()
    class_weights = sum_w / sum_w_cat
    class_weight_dict = dict(class_weights)
    print(class_weight_dict)
    for i in w_train.index:
        for key, value in class_weight_dict.items():
            if y_train[i] == key:
                w_train.at[i] *= value
    # min-max scaler fit on the full-sample weights, applied to train weights
    # (scaled_w_train is currently unused in the fit below)
    min_maxscaler = MinMaxScaler()
    fit_minmax = min_maxscaler.fit(X["wt_xs"].values.reshape(-1,1))
    scaled_w_train = min_maxscaler.transform(w_train.values.reshape(-1,1))
    # one-hot encoding; the encoder is fit on y_train only and reused for
    # y_test so both share the same label<->integer mapping
    encoder = LabelEncoder()
    encoder.fit(y_train)
    encoded_y_train = encoder.transform(y_train)
    y_train = np_utils.to_categorical(encoded_y_train, num_classes=len(X_train["multi_class"].unique()))
    encoded_y_test = encoder.transform(y_test)
    y_test = np_utils.to_categorical(encoded_y_test, num_classes=len(X_train["multi_class"].unique()))
    print('original Y: ', X_train['multi_class'].head())
    print('one-hot y: ', y_train[0])
    print('one-hot y: ', y_train[1])
    print('one-hot y: ', y_train[2])
    print('original Y: ', X_test['multi_class'].head())
    print('one-hot y: ', y_test[0])
    print('one-hot y: ', y_test[1])
    print('one-hot y: ', y_test[2])
    # drop bookkeeping columns so only features remain
    dropVars = ["wt","wt_xs", "process", "multi_class","event","gen_match_1", "gen_match_2",]
    if sig_sample == "tauspinner":
        dropVars.append("wt_cp_sm")
    if channel == "em":
        dropVars.append("wt_em_qcd")
    X_train = X_train.drop(dropVars, axis=1).reset_index(drop=True)
    X_test = X_test.drop(dropVars, axis=1).reset_index(drop=True)
    # rename features to "f0", "f1", ... for the model
    print(X_train.columns)
    orig_columns = X_train.columns
    X_train.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    X_test.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    print(X_train.columns)
    # feature scaling: fit on training data only, reuse for the test set
    columns = X_train.columns
    scaler = StandardScaler()
    np_scaled_train = scaler.fit_transform(X_train.values)
    # pickle writes bytes -- file must be opened in binary mode
    with open('{}_scaler.pkl'.format(channel), 'wb') as f:
        pickle.dump(scaler, f)
    print(X_train)
    scaled_train = np_scaled_train
    print(scaled_train)
    np_scaled_test = scaler.transform(X_test.values)
    scaled_test = np_scaled_test
    print(X_test)
    print(scaled_test)
    ## how many features
    num_inputs = scaled_train.shape[1]
    ## how many classes
    num_outputs = y_train.shape[1]
    # two hidden tanh layers of 200 nodes with dropout, softmax output
    model = Sequential()
    for i, nodes in enumerate([200] * 2):
        if i == 0:
            model.add(Dense(nodes, kernel_regularizer=l2(1e-5), input_dim=num_inputs))
        else:
            model.add(Dense(nodes, kernel_regularizer=l2(1e-5)))
        model.add(Activation("tanh"))
        model.add(Dropout(0.3))
    model.add(Dense(num_outputs, kernel_regularizer=l2(1e-5)))
    model.add(Activation("softmax"))
    model.compile(
        loss="categorical_crossentropy",
        optimizer=adam(lr=1e-4),
        metrics=["accuracy"]
        )
    w_train = w_train.reset_index(drop=True)
    w_test = w_test.reset_index(drop=True)
    ## add early stopping
    callbacks = []
    callbacks.append(
        EarlyStopping(patience=50)
        )
    model.summary()
    model.fit(
        scaled_train,
        y_train,
        sample_weight=w_train,
        validation_data=(scaled_test,y_test,w_test),
        batch_size=1000,
        epochs=100000,
        shuffle=True,
        callbacks=callbacks
        )
    model.save('keras_model_fold{}_{}_{}_{}.h5'
        .format(fold, analysis, channel, sig_sample))
    ## Plotting things
    # predict on the scaled features the model was trained on
    y_prediction = model.predict_classes(scaled_test)
    y_test = np.argmax(y_test, axis=1)
    pf.plot_confusion_matrix(
        y_test, y_prediction, w_test.squeeze(),
        classes=list(encoder.classes_),
        figname='multi_fold{}_{}_{}_{}_non-normalised_weights_cm.pdf'.format(fold, analysis, channel, sig_sample))
    pf.plot_confusion_matrix(
        y_test, y_prediction, w_test.squeeze(),
        classes=list(encoder.classes_),
        figname='multi_fold{}_{}_{}_{}_normalised_efficiency_weights_cm.pdf'.format(fold, analysis, channel, sig_sample),
        normalise_by_col=True)
    pf.plot_confusion_matrix(
        y_test, y_prediction, w_test.squeeze(),
        classes=list(encoder.classes_),
        figname='multi_fold{}_{}_{}_{}_normalised_purity_weights_cm.pdf'.format(fold, analysis, channel, sig_sample),
        normalise_by_row=True)
    return None
def fit_tf(X, channel, fold, analysis, sig_sample, mjj_training):
    """Train a plain-TensorFlow 2-hidden-layer softmax classifier (4 classes).

    Splits 75/25 stratified on 'multi_class', class-balances the training
    weights (with an extra mjj-dependent reweighting of ggh in the high-mjj
    category), one-hot encodes labels, standardises features and runs 500
    Adam steps on random minibatches, printing loss/accuracy along the way.
    Nothing is persisted.  Returns None.

    Fixes vs. previous revision:
      * the test set re-fitted the StandardScaler (fit_transform); it now
        only applies the transform learned from the training set.
      * minibatching called ``scaled_train.next_batch(...)``, which does not
        exist on a NumPy array and would raise AttributeError; replaced with
        seeded random index sampling.
      * the TF placeholders shadowed the function parameter ``X``; renamed
        to X_ph / Y_ph.
    """
    print('Training keras model fold{}'.format(fold))
    if mjj_training == "high":
        X = X[X["multi_class"] != "misc"]
    X_train,X_test, y_train,y_test,w_train,w_test = train_test_split(
        X,
        X['multi_class'],
        X['wt_xs'],
        test_size=0.25,
        random_state=123456,
        stratify=X['multi_class'].as_matrix(),
        )
    # class balancing: scale each class to the same total wt_xs; in the
    # high-mjj category ggh is additionally down-weighted and reweighted as
    # a linear function of mjj (coefficients from a ROC study, valid to 1500 GeV)
    sum_w = X_train['wt_xs'].sum()
    sum_w_cat = X_train.groupby('multi_class')['wt_xs'].sum()
    class_weights = sum_w / sum_w_cat
    class_weight_dict = dict(class_weights)
    print(class_weight_dict)
    for i in w_train.index:
        for key, value in class_weight_dict.items():
            if y_train[i] == key:
                if key == "ggh" and mjj_training == "high":
                    w_train.at[i] *= value * 1.5/3.
                    wt_mjj = X_train['mjj'].at[i] * 0.003104 - 0.009583 if X_train['mjj'].at[i] > 300 else 1.0 #from ROC until 1500 GeV
                    w_train.at[i] *= wt_mjj
                else:
                    w_train.at[i] *= value
    # one-hot encode the class labels
    encoder = LabelEncoder()
    encoder.fit(y_train)
    encoded_y_train = encoder.transform(y_train)
    y_train = np_utils.to_categorical(encoded_y_train, num_classes=4)
    encoder.fit(y_test)
    encoded_y_test = encoder.transform(y_test)
    y_test = np_utils.to_categorical(encoded_y_test, num_classes=4)
    print('original Y: ', X_train['multi_class'].head())
    print('one-hot y: ', y_train[0])
    # drop bookkeeping columns so only features remain
    X_train = X_train.drop([
        'wt','wt_xs', 'process', 'multi_class','event',
        'gen_match_1', 'gen_match_2',
        'mjj_jdeta','dijetpt_pth','dijetpt_jpt1'
        ], axis=1).reset_index(drop=True)
    X_test = X_test.drop([
        'wt','wt_xs', 'process', 'multi_class','event',
        'gen_match_1', 'gen_match_2',
        'mjj_jdeta','dijetpt_pth','dijetpt_jpt1'
        ], axis=1).reset_index(drop=True)
    if channel == "em":
        X_train = X_train.drop(["wt_em_qcd"], axis=1).reset_index(drop=True)
        X_test = X_test.drop(["wt_em_qcd"], axis=1).reset_index(drop=True)
    # rename features to "f0", "f1", ... for the model
    print(X_train.columns)
    orig_columns = X_train.columns
    X_train.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    X_test.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
    print(X_train.columns)
    # feature scaling: fit on training data only, apply to the test set
    columns = X_train.columns
    scaler = StandardScaler()
    print(X_train.head(5))
    np_scaled_train = scaler.fit_transform(X_train.as_matrix())
    scaled_train = np_scaled_train
    np_scaled_test = scaler.transform(X_test.as_matrix())
    scaled_test = np_scaled_test
    ## how many features
    num_inputs = scaled_train.shape[1]
    ## how many classes
    num_outputs = 4
    import tensorflow as tf
    # Parameters
    learning_rate = 0.1
    num_steps = 500
    batch_size = 128
    display_step = 100
    # Network Parameters
    n_hidden_1 = 256
    n_hidden_2 = 256
    num_input = scaled_train.shape[1]
    num_classes = 4
    # tf Graph input (named *_ph so they don't shadow the parameter X)
    X_ph = tf.placeholder("float", [None, num_input])
    Y_ph = tf.placeholder("float", [None, num_classes])
    # Store layers weight & bias
    weights = {
        'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
        'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
        'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
        }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden_1])),
        'b2': tf.Variable(tf.random_normal([n_hidden_2])),
        'out': tf.Variable(tf.random_normal([num_classes]))
        }
    # Create model
    def neural_net(x):
        # Hidden fully connected layer with 256 neurons
        layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
        # Hidden fully connected layer with 256 neurons
        layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
        # Output fully connected layer with a neuron for each class
        out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
        return out_layer
    # Construct model
    logits = neural_net(X_ph)
    prediction = tf.nn.softmax(logits)
    # Define loss and optimizer
    loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=Y_ph))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss_op)
    # Evaluate model
    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y_ph, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()
    # seeded minibatch sampler (NumPy arrays have no next_batch())
    rng = np.random.RandomState(123456)
    n_rows = scaled_train.shape[0]
    # Start training
    with tf.Session() as sess:
        # Run the initializer
        sess.run(init)
        for step in range(1, num_steps+1):
            batch_idx = rng.choice(n_rows, size=min(batch_size, n_rows), replace=False)
            batch_x = scaled_train[batch_idx]
            batch_y = y_train[batch_idx]
            # Run optimization op (backprop)
            sess.run(train_op, feed_dict={X_ph: batch_x, Y_ph: batch_y})
            if step % display_step == 0 or step == 1:
                # Calculate batch loss and accuracy
                loss, acc = sess.run([loss_op, accuracy], feed_dict={X_ph: batch_x,
                    Y_ph: batch_y})
                print(("Step " + str(step) + ", Minibatch Loss= " + \
                    "{:.4f}".format(loss) + ", Training Accuracy= " + \
                    "{:.3f}".format(acc)))
        print("Optimization Finished!")
        # Calculate accuracy for test
        print(("Testing Accuracy:", \
            sess.run(accuracy, feed_dict={X_ph: scaled_test,
                Y_ph: y_test})))
    return None
# def fit_pytorch(X, fold, analysis, channel, sig_sample, mjj_training):
# import torch
# import torch.nn as nn
# ## START EDITING THIS FOR ODD/EVEN SPLIT
# print 'Training XGBoost model fold{}'.format(fold)
# if mjj_training == "high":
# X = X[X["multi_class"] != "jetFakes"]
# X = X[X["multi_class"] != "ztt_embed"]
# print X.head()
# # for x in X.columns:
# # if x in ["pt_h"]:
# # X["exp_{}".format(str(x))] = np.exp(X[str(x)])
# # X["log_{}".format(str(x))] = np.log(X[str(x)])
# # X["{}_sq".format(str(x))] = X[str(x)]**2
# # X["{}_cb".format(str(x))] = X[str(x)]**3
# # X["{}_tanh".format(str(x))] = np.tanh(X[str(x)])
# # make new variable combinatinos
# X["dphi_custom"] = np.arccos(1-X.mt_lep**2/(2.*X.pt_1*X.pt_2))
# X["dR_custom"] = np.sqrt((X.eta_1-X.eta_2)**2 + (X.dphi_custom)**2)
# X["rms_pt"] = np.sqrt(0.5 * (X.pt_1**2 + X.pt_2**2))
# X["rms_jpt"] = np.sqrt(0.5 * (X.jpt_1**2 + X.jpt_2**2))
# # make zeppenfeld variable
# X["zfeld"] = np.fabs(X.eta_h - (X.jeta_1 + X.jeta_2)/2.)
# # print X["zfeld"]
# # make centrality variable
# X["centrality"] = np.exp(-4*(X.zfeld/np.fabs(X.jdeta))**2)
# X_train,X_test, y_train,y_test,w_train,w_test = train_test_split(
# X,
# X['multi_class'],
# X['wt_xs'],
# test_size=0.25,
# random_state=123456,
# stratify=X['multi_class'].as_matrix(),
# )
# print X_train[(X_train.multi_class == 'ggh')].shape
# del X
# gc.collect()
# # if want to plot any variables
# # pf.plot_signal_background(X[X["multi_class"] == "ggh"], X[X["multi_class"] == "qqh"], "mjj",channel,sig_sample)
# sum_w = X_train['wt_xs'].sum()
# # print 'sum_w', sum_w
# sum_w_cat = X_train.groupby('multi_class')['wt_xs'].sum()
# # print 'sum_w_cat', sum_w_cat
# class_weights = sum_w / sum_w_cat
# class_weight_dict = dict(class_weights)
# print class_weight_dict
# # multiply w_train by class_weight now
# # add mjj dependent weight for ggH
# for i in w_train.index:
# for key, value in class_weight_dict.iteritems():
# if y_train[i] == key:
# w_train.at[i] *= value
# # ## use one-hot encoding
# # # encode class values as integers
# # encoder_train = LabelEncoder()
# # encoder_test = LabelEncoder()
# # encoder_train.fit(y_train)
# # y_train = encoder_train.transform(y_train)
# # encoder_test.fit(y_test)
# # y_test = encoder_test.transform(y_test)
# ## use one-hot encoding
# # encode class values as integers
# encoder = LabelEncoder()
# encoder.fit(y_train)
# encoded_y_train = encoder.transform(y_train)
# # convert integers to dummy variables (i.e. one hot encoded)
# y_train = np_utils.to_categorical(encoded_y_train, num_classes=4)
# encoder.fit(y_test)
# encoded_y_test = encoder.transform(y_test)
# # convert integers to dummy variables (i.e. one hot encoded)
# y_test = np_utils.to_categorical(encoded_y_test, num_classes=4)
# # test_class_weight = class_weight.compute_class_weight(
# # 'balanced', np.unique(encoded_Y), encoded_Y
# # )
# # print test_class_weight
# # print 'original Y: ', X_train['multi_class'].head()
# # print 'one-hot y: ', y_train
# print X_train.head(5)
# X_train = X_train.drop([
# 'wt','wt_xs', 'process', 'multi_class','event',
# 'gen_match_1', 'gen_match_2',#'eta_tt',
# ], axis=1).reset_index(drop=True)
# X_test = X_test.drop([
# 'wt','wt_xs', 'process', 'multi_class','event',
# 'gen_match_1', 'gen_match_2',#'eta_tt',
# ], axis=1).reset_index(drop=True)
# if channel == "em":
# X_train = X_train.drop(["wt_em_qcd"], axis=1).reset_index(drop=True)
# X_test = X_test.drop(["wt_em_qcd"], axis=1).reset_index(drop=True)
# if mjj_training == "high":
# X_train = X_train.drop(["dphi_custom","dR","opp_sides"], axis=1).reset_index(drop=True)
# X_test = X_test.drop(["dphi_custom","dR","opp_sides"], axis=1).reset_index(drop=True)
# # to use names "f0" etcs
# print X_train.columns
# orig_columns = X_train.columns
# X_train.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
# X_test.columns = ["f{}".format(x) for x in np.arange(X_train.shape[1])]
# print X_train.columns
# ## standard scaler
# columns = X_train.columns
# scaler = StandardScaler()
# # X_train['wt'] = w_train
# print X_train.head(5)
# np_scaled_train = scaler.fit_transform(X_train.as_matrix())
# # with open('{}_{}_scaler.pkl'.format(channel, mjj_training), 'w') as f:
# # pickle.dump(scaler, f)
# scaled_train = np_scaled_train
# scaled_train = pd.DataFrame(np_scaled_train)
# scaled_train.columns = columns
# # X_test['wt'] = w_test
# np_scaled_test = scaler.fit_transform(X_test.as_matrix())
# scaled_test = np_scaled_test
# scaled_test = pd.DataFrame(np_scaled_test)
# scaled_test.columns = columns
# # X_train = X_train.drop(["wt"], axis=1).reset_index(drop=True)
# # X_test = X_test.drop(["wt"], axis=1).reset_index(drop=True)
# # X_train = X_train.drop([
# # 'zfeld','jeta_1','jeta_2'
# # ], axis=1).reset_index(drop=True)
# # X_test = X_test.drop([
# # 'zfeld','jeta_1','jeta_2'
# # ], axis=1).reset_index(drop=True)
# num_input = scaled_train.shape[1]
# # Defining input size, hidden layer size, output size and batch size
# # respectively
# n_in, n_h, n_out, batch_size = num_input, 2, 2, 64
# # Create a model
# model = nn.Sequential(
# nn.Linear(n_in, n_h),
# nn.ReLU(),
# nn.Linear(n_h, n_out),
# nn.Sigmoid()
# )
# # Construct the loss function
# criterion = torch.nn.MSELoss()
# # Construct the optimizer (Stochastic Gradient Descent in this case)
# optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# # Gradient Descent
# for epoch in range(50):
# # Forward pass: Compute predicted y by passing x to the model
# y_pred = model(scaled_train)
# # Compute and print loss
# loss = criterion(y_pred, y_train)
# print('epoch: ', epoch,' loss: ', loss.item())
# # Zero gradients, perform a backward pass, and update the weights.
# optimizer.zero_grad()
# # perform a backward pass (backpropagation)
# loss.backward()
# # Update the parameters
# optimizer.step()
# return None
def write_score(data, model, channel, doSystematics):
    """Append a binary MVA score branch ('mva_score') to the per-process
    ROOT ntuples of the given channel.

    data -- pandas DataFrame of events; must carry a 'process' column
    model -- trained classifier exposing predict_proba (e.g. xgboost/sklearn)
    channel -- channel string used in the output file names
    doSystematics -- when True, also write the score into every
        systematic-shifted tree listed below

    Output is appended via array2root in 'update' mode; always returns None.
    """
    path = '/vols/cms/akd116/Offline/output/SM/2018/Mar18' # path of nominal ntuples
    # for full systematics need this:
    systematics = [
        'TSCALE_UP', 'TSCALE_DOWN', 'TSCALE0PI_UP', 'TSCALE0PI_DOWN', 'TSCALE1PI_UP',
        'TSCALE1PI_DOWN', 'TSCALE3PRONG_UP', 'TSCALE3PRONG_DOWN' , 'JES_UP', 'JES_DOWN',
        'EFAKE0PI_DOWN', 'EFAKE0PI_UP', 'EFAKE1PI_DOWN', 'EFAKE1PI_UP', 'MUFAKE0PI_DOWN' ,
        'MUFAKE0PI_UP', 'MUFAKE1PI_DOWN', 'MUFAKE1PI_UP', 'METUNCL_UP', 'METUNCL_DOWN',
        'METCL_UP', 'METCL_DOWN',
        # 'TSCALE_UP_1', 'TSCALE_UP_2', 'TSCALE_DOWN_2', 'TSCALE_UP_3', 'TSCALE_DOWN_3',
        # 'TSCALE_UP_0.5', 'TSCALE_DOWN_0.5', 'TSCALE_UP_1.5', 'TSCALE_DOWN_1.5', 'TSCALE_UP_2.5',
        # 'TSCALE_DOWN_2.5', 'BTAG_UP', 'BTAG_DOWN', 'BFAKE_UP', 'BFAKE_DOWN',
        # 'HF_UP', 'HF_DOWN', 'HFSTATS1_UP', 'HFSTATS1_DOWN', 'HFSTATS2_UP',
        # 'HFSTATS2_DOWN', 'CFERR1_UP', 'CFERR1_DOWN', 'CFERR2_UP', 'CFERR2_DOWN',
        # 'LF_UP', 'LF_DOWN', 'LFSTATS1_UP', 'LFSTATS1_DOWN', 'LFSTATS2_UP',
        # 'LFSTATS2_DOWN', 'MET_SCALE_UP', 'MET_SCALE_DOWN', 'MET_RES_UP', 'MET_RES_DOWN',
        ]
    # split events by physics process: one output file per process
    if len(data) > 0:
        gb = data.groupby('process')
        df_dict = {x: gb.get_group(x) for x in gb.groups}
    # NOTE(review): when data is empty df_dict is never bound and the loop
    # below would raise NameError -- presumably callers always pass events;
    # confirm against the call sites.
    score = []
    for key, value in df_dict.items():
        print('Writing into {}_{}_2016.root'.format(key, channel))
        value = value.drop(['process'], axis=1)
        if len(data) > 0:
            # probability of the positive class (column 1)
            score = model.predict_proba(value)[:,1]
        else:
            score = np.array(0.0)
        # reinterpret as a structured array so array2root writes a named branch;
        # NOTE(review): this assumes predict_proba already yields float32
        # (true for xgboost) -- a float64 array would be garbled, not cast.
        score.dtype = [('mva_score', np.float32)]
        array2root(
            score,
            '{}/{}_{}_2016.root'.format(path, key, channel),
            'ntuple',
            mode = 'update'
            )
        if doSystematics:
            for systematic in systematics:
                print('Writing into {}/{}_{}_2016.root'.format(systematic, key, channel))
                array2root(
                    score,
                    '{}/{}/{}_{}_2016.root'.format(path, systematic, key, channel),
                    'ntuple',
                    mode = 'update'
                    )
    return None
def write_score_multi(data, model, analysis, channel, sig_sample, doSystematics, name):
    """Write multi-class MVA score and predicted-category branches into the
    per-process ROOT ntuples of the given channel.

    For every event the score is the largest class probability from
    model.predict_proba and the category is the label from model.predict.
    Branch names encode the analysis, the user-supplied name and the signal
    sample ('powheg' or 'JHU'). When doSystematics is True the same branches
    are also appended to every systematic-shifted tree. Returns None.
    """
    path = '/vols/cms/akd116/Offline/output/SM/2018/Mar19' # nominal ntuples
    # for full systematics need this:
    systematics = [
        'TSCALE_UP', 'TSCALE_DOWN', 'TSCALE0PI_UP', 'TSCALE0PI_DOWN', 'TSCALE1PI_UP',
        'TSCALE1PI_DOWN', 'TSCALE3PRONG_UP', 'TSCALE3PRONG_DOWN' , 'JES_UP', 'JES_DOWN',
        'EFAKE0PI_DOWN', 'EFAKE0PI_UP', 'EFAKE1PI_DOWN', 'EFAKE1PI_UP', 'MUFAKE0PI_DOWN' ,
        'MUFAKE0PI_UP', 'MUFAKE1PI_DOWN', 'MUFAKE1PI_UP', 'METUNCL_UP', 'METUNCL_DOWN',
        'METCL_UP', 'METCL_DOWN',
        ]
    # nothing to write for an empty DataFrame
    if len(data) == 0:
        return None
    # one output file per physics process
    gb = data.groupby('process')
    df_dict = {x: gb.get_group(x) for x in gb.groups}
    for key, value in df_dict.items():
        print('Writing into {}_{}_2016.root'.format(key, channel))
        value = value.drop(['process'], axis=1)
        # Bug fix: build a fresh score list for every process. Previously a
        # single list initialised before the loop accumulated across
        # processes, so every file after the first was written with the
        # scores of earlier processes prepended (and a length that no longer
        # matched its tree).
        score = [max(probs) for probs in model.predict_proba(value)]
        np_score = np.array(score)
        cat = np.array(model.predict(value))
        # Reinterpret in place as structured arrays so array2root writes
        # named branches. Bug fix: the builtin `int` replaces the alias
        # np.int (identical meaning) which numpy >= 1.24 removed.
        # NOTE(review): dtype assignment reinterprets the buffer, it does not
        # cast -- assumes predict_proba yields float32 (true for xgboost).
        if sig_sample == 'powheg':
            np_score.dtype = [('mva_score_{}_{}_powheg'.format(analysis, name), np.float32)]
            cat.dtype = [('mva_cat_{}_{}_powheg'.format(analysis, name), int)]
        elif sig_sample == 'JHU':
            np_score.dtype = [('mva_score_{}_{}_JHU'.format(analysis, name), np.float32)]
            cat.dtype = [('mva_cat_{}_{}_JHU'.format(analysis, name), int)]
        array2root(
            np_score,
            '{}/{}_{}_2016.root'.format(path, key, channel),
            'ntuple',
            mode = 'update'
            )
        array2root(
            cat,
            '{}/{}_{}_2016.root'.format(path, key, channel),
            'ntuple',
            mode = 'update'
            )
        if doSystematics:
            for systematic in systematics:
                print('Writing into {}/{}_{}_2016.root'.format(systematic, key, channel))
                array2root(
                    np_score,
                    '{}/{}/{}_{}_2016.root'.format(path, systematic, key, channel),
                    'ntuple',
                    mode = 'update'
                    )
                array2root(
                    cat,
                    '{}/{}/{}_{}_2016.root'.format(path, systematic, key, channel),
                    'ntuple',
                    mode = 'update'
                    )
    return None
def write_score_multi_folds(data, model, analysis, channel, sig_sample, fold, name):
    """Write per-fold multi-class MVA score and category branches into the
    per-process ROOT ntuples (nominal trees only, no systematics).

    Branch names encode the fold, analysis, the user-supplied name and the
    signal sample ('powheg' or 'JHU'). Returns None.
    """
    path = '/vols/cms/akd116/Offline/output/SM/2018/Apr23' # nominal ntuples
    # nothing to write for an empty DataFrame
    if len(data) == 0:
        return None
    # one output file per physics process
    gb = data.groupby('process')
    df_dict = {x: gb.get_group(x) for x in gb.groups}
    for key, value in df_dict.items():
        print('Writing into {}_{}_2016.root'.format(key, channel))
        value = value.drop(['process'], axis=1)
        # Bug fix: fresh per-process score list -- previously a single list
        # initialised before the loop accumulated across processes,
        # corrupting every file after the first.
        score = [max(probs) for probs in model.predict_proba(value)]
        np_score = np.array(score)
        cat = np.array(model.predict(value))
        # Bug fix: builtin `int` replaces the identical alias np.int, which
        # numpy >= 1.24 removed (AttributeError).
        # NOTE(review): dtype assignment reinterprets the buffer, it does not
        # cast -- assumes predict_proba yields float32 (true for xgboost).
        if sig_sample == 'powheg':
            np_score.dtype = [('mva_score_{}_{}_{}_powheg'.format(fold, analysis, name), np.float32)]
            cat.dtype = [('mva_cat_{}_{}_{}_powheg'.format(fold, analysis, name), int)]
        elif sig_sample == 'JHU':
            np_score.dtype = [('mva_score_{}_{}_{}_JHU'.format(fold, analysis, name), np.float32)]
            cat.dtype = [('mva_cat_{}_{}_{}_JHU'.format(fold, analysis, name), int)]
        array2root(
            np_score,
            '{}/{}_{}_2016.root'.format(path, key, channel),
            'ntuple',
            mode = 'update'
            )
        array2root(
            cat,
            '{}/{}_{}_2016.root'.format(path, key, channel),
            'ntuple',
            mode = 'update'
            )
    return None
def write_score_multi_syst(data, model, analysis, channel, sig_sample, fold, doSystematics, name):
    """Write per-fold multi-class MVA score and category branches into the
    per-process ROOT ntuples, optionally for every systematic variation.

    Branch names encode the fold, analysis, the user-supplied name and the
    signal sample ('powheg' or 'JHU'). When doSystematics is True the same
    branches are also appended to every systematic-shifted tree. Returns None.
    """
    path = '/vols/cms/akd116/Offline/output/SM/2018/Apr23' # nominal ntuples
    # for full systematics need this:
    systematics = [
        'TSCALE_UP', 'TSCALE_DOWN', 'TSCALE0PI_UP', 'TSCALE0PI_DOWN', 'TSCALE1PI_UP',
        'TSCALE1PI_DOWN', 'TSCALE3PRONG_UP', 'TSCALE3PRONG_DOWN' , 'JES_UP', 'JES_DOWN',
        'EFAKE0PI_DOWN', 'EFAKE0PI_UP', 'EFAKE1PI_DOWN', 'EFAKE1PI_UP', 'MUFAKE0PI_DOWN' ,
        'MUFAKE0PI_UP', 'MUFAKE1PI_DOWN', 'MUFAKE1PI_UP', 'METUNCL_UP', 'METUNCL_DOWN',
        'METCL_UP', 'METCL_DOWN',
        ]
    # nothing to write for an empty DataFrame
    if len(data) == 0:
        return None
    # one output file per physics process
    gb = data.groupby('process')
    df_dict = {x: gb.get_group(x) for x in gb.groups}
    for key, value in df_dict.items():
        print('Writing into {}_{}_2016.root'.format(key, channel))
        value = value.drop(['process'], axis=1)
        # Bug fix: fresh per-process score list -- previously a single list
        # initialised before the loop accumulated across processes,
        # corrupting every file after the first.
        score = [max(probs) for probs in model.predict_proba(value)]
        np_score = np.array(score)
        cat = np.array(model.predict(value))
        # Bug fix: builtin `int` replaces the identical alias np.int, which
        # numpy >= 1.24 removed (AttributeError).
        # NOTE(review): dtype assignment reinterprets the buffer, it does not
        # cast -- assumes predict_proba yields float32 (true for xgboost).
        if sig_sample == 'powheg':
            np_score.dtype = [('mva_score_{}_{}_{}_powheg'.format(fold, analysis, name), np.float32)]
            cat.dtype = [('mva_cat_{}_{}_{}_powheg'.format(fold, analysis, name), int)]
        elif sig_sample == 'JHU':
            np_score.dtype = [('mva_score_{}_{}_{}_JHU'.format(fold, analysis, name), np.float32)]
            cat.dtype = [('mva_cat_{}_{}_{}_JHU'.format(fold, analysis, name), int)]
        array2root(
            np_score,
            '{}/{}_{}_2016.root'.format(path, key, channel),
            'ntuple',
            mode = 'update'
            )
        array2root(
            cat,
            '{}/{}_{}_2016.root'.format(path, key, channel),
            'ntuple',
            mode = 'update'
            )
        if doSystematics:
            for systematic in systematics:
                print('Writing into {}/{}_{}_2016.root'.format(systematic, key, channel))
                array2root(
                    np_score,
                    '{}/{}/{}_{}_2016.root'.format(path, systematic, key, channel),
                    'ntuple',
                    mode = 'update'
                    )
                array2root(
                    cat,
                    '{}/{}/{}_{}_2016.root'.format(path, systematic, key, channel),
                    'ntuple',
                    mode = 'update'
                    )
    return None
def compute_class_weights(df):#, channel, sig_sample):
    """Return per-category event-weight correction factors for *df*.

    For each class label in the 'multi_class' column the factor is
    total_sum(wt) / class_sum(wt), so classes with less summed weight get a
    larger factor. The list is ordered like the groupby result (sorted class
    labels). A class with zero summed weight is skipped with a warning.

    Bug fixes relative to the original: the per-class sum previously summed
    the whole 'wt' column for every row (ignoring the class), the function
    returned inside the first loop iteration, and it returned the result of
    list.append(), which is always None.
    """
    # total event weight over all classes
    sum_w = df['wt'].sum()
    class_weights = []
    # per-class sums of the event weight
    for cat, sum_w_cat in df.groupby('multi_class')['wt'].sum().items():
        try:
            class_weights.append(sum_w / sum_w_cat)
        except ZeroDivisionError:
            # the original "handled" this with a bare no-op string
            print('Cannot divide by zero')
    return class_weights
| 34.473857
| 187
| 0.528846
| 16,969
| 136,482
| 3.971595
| 0.045023
| 0.029379
| 0.016203
| 0.020833
| 0.865478
| 0.84466
| 0.829035
| 0.821364
| 0.80427
| 0.792771
| 0
| 0.029478
| 0.329142
| 136,482
| 3,958
| 188
| 34.482567
| 0.706586
| 0.339591
| 0
| 0.733892
| 0
| 0
| 0.128585
| 0.029388
| 0
| 0
| 0
| 0
| 0.002095
| 1
| 0.012048
| false
| 0
| 0.024096
| 0
| 0.048193
| 0.075432
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
87681e99ac2305095ec49eef21d912df0915eec5
| 20,948
|
py
|
Python
|
AutotestPlatform/website/apiRunner/APIRunner.py
|
yzypals/AutoTestingPlatform
|
cfb2c53337406347fad37bd65568b22cdc76fdca
|
[
"Apache-2.0"
] | null | null | null |
AutotestPlatform/website/apiRunner/APIRunner.py
|
yzypals/AutoTestingPlatform
|
cfb2c53337406347fad37bd65568b22cdc76fdca
|
[
"Apache-2.0"
] | 2
|
2020-06-06T00:51:32.000Z
|
2021-06-10T22:40:50.000Z
|
AutotestPlatform/website/apiRunner/APIRunner.py
|
yzypals/AutoTestingPlatform
|
cfb2c53337406347fad37bd65568b22cdc76fdca
|
[
"Apache-2.0"
] | 1
|
2020-05-31T03:49:24.000Z
|
2020-05-31T03:49:24.000Z
|
#!/usr/bin/env python
#-*-encoding:utf-8-*-
__author__ = 'shouke'
import time
import json
from .common.log import logger
from .test_case import TestCase
from .common.mydb import MyDB
from .running_plan import RunningPlan
from .common.redis_client import RedisClient
from .common.globalvar import db_related_to_project_dic
from .common.globalvar import redis_related_to_project_dic
from .common.globalvar import global_variable_dic
from .test_plan import TestPlan
from collections import OrderedDict
class APIRunner:
    """Entry point for executing API test cases, suites, test plans and
    running plans, mirroring all progress/log output to a websocket."""

    def __init__(self, log_websocket_consumer):
        # websocket consumer used to stream log messages to the browser
        self.log_websocket_consumer = log_websocket_consumer
        # connection to the test platform's own metadata database
        self.test_platform_db = MyDB(log_websocket_consumer, db='TESTPLATFORM')
    def debug_case_or_suit(self, project_id, id):
        '''Debug-run a single test case or a single test suite.

        project_id -- id of the API project the tree node belongs to
        id         -- id of a node in `website_api_case_tree`; a node with
                      leaf children is treated as a suite, a leaf as a case
        Returns [True, msg] on success, [False, msg] on any failure.
        '''
        try:
            # --- load project connection settings (protocol/host/port/env) ---
            msg = '正在查询项目[ID:%s]相关信息' % project_id
            logger.info(msg)
            self.log_websocket_consumer.info(msg)
            result = self.test_platform_db.select_one_record('SELECT protocol, host, port, environment_id, valid_flag '
                'FROM `website_api_project_setting` WHERE id = %s', (project_id,))
            # result[0] is the query success flag, result[1] the fetched data
            if result[0] and result[1]:
                protocol, host, port, environment_id, valid_flag = result[1]
                # --- open every MySQL/Redis connection configured for the project ---
                msg = '正在查询与项目关联的数据库信息'
                logger.info(msg)
                self.log_websocket_consumer.info(msg);
                # NOTE(review): SQL built with %-interpolation; confirm the ids are
                # validated upstream, otherwise this is injectable.
                result = self.test_platform_db.select_many_record("SELECT db_type, db_alias, db_name, db_host, db_port, db_user, db_passwd "
                    "FROM `website_database_setting` "
                    "WHERE locate('API%s', project_id) != 0 AND environment_id= '%s'" % (project_id, environment_id))
                if result[0] and result[1]:
                    for record in result[1]:
                        db_type, db_alias, db_name, db_host, db_port, db_user, db_passwd = record
                        if db_type == 'MySQL':
                            mydb = MyDB(self.log_websocket_consumer, db_name=db_name, db_host=db_host, port=db_port, user=db_user, password=db_passwd, charset='utf8')
                            db_related_to_project_dic[db_alias] = mydb
                        elif db_type == 'Redis':
                            # blank password/db fall back to no-auth and db 0
                            if not db_passwd.strip():
                                db_passwd = None
                            if db_name.strip() == '':
                                db_name = '0'
                            myredis = RedisClient(self.log_websocket_consumer, host=db_host, port=db_port, password=db_passwd, db=db_name, charset='utf-8')
                            redis_related_to_project_dic[db_alias] = myredis
                elif not result[0]:
                    msg = '查询项目相关的数据库配置信息出错:%s' % result[1]
                    logger.error(msg)
                    self.log_websocket_consumer.error(msg)
                    return [False, msg]
                # --- load project-level global variables ---
                logger.info('正在查询与项目关联的全局变量')
                result = self.test_platform_db.select_many_record("SELECT `name`, `value` "
                    "FROM `website_global_variable_setting` "
                    "WHERE project_type='API项目' AND locate('%s', project_id) != 0 AND locate('%s', env_id) != 0 " % (project_id, environment_id))
                if result[0] and result[1]:
                    for record in result[1]:
                        name, value = record
                        name = name
                        global_variable_dic[name] = value
                elif not result[0]:
                    msg = '查询项目相关的全局变量配置信息出错:%s' % result[1]
                    logger.error(msg)
                    self.log_websocket_consumer.error(msg)
                    return [False, msg]
                # --- parse the optional 'global_headers' JSON variable ---
                try:
                    if 'global_headers' in global_variable_dic.keys():
                        global_headers = global_variable_dic['global_headers']
                        # Replace any full-width (Chinese) colon with an ASCII one:
                        # after the encode("utf-8").decode("latin1") round trip used
                        # elsewhere, a full-width colon inside "key": would no longer
                        # decode to a colon and json.loads would fail. Tabs picked up
                        # from database storage are stripped as well.
                        global_headers = global_headers.replace(':', ':').replace('\t', '')
                        global_headers = json.loads(global_headers, object_pairs_hook=OrderedDict)
                    else:
                        global_headers = {}
                except Exception as e:
                    msg = '%s' % e
                    logger.error(msg)
                    self.log_websocket_consumer.error(msg)
                    return [False, msg]
                # --- locate the tree node and decide case vs. suite ---
                msg = '正在查询ID:%s标识的用例(套件)相关信息' % id
                logger.info(msg)
                self.log_websocket_consumer.info(msg)
                query = 'SELECT id, text FROM `website_api_case_tree` WHERE project_id = %s AND id = %s' % (project_id, id)
                result = self.test_platform_db.select_one_record(query)
                if result[0] and result[1]:
                    record = result[1]
                    case_id, case_name = record
                    execution_num = str(int(time.time())) # execution number (unix timestamp)
                    # child nodes that are themselves leaves => the id is a suite
                    query = 'SELECT id, text FROM `website_api_case_tree` WHERE project_id = %s AND parent_id = %s ' \
                        'AND id NOT IN (SELECT parent_id FROM `website_api_case_tree` WHERE project_id=%s)' \
                        'ORDER BY `order` ASC' % (project_id, id, project_id)
                    result = self.test_platform_db.select_many_record(query)
                    if result[0] and result[1]:
                        msg = 'ID标识的是测试套件'
                        logger.info(msg)
                        self.log_websocket_consumer.info(msg)
                        records = result[1]
                        # run every child case in tree order; stop at first failure
                        for record in records:
                            case_id, case_name = record
                            test_case = TestCase(execution_num, 0, case_id, '--', case_name, protocol, host, port, global_headers, self.log_websocket_consumer, self.test_platform_db)
                            msg = '======================开始运行测试用例[名称:%s, ID:%s]======================' % (case_name, case_id)
                            logger.info(msg)
                            self.log_websocket_consumer.info(msg)
                            result = test_case.run(True)  # True => debug mode
                            if not result[0]:
                                msg = '用例(ID:%s 名称:%s)运行出错:%s' % (case_id, case_name, result[2])
                                logger.error(msg)
                                self.log_websocket_consumer.error(msg)
                                return [False, msg]
                        return [True, '调试运行成功']
                    elif result[0] and not result[1]:
                        # no leaf children: the id denotes a single test case
                        msg = 'ID标识的是测试用例,开始执行用例'
                        logger.info(msg)
                        self.log_websocket_consumer.info(msg)
                        test_case = TestCase(execution_num, 0, case_id, '--', case_name, protocol, host, port, global_headers, self.log_websocket_consumer, self.test_platform_db)
                        msg = '======================开始运行测试用例[名称:%s, ID:%s]======================' % (case_name, case_id)
                        logger.info(msg)
                        self.log_websocket_consumer.info(msg)
                        result = test_case.run(True)  # True => debug mode
                        if not result[0]:
                            msg = '用例(ID:%s 名称:%s)运行出错:%s' % (case_id, case_name, result[2])
                            logger.error(msg)
                            self.log_websocket_consumer.error(msg)
                            return [False, msg]
                        else:
                            return[True, '调试运行成功']
                    else:
                        msg = '查询出错:%s' % result[1]
                        logger.error(msg)
                        self.log_websocket_consumer.error(msg)
                        return [False, msg]
                elif result[0] and not result[1]:
                    # NOTE(review): the message keeps literal %s placeholders -- it is
                    # never interpolated with (project_id, id); presumably a bug to
                    # confirm with the author.
                    reason = '未查找到相关信息,请检查项目ID(%s),用例(套件)标识ID(%s)是否正确'
                    logger.warn(reason)
                    self.log_websocket_consumer.warn(reason)
                    return [False, reason]
                else:
                    msg = '查找相关信息失败:%s' % result[1]
                    logger.error(msg)
                    self.log_websocket_consumer.error(msg)
                    return [False, msg]
            elif result[0] and not result[1]:
                msg = '未查询到项目相关的信息'
                logger.error(msg)
                self.log_websocket_consumer.error(msg)
                return [False, msg]
            else:
                msg = '查询项目相关信息失败:%s' % result[1]
                logger.error(msg)
                self.log_websocket_consumer.error(msg)
                return [False, msg]
        except Exception as e:
            msg = '%s' % e
            logger.error(msg)
            self.log_websocket_consumer.error(msg)
            return [False, msg]
        finally:
            # always release per-run resources, even after a failure
            msg = '正在释放资源'
            logger.info(msg)
            self.log_websocket_consumer.info(msg)
            msg = '正在关闭数据库连接'
            logger.info(msg)
            self.log_websocket_consumer.info(msg)
            # iterate a copy so entries can be deleted while closing
            for key, db in db_related_to_project_dic.copy().items():
                db.close()
                del db_related_to_project_dic[key]
            self.test_platform_db.close()
            msg = '正在清理与项目关联的全局变量'
            logger.info(msg)
            self.log_websocket_consumer.info(msg)
            global_variable_dic.clear()
    def debug_test_plan(self, plan_id):
        '''Debug-run a single test plan.

        plan_id -- primary key of the plan in `website_api_test_plan`.
        Both the plan ("valid_flag"/switch) and its project must be enabled
        ('启用'). Returns [True, msg] on success, [False, msg] otherwise.
        '''
        try:
            msg = '正在查询测试计划[ID:%s]相关信息' % plan_id
            logger.info(msg)
            self.log_websocket_consumer.info(msg)
            result = self.test_platform_db.select_one_record('SELECT plan_name, valid_flag, project_id, project_name FROM `website_api_test_plan` WHERE id = %s', (plan_id,))
            # result[0] is the query success flag, result[1] the fetched data
            if result[0] and result[1]:
                plan_name, switch, project_id, project_name = result[1]
                # --- load settings of the project the plan belongs to ---
                msg = '正在查询与计划关联的项目相关信息'
                logger.info(msg)
                self.log_websocket_consumer.info(msg)
                result = self.test_platform_db.select_one_record('SELECT protocol, host, port, environment_id, valid_flag '
                    'FROM `website_api_project_setting` WHERE id = %s', (project_id,))
                if result[0] and result[1]:
                    protocol, host, port, environment_id, valid_flag = result[1]
                    if valid_flag == '启用':  # project must be enabled
                        # --- open every MySQL/Redis connection configured for the project ---
                        msg = '正在查询与项目关联的数据库信息'
                        logger.info(msg)
                        self.log_websocket_consumer.info(msg)
                        result = self.test_platform_db.select_many_record("SELECT db_type, db_alias, db_name, db_host, db_port, db_user, db_passwd "
                            "FROM `website_database_setting` "
                            "WHERE locate('API%s', project_id) != 0 AND environment_id= '%s'" % (project_id, environment_id))
                        if result[0] and result[1]:
                            for record in result[1]:
                                db_type, db_alias, db_name, db_host, db_port, db_user, db_passwd = record
                                if db_type == 'MySQL':
                                    mydb = MyDB(self.log_websocket_consumer, db_name=db_name, db_host=db_host, port=db_port, user=db_user, password=db_passwd, charset='utf8')
                                    db_related_to_project_dic[db_alias] = mydb
                                elif db_type == 'Redis':
                                    # blank password/db fall back to no-auth and db 0
                                    if not db_passwd.strip():
                                        db_passwd = None
                                    if db_name.strip() == '':
                                        db_name = '0'
                                    myredis = RedisClient(self.log_websocket_consumer, host=db_host, port=db_port, password=db_passwd, db=db_name, charset='utf-8')
                                    redis_related_to_project_dic[db_alias] = myredis
                        elif not result[0]:
                            msg = '查询项目相关的数据库配置信息出错:%s' % result[1]
                            logger.error(msg)
                            self.log_websocket_consumer.error(msg)
                            return [False, msg]
                        # --- load project-level global variables ---
                        msg = '正在查询与项目关联的全局变量'
                        logger.info(msg)
                        self.log_websocket_consumer.info(msg)
                        # NOTE(review): here the SQL keeps literal %s placeholders and a
                        # separate params tuple, while debug_case_or_suit interpolates
                        # the same query with % -- confirm select_many_record accepts
                        # both calling styles.
                        result = self.test_platform_db.select_many_record("SELECT `name`, `value` "
                            "FROM `website_global_variable_setting` "
                            "WHERE project_type='API项目' AND locate('%s', project_id) != 0 AND locate('%s', env_id) != 0 ", (project_id, environment_id))
                        if result[0] and result[1]:
                            for record in result[1]:
                                name, value = record
                                name = name
                                global_variable_dic[name] = value
                        elif not result[0]:
                            msg = '查询项目相关的全局变量配置信息出错:%s' % result[1]
                            logger.error(msg)
                            self.log_websocket_consumer.error(msg)
                            return [False, msg]
                        # --- parse the optional 'global_headers' JSON variable ---
                        if 'global_headers' in global_variable_dic.keys():
                            global_headers = global_variable_dic['global_headers']
                            # Replace any full-width (Chinese) colon with an ASCII one:
                            # after an encode("utf-8").decode("latin1") round trip a
                            # full-width colon inside "key": would make json.loads fail.
                            # Tabs picked up from database storage are stripped too.
                            global_headers = global_headers.replace(':', ':').replace('\t', '')
                            global_headers = json.loads(global_headers, object_pairs_hook=OrderedDict)
                        else:
                            global_headers = {}
                        if switch == '启用':  # the plan itself must be enabled
                            msg = '======================开始运行测试计划[名称:%s, ID:%s]======================' % (plan_name, plan_id)
                            logger.info(msg)
                            self.log_websocket_consumer.info(msg)
                            test_plan = TestPlan(plan_id, plan_name, project_id, project_name, protocol, host, port, global_headers, self.test_platform_db, self.log_websocket_consumer)
                            result = test_plan.run(True)  # True => debug mode
                            if not result[0]:
                                msg = '调试运行失败:%s' % result[1]
                                logger.info(msg)
                                self.log_websocket_consumer.info(msg)
                                return [False, msg]
                            else:
                                return [True, '调试运行成功']
                        else:
                            msg = '测试计划已被禁用'
                            logger.warn(msg)
                            self.log_websocket_consumer.warn(msg)
                            return [False, msg]
                    else:
                        msg = '测试计划运行失败,计划关联的项目%s已被禁用' % project_name
                        logger.warn(msg)
                        self.log_websocket_consumer.warn(msg)
                        return [False, msg]
            elif result[0] and not result[1]:
                msg = '运行失败:未查询到计划相关信息'
                logger.warn(msg)
                self.log_websocket_consumer.warn(msg)
                return [False, msg]
            else:
                msg = '运行失败:%s' % result[1]
                logger.error(msg)
                self.log_websocket_consumer.error(msg)
                return [False, msg]
        except Exception as e:
            msg = '%s' % e
            logger.error(msg)
            self.log_websocket_consumer.error(msg)
            return [False, msg]
        finally:
            # always release per-run resources, even after a failure
            msg = '正在释放资源'
            logger.info(msg)
            self.log_websocket_consumer.info(msg)
            msg = '正在关闭数据库连接'
            logger.info(msg)
            self.log_websocket_consumer.info(msg)
            # iterate a copy so entries can be deleted while closing
            for key, db in db_related_to_project_dic.copy().items():
                db.close()
                del db_related_to_project_dic[key]
            self.test_platform_db.close()
            msg = '正在清理与项目关联的全局变量'
            logger.info(msg)
            self.log_websocket_consumer.info(msg)
            global_variable_dic.clear()
    def run_running_plan(self, running_plan_no, debug=False): # debug=True selects debug mode
        '''Run (or debug-run) a single running plan.

        running_plan_no -- running_plan_num identifying the batch of plans
        debug           -- when True just run and return the result; when
                           False additionally persist the run status/remark
                           back into `website_running_plan`
        Returns [ok_flag, message].
        '''
        try:
            msg = '当前运行计划编码为:%s, 正在查询该运行计划相关信息' % running_plan_no
            logger.info(msg)
            self.log_websocket_consumer.info(msg)
            result = self.test_platform_db.select_one_record('SELECT running_plan_name,project_id, project_name, plan_name, plan_id, valid_flag '
                'FROM `website_running_plan` WHERE running_plan_num =%s', (running_plan_no,))
            # result[0] is the query success flag, result[1] the fetched data
            if result[0] and result[1]:
                running_plan_name, project_id, project_name, plan_name, plan_id_list, valid_flag = result[1]
                plan_id_list = plan_id_list.split(',') # comma-separated string -> list of plan ids
                msg = '待运行项目:名称:%s,ID:%s,关联的测试计划有:%s' % (project_name, project_id, plan_name)
                logger.info(msg)
                self.log_websocket_consumer.info(msg)
                if valid_flag == '启用':  # only enabled running plans may execute
                    running_plan = RunningPlan(running_plan_no, running_plan_name, project_id, project_name, plan_name, plan_id_list, self.test_platform_db, self.log_websocket_consumer)
                    msg = '======================开始执行运行计划[名称:%s]======================' % running_plan_name
                    logger.info(msg)
                    self.log_websocket_consumer.info(msg)
                    result = running_plan.run(debug)
                    if not debug:
                        # non-debug: persist the outcome into website_running_plan;
                        # result is (ok, status, remark)
                        run_result = result[0]
                        if result[0]:
                            run_result = result[0]
                            mark = result[1]
                        else:
                            mark = result[2]
                            logger.error(mark)
                            self.log_websocket_consumer.error(mark)
                        msg = '正在更新数据库运行计划的运行状态'
                        logger.info(msg)
                        self.log_websocket_consumer.info(msg)
                        update_query = "UPDATE `website_running_plan` SET running_status ='%s', remark='%s' WHERE running_plan_num= %s"
                        # escape single quotes so the remark survives the quoted SQL
                        data = (result[1], result[2].replace("'",'\"'), running_plan_no)
                        result = self.test_platform_db.execute_update(update_query, data)
                        if not result[0]:
                            msg = '更新数据库运行计划的运行状态失败:%s' % result[1]
                            logger.error(msg)
                            self.log_websocket_consumer.error(msg)
                            mark = mark + '&' + msg
                        return [run_result, mark]
                    else:
                        # debug: surface the run result without touching the DB
                        return [result[0], result[2]]
                else:
                    msg = '执行失败,运行计划已被禁用'
                    logger.warn(msg)
                    self.log_websocket_consumer.warn(msg)
                    return [False, msg]
            elif result[0] and not result[1]:
                msg = '未查询到运行计划相关的信息'
                logger.error(msg)
                self.log_websocket_consumer.error(msg)
                return [False, msg]
            else:
                msg = '查询运行计划相关信息失败:%s' % result[1]
                logger.error(msg)
                self.log_websocket_consumer.error(msg)
                return [False, msg]
        except Exception as e:
            msg = '%s' % e
            logger.error(msg)
            return [False, msg]
        finally:
            # release resources; unlike the debug_* methods no project DB
            # connections are opened here, so only the platform DB is closed
            msg = '正在释放资源'
            logger.info(msg)
            self.log_websocket_consumer.info(msg)
            msg = '正在关闭数据库连接'
            logger.info(msg)
            self.log_websocket_consumer.info(msg)
            self.test_platform_db.close()
            msg = '正在清理与项目关联的全局变量'
            logger.info(msg)
            self.log_websocket_consumer.info(msg)
            global_variable_dic.clear()
| 51.343137
| 199
| 0.481764
| 2,087
| 20,948
| 4.587446
| 0.103498
| 0.076457
| 0.127428
| 0.147901
| 0.810215
| 0.784207
| 0.779716
| 0.767391
| 0.73334
| 0.716002
| 0
| 0.007356
| 0.422427
| 20,948
| 407
| 200
| 51.469287
| 0.783949
| 0.020002
| 0
| 0.758427
| 0
| 0.005618
| 0.122256
| 0.034491
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011236
| false
| 0.033708
| 0.033708
| 0
| 0.123596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
87765b6d76526c607c0f3e7b6c89482279a36701
| 9,584
|
py
|
Python
|
Windows/etc/scrap/db_wordlist.py
|
Dave360-crypto/Oblivion
|
0f5619ecba6a9b1ebc6dc6f4988ef6c542bf8ca3
|
[
"BSD-3-Clause"
] | 339
|
2020-11-30T16:02:29.000Z
|
2022-03-29T22:10:44.000Z
|
Windows/etc/scrap/db_wordlist.py
|
tracid56/Oblivion
|
f16dffbb6fab18c178aacda7f177ec3ae82d1997
|
[
"BSD-3-Clause"
] | 5
|
2021-01-03T18:59:02.000Z
|
2021-12-09T13:22:57.000Z
|
Windows/etc/scrap/db_wordlist.py
|
tracid56/Oblivion
|
f16dffbb6fab18c178aacda7f177ec3ae82d1997
|
[
"BSD-3-Clause"
] | 71
|
2020-11-30T19:38:04.000Z
|
2022-03-28T05:20:34.000Z
|
"""
List of word lists/Lista de word lists.
GitHub of the author of the word lists/GitHub do autor das word lists: https://github.com/danielmiessler
"""
lista_wordlists_debug = ['https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/probable-v2-top12000.txt']
lista_wordlists = [
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/probable-v2-top12000.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/mssql-passwords-nansh0u-guardicore.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/openwall.net-all.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-05.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-10.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-15.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-20.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-25.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-30.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-35.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-40.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-45.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-50.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-55.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-60.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-65.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-70.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-75.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/000webhost.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/Ashley-Madison.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/Lizard-Squad.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/adobe100.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/elitehacker-withcount.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/elitehacker.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/faithwriters-withcount.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/faithwriters.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/hak5-withcount.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/hak5.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/honeynet-withcount.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/honeynet.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/honeynet2.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/hotmail.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/izmy.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/md5decryptor-uk.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/muslimMatch-withcount.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/muslimMatch.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/myspace-withcount.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/myspace.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/phpbb-cleaned-up.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/phpbb-withcount.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/phpbb.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/porn-unknown-withcount.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/porn-unknown.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/singles.org-withcount.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/singles.org.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/tuscl.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/youporn2012-raw.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/youporn2012.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/twitter-banned.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/unkown-azul.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-10.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-100.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-1000.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-10000.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-100000.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-1000000.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-dup.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/richelieu-french-top5000.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/richelieu-french-top20000.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/probable-v2-top207.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/probable-v2-top1575.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/dutch_wordlist',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/dutch_passwordlist.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/dutch_common_wordlist.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/der-postillon.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/darkweb2017-top10000.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/darkweb2017-top1000.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/darkweb2017-top100.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/darkweb2017-top10.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/darkc0de.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/clarkson-university-82.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/cirt-default-passwords.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/bt4-password.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/UserPassCombo-Jay.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/PHP-Magic-Hashes.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Most-Popular-Letter-Passes.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Keyboard-Combinations.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/alleged-gmail-passwords.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/bible-withcount.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/bible.txt',
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Leaked-Databases/carders.cc.txt',
]
| 103.053763
| 127
| 0.808535
| 1,075
| 9,584
| 7.20186
| 0.12093
| 0.184448
| 0.268019
| 0.300181
| 0.920951
| 0.920951
| 0.920951
| 0.920951
| 0.920951
| 0.806897
| 0
| 0.017066
| 0.046223
| 9,584
| 92
| 128
| 104.173913
| 0.829887
| 0.015025
| 0
| 0
| 0
| 0.929412
| 0.924618
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.976471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 12
|
5e670b4bce8b3c8ded8c909860f9336a04737292
| 3,427
|
py
|
Python
|
netbuilder/examples/trainingShapes.py
|
andresberejnoi/machineLearning
|
b1fc5c684c97bdd42959a5ea6309563329ac3227
|
[
"MIT"
] | 4
|
2018-05-09T01:58:52.000Z
|
2021-07-28T07:47:41.000Z
|
netbuilder/examples/trainingShapes.py
|
zinph/NetBuilder
|
273f845db4cb821b4cf0a4c03770a0b909fbb560
|
[
"MIT"
] | null | null | null |
netbuilder/examples/trainingShapes.py
|
zinph/NetBuilder
|
273f845db4cb821b4cf0a4c03770a0b909fbb560
|
[
"MIT"
] | 3
|
2017-05-21T17:09:16.000Z
|
2019-07-08T09:07:53.000Z
|
# -*- coding: utf-8 -*-
"""6x6 bitmap glyphs of the digits 0-9 used as training examples.

Each glyph is a numpy array where 0.5 marks an "on" pixel and -0.5 an
"off" pixel.  (The original header's creation date was garbled by a digit
find-and-replace and has been dropped.)

@author: andresberejnoi
"""
import numpy as np


def _glyph(rows):
    """Convert 6 strings of '#'/' ' into a 6x6 array of 0.5 / -0.5."""
    return np.array([[0.5 if ch == '#' else -0.5 for ch in row]
                     for row in rows])


# The ASCII art below encodes exactly the same matrices as the original
# hand-written numeric literals ('#' -> 0.5, space -> -0.5).
shapes2 = {
    0: _glyph([' #### ',
               ' #  # ',
               ' #  # ',
               ' #  # ',
               ' #  # ',
               ' #### ']),
    1: _glyph(['  ##  ',
               ' ###  ',
               '  ##  ',
               '  ##  ',
               '  ##  ',
               ' #### ']),
    2: _glyph(['  ##  ',
               ' #  # ',
               '    # ',
               '   #  ',
               '  #   ',
               ' #### ']),
    3: _glyph([' #### ',
               '    # ',
               ' #### ',
               '    # ',
               '    # ',
               ' #### ']),
    4: _glyph([' #  # ',
               ' #  # ',
               ' #  # ',
               ' #### ',
               '    # ',
               '    # ']),
    5: _glyph([' #### ',
               ' #    ',
               ' #### ',
               ' #### ',
               '    # ',
               ' #### ']),
    6: _glyph([' #### ',
               ' #  # ',
               ' #  # ',
               ' #### ',
               ' #  # ',
               ' #### ']),
    7: _glyph([' #### ',
               '    # ',
               '   #  ',
               '   #  ',
               '  #   ',
               '  #   ']),
    8: _glyph([' #### ',
               ' #  # ',
               ' #### ',
               ' #### ',
               ' #  # ',
               ' #### ']),
    9: _glyph(['  ##  ',
               ' #  # ',
               ' #  # ',
               '  ### ',
               '    # ',
               ' ###  ']),
}
| 43.935897
| 57
| 0.257368
| 774
| 3,427
| 1.139535
| 0.037468
| 0.816327
| 1.190476
| 1.587302
| 0.895692
| 0.895692
| 0.895692
| 0.895692
| 0.895692
| 0.895692
| 0
| 0.369254
| 0.409688
| 3,427
| 78
| 58
| 43.935897
| 0.066733
| 0.027137
| 0
| 0.709677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.016129
| 0
| 0.016129
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
5e9cbc5bffa5f5c2651608dd36490f2fa1865ce6
| 5,192
|
py
|
Python
|
tests/unit_gnmi_clinet/test_unit_gnmi_client.py
|
open-traffic-generator/otg-gnmi
|
77c33659df76a148fad9eda5950b09ed514fab30
|
[
"MIT"
] | 2
|
2021-12-20T22:10:51.000Z
|
2022-03-17T04:13:08.000Z
|
tests/unit_gnmi_clinet/test_unit_gnmi_client.py
|
open-traffic-generator/otg-gnmi
|
77c33659df76a148fad9eda5950b09ed514fab30
|
[
"MIT"
] | 2
|
2021-11-30T13:34:50.000Z
|
2022-01-25T21:40:45.000Z
|
tests/unit_gnmi_clinet/test_unit_gnmi_client.py
|
open-traffic-generator/otg-gnmi
|
77c33659df76a148fad9eda5950b09ed514fab30
|
[
"MIT"
] | null | null | null |
from tests.utils.common import change_mockserver_status, create_new_session, crate_new_gnmi_server, kill_gnmi_server # noqa
def _run_case(action):
    """Start a fresh gNMI server, run *action* on a new session, and always
    tear the server down afterwards.

    action: callable receiving the session; its result is returned to the
    caller.  The mock server is set to HTTP 200 / non-error before the call,
    exactly as every original test did.
    """
    # NOTE: 'crate_new_gnmi_server' spelling comes from tests.utils.common
    # and cannot be fixed here without changing that module.
    gnmi_server = crate_new_gnmi_server()
    try:
        session = create_new_session()
        change_mockserver_status(200, False)
        return action(session)
    finally:
        kill_gnmi_server(gnmi_server)


def _subscribe(metric_names):
    """Run a subscribe request for *metric_names* against a fresh server."""
    return _run_case(lambda session: session.subscribe(metric_names))


def test_capabilites(snappiserver):
    # Capabilities request is expected to succeed (True).
    assert _run_case(lambda session: session.capabilites()) is True


def test_get(snappiserver):
    # Get is expected to return False against the mock server.
    assert _run_case(lambda session: session.get()) is False


def test_set(snappiserver):
    # Set is expected to return False against the mock server.
    assert _run_case(lambda session: session.set()) is False


def test_subscribe_port_metrics(snappiserver):
    assert _subscribe(['port_metrics']) is True


def test_subscribe_flow_metrics(snappiserver):
    assert _subscribe(['flow_metrics']) is True


def test_subscribe_flow_bgpv4_metrics(snappiserver):
    assert _subscribe(['bgpv4_metrics']) is True


def test_subscribe_bgpv6_metrics(snappiserver):
    assert _subscribe(['bgpv6_metrics']) is True


def test_subscribe_isis_metrics(snappiserver):
    assert _subscribe(['isis_metrics']) is True


def test_subscribe_ipv4_neighbors_states(snappiserver):
    assert _subscribe(['ipv4_neighbors']) is True


def test_subscribe_ipv6_neighbors_states(snappiserver):
    assert _subscribe(['ipv6_neighbors']) is True


def test_subscribe_all(snappiserver):
    # All metric groups in a single subscribe request.
    assert _subscribe(
        [
            'port_metrics',
            'flow_metrics',
            'bgpv4_metrics',
            'bgpv6_metrics',
            'isis_metrics',
        ]
    ) is True


def test_subscribe_port_and_flow(snappiserver):
    assert _subscribe(['port_metrics', 'flow_metrics']) is True


def test_subscribe_port_and_protocol(snappiserver):
    assert _subscribe(['port_metrics', 'bgpv4_metrics']) is True


def test_subscribe_flow_and_protocol(snappiserver):
    assert _subscribe(['flow_metrics', 'bgpv4_metrics']) is True


def test_subscribe_multiple_protocol(snappiserver):
    assert _subscribe(
        [
            'bgpv4_metrics',
            'bgpv6_metrics',
            'isis_metrics',
        ]
    ) is True
| 28.685083
| 123
| 0.667951
| 585
| 5,192
| 5.519658
| 0.076923
| 0.19201
| 0.109012
| 0.089192
| 0.940849
| 0.940849
| 0.940849
| 0.940849
| 0.940849
| 0.940849
| 0
| 0.01521
| 0.252889
| 5,192
| 180
| 124
| 28.844444
| 0.817221
| 0.00077
| 0
| 0.753333
| 0
| 0
| 0.050906
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.1
| false
| 0
| 0.006667
| 0
| 0.106667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0de0385d4d1599048e84854cb2fba39077452396
| 20,107
|
py
|
Python
|
typings/bl_ui/space_toolsystem_toolbar.py
|
Argmaster/PyR3
|
6786bcb6a101fe4bd4cc50fe43767b8178504b15
|
[
"MIT"
] | 2
|
2021-12-12T18:51:52.000Z
|
2022-02-23T09:49:16.000Z
|
typings/bl_ui/space_toolsystem_toolbar.py
|
Argmaster/PyR3
|
6786bcb6a101fe4bd4cc50fe43767b8178504b15
|
[
"MIT"
] | 2
|
2021-11-08T12:09:02.000Z
|
2021-12-12T23:01:12.000Z
|
typings/bl_ui/space_toolsystem_toolbar.py
|
Argmaster/PyR3
|
6786bcb6a101fe4bd4cc50fe43767b8178504b15
|
[
"MIT"
] | null | null | null |
import sys
import typing
import bl_ui.space_toolsystem_common
import bpy_types
class IMAGE_PT_tools_active(
bl_ui.space_toolsystem_common.ToolSelectPanelHelper, bpy_types.Panel,
bpy_types._GenericUI):
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
keymap_prefix = None
''' '''
tool_fallback_id = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_active_tool_fallback(self, context, layout, tool,
is_horizontal_layout):
'''
'''
pass
def draw_active_tool_header(self, context, layout, show_tool_name,
tool_key):
'''
'''
pass
def draw_cls(self, layout, context, detect_layout, scale_y):
'''
'''
pass
def draw_fallback_tool_items(self, layout, context):
'''
'''
pass
def draw_fallback_tool_items_for_pie_menu(self, layout, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keymap_ui_hierarchy(self, context_mode):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def register(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def tool_active_from_context(self, context):
'''
'''
pass
def tools_all(self):
'''
'''
pass
def tools_from_context(self, context, mode):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class NODE_PT_tools_active(bl_ui.space_toolsystem_common.ToolSelectPanelHelper,
bpy_types.Panel, bpy_types._GenericUI):
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
keymap_prefix = None
''' '''
tool_fallback_id = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_active_tool_fallback(self, context, layout, tool,
is_horizontal_layout):
'''
'''
pass
def draw_active_tool_header(self, context, layout, show_tool_name,
tool_key):
'''
'''
pass
def draw_cls(self, layout, context, detect_layout, scale_y):
'''
'''
pass
def draw_fallback_tool_items(self, layout, context):
'''
'''
pass
def draw_fallback_tool_items_for_pie_menu(self, layout, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keymap_ui_hierarchy(self, context_mode):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def register(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def tool_active_from_context(self, context):
'''
'''
pass
def tools_all(self):
'''
'''
pass
def tools_from_context(self, context, mode):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_tools_active(
bl_ui.space_toolsystem_common.ToolSelectPanelHelper, bpy_types.Panel,
bpy_types._GenericUI):
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
keymap_prefix = None
''' '''
tool_fallback_id = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_active_tool_fallback(self, context, layout, tool,
is_horizontal_layout):
'''
'''
pass
def draw_active_tool_header(self, context, layout, show_tool_name,
tool_key):
'''
'''
pass
def draw_cls(self, layout, context, detect_layout, scale_y):
'''
'''
pass
def draw_fallback_tool_items(self, layout, context):
'''
'''
pass
def draw_fallback_tool_items_for_pie_menu(self, layout, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keymap_ui_hierarchy(self, context_mode):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def register(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def tool_active_from_context(self, context):
'''
'''
pass
def tools_all(self):
'''
'''
pass
def tools_from_context(self, context, mode):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_PT_tools_active(
bl_ui.space_toolsystem_common.ToolSelectPanelHelper, bpy_types.Panel,
bpy_types._GenericUI):
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
keymap_prefix = None
''' '''
tool_fallback_id = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_active_tool_fallback(self, context, layout, tool,
is_horizontal_layout):
'''
'''
pass
def draw_active_tool_header(self, context, layout, show_tool_name,
tool_key):
'''
'''
pass
def draw_cls(self, layout, context, detect_layout, scale_y):
'''
'''
pass
def draw_fallback_tool_items(self, layout, context):
'''
'''
pass
def draw_fallback_tool_items_for_pie_menu(self, layout, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keymap_ui_hierarchy(self, context_mode):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def register(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def tool_active_from_context(self, context):
'''
'''
pass
def tools_all(self):
'''
'''
pass
def tools_from_context(self, context, mode):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class _defs_annotate:
    # Annotation tool definitions. All attributes default to None here;
    # NOTE(review): this module appears to be an auto-generated typing stub
    # (every body is `pass`) — real values come from Blender's bl_ui.
    eraser = None
    ''' '''
    line = None
    ''' '''
    poly = None
    ''' '''
    scribble = None
    ''' '''

    def draw_settings_common(self, context, layout, tool):
        '''Stub signature only; the body is intentionally empty.
        '''
        pass
class _defs_edit_armature:
bone_envelope = None
''' '''
bone_size = None
''' '''
extrude = None
''' '''
extrude_cursor = None
''' '''
roll = None
''' '''
class _defs_edit_curve:
curve_radius = None
''' '''
curve_vertex_randomize = None
''' '''
draw = None
''' '''
extrude = None
''' '''
extrude_cursor = None
''' '''
tilt = None
''' '''
class _defs_edit_mesh:
bevel = None
''' '''
bisect = None
''' '''
edge_slide = None
''' '''
extrude = None
''' '''
extrude_cursor = None
''' '''
extrude_individual = None
''' '''
extrude_manifold = None
''' '''
extrude_normals = None
''' '''
inset = None
''' '''
knife = None
''' '''
loopcut_slide = None
''' '''
offset_edge_loops_slide = None
''' '''
poly_build = None
''' '''
push_pull = None
''' '''
rip_edge = None
''' '''
rip_region = None
''' '''
shrink_fatten = None
''' '''
spin = None
''' '''
spin_duplicate = None
''' '''
tosphere = None
''' '''
vert_slide = None
''' '''
vertex_randomize = None
''' '''
vertex_smooth = None
''' '''
class _defs_gpencil_edit:
bend = None
''' '''
box_select = None
''' '''
circle_select = None
''' '''
extrude = None
''' '''
interpolate = None
''' '''
lasso_select = None
''' '''
radius = None
''' '''
select = None
''' '''
shear = None
''' '''
tosphere = None
''' '''
transform_fill = None
''' '''
def is_segment(self, context):
'''
'''
pass
class _defs_gpencil_paint:
arc = None
''' '''
box = None
''' '''
circle = None
''' '''
curve = None
''' '''
cutter = None
''' '''
eyedropper = None
''' '''
interpolate = None
''' '''
line = None
''' '''
polyline = None
''' '''
def generate_from_brushes(self, context):
'''
'''
pass
def gpencil_primitive_toolbar(self, context, layout, _tool, props):
'''
'''
pass
class _defs_gpencil_sculpt:
def generate_from_brushes(self, context):
'''
'''
pass
def poll_select_mask(self, context):
'''
'''
pass
class _defs_gpencil_vertex:
def generate_from_brushes(self, context):
'''
'''
pass
def poll_select_mask(self, context):
'''
'''
pass
class _defs_gpencil_weight:
def generate_from_brushes(self, context):
'''
'''
pass
class _defs_image_generic:
cursor = None
''' '''
sample = None
''' '''
def poll_uvedit(self, context):
'''
'''
pass
class _defs_image_uv_edit:
rip_region = None
''' '''
class _defs_image_uv_sculpt:
def generate_from_brushes(self, context):
'''
'''
pass
class _defs_image_uv_select:
box = None
''' '''
circle = None
''' '''
lasso = None
''' '''
select = None
''' '''
class _defs_image_uv_transform:
rotate = None
''' '''
scale = None
''' '''
transform = None
''' '''
translate = None
''' '''
class _defs_node_edit:
links_cut = None
''' '''
class _defs_node_select:
box = None
''' '''
circle = None
''' '''
lasso = None
''' '''
select = None
''' '''
class _defs_particle:
def generate_from_brushes(self, context):
'''
'''
pass
class _defs_pose:
breakdown = None
''' '''
push = None
''' '''
relax = None
''' '''
class _defs_sculpt:
cloth_filter = None
''' '''
color_filter = None
''' '''
face_set_box = None
''' '''
face_set_edit = None
''' '''
face_set_lasso = None
''' '''
hide_border = None
''' '''
mask_border = None
''' '''
mask_by_color = None
''' '''
mask_lasso = None
''' '''
mask_line = None
''' '''
mesh_filter = None
''' '''
project_line = None
''' '''
trim_box = None
''' '''
trim_lasso = None
''' '''
def generate_from_brushes(self, context):
'''
'''
pass
class _defs_sequencer_generic:
blade = None
''' '''
sample = None
''' '''
class _defs_sequencer_select:
box = None
''' '''
select = None
''' '''
class _defs_texture_paint:
def generate_from_brushes(self, context):
'''
'''
pass
def poll_select_mask(self, context):
'''
'''
pass
class _defs_transform:
rotate = None
''' '''
scale = None
''' '''
scale_cage = None
''' '''
shear = None
''' '''
transform = None
''' '''
translate = None
''' '''
class _defs_vertex_paint:
def generate_from_brushes(self, context):
'''
'''
pass
def poll_select_mask(self, context):
'''
'''
pass
class _defs_view3d_add:
cone_add = None
''' '''
cube_add = None
''' '''
cylinder_add = None
''' '''
ico_sphere_add = None
''' '''
uv_sphere_add = None
''' '''
def description_interactive_add(self, context, _item, _km, prefix):
'''
'''
pass
def draw_settings_interactive_add(self, layout, tool, extra):
'''
'''
pass
class _defs_view3d_generic:
cursor = None
''' '''
cursor_click = None
''' '''
ruler = None
''' '''
class _defs_view3d_select:
box = None
''' '''
circle = None
''' '''
lasso = None
''' '''
select = None
''' '''
class _defs_weight_paint:
gradient = None
''' '''
sample_weight = None
''' '''
sample_weight_group = None
''' '''
def generate_from_brushes(self, context):
'''
'''
pass
def poll_select_mask(self, context):
'''
'''
pass
class _template_widget:
    # Gizmo/widget template holders.
    # NOTE(review): auto-generated typing stub — bodies are intentionally
    # empty; the real implementations live in Blender's bl_ui package.
    def VIEW3D_GGT_xform_extrude(self):
        '''Stub signature only; the body is intentionally empty.
        '''
        pass

    def VIEW3D_GGT_xform_gizmo(self):
        '''Stub signature only; the body is intentionally empty.
        '''
        pass
def generate_from_enum_ex(_context, idname_prefix, icon_prefix, type, attr,
                          cursor, tooldef_keywords, exclude_filter):
    '''Stub signature only; the body is intentionally empty.

    NOTE(review): this module appears to be an auto-generated typing stub
    for Blender's ``bl_ui.space_toolsystem_toolbar`` — confirm before
    relying on behavior.
    '''
    pass
def kmi_to_string_or_none(kmi):
    """Stub: format a keymap item for display, or return None."""
    return None
| 12.980633
| 79
| 0.42796
| 1,724
| 20,107
| 4.671114
| 0.12181
| 0.130386
| 0.128399
| 0.032286
| 0.766671
| 0.7509
| 0.73513
| 0.721346
| 0.719359
| 0.701974
| 0
| 0.000528
| 0.434923
| 20,107
| 1,548
| 80
| 12.989018
| 0.708238
| 0
| 0
| 0.779221
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.313544
| false
| 0.313544
| 0.007421
| 0
| 0.656772
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 9
|
217d341db2d3c24644e33cc22b0867a8231360df
| 4,987
|
py
|
Python
|
tests/test_jarm.py
|
PaloAltoNetworks/pyjarm
|
ecf16e1ca339ab7cfc3d5f9efa11b93f528a3097
|
[
"0BSD"
] | 26
|
2021-01-16T13:16:32.000Z
|
2022-03-29T07:45:09.000Z
|
tests/test_jarm.py
|
PaloAltoNetworks/pyjarm
|
ecf16e1ca339ab7cfc3d5f9efa11b93f528a3097
|
[
"0BSD"
] | 4
|
2021-01-29T09:28:43.000Z
|
2021-05-29T15:16:57.000Z
|
tests/test_jarm.py
|
PaloAltoNetworks/pyjarm
|
ecf16e1ca339ab7cfc3d5f9efa11b93f528a3097
|
[
"0BSD"
] | 5
|
2021-01-15T17:09:28.000Z
|
2021-10-16T19:17:33.000Z
|
from mocket import Mocket
import socket
import os
import asyncio
from jarm.scanner.scanner import Scanner
from jarm.proxy.proxy import Proxy
def test_scanner_google_noproxy_ipv4_sync(mocker):
    """Synchronous scan of a recorded google.com session, no proxy, IPv4."""
    host = "google.com"
    addr = "142.250.184.174"
    port = 443
    expected_jarm = "27d40d40d29d40d1dc42d43d00041d4689ee210389f4f6b4b5b1b93f92252d"
    addr_family = socket.AF_INET
    test_name = "google_com_443_noproxy_ipv4"
    # Pin all randomness so the TLS exchange matches the recorded session.
    mocker.patch(
        "os.urandom",
        return_value=b"\x17]\x18r\xb2\xe7\x14L\x82\x9anR\xe59{D\xb9\xf8\xb2P\x9cd\xb5\x03g3<\x99)\x176n",
    )
    mocker.patch("random.choice", return_value=b"\x5a\x5a")
    # Pin DNS resolution to the recorded address.
    mocker.patch(
        "socket.getaddrinfo",
        return_value=[(addr_family, socket.SOCK_STREAM, socket.IPPROTO_TCP, "", (addr, port))],
    )
    Mocket.enable(test_name, "./tests/data")
    result = Scanner.scan(host, port, address_family=addr_family, concurrency=1)
    assert result == (expected_jarm, host, port)
def test_scanner_google_noproxy_ipv4(mocker):
    """Async scan of a recorded google.com session, no proxy, IPv4."""
    host = "google.com"
    addr = "142.250.184.174"
    port = 443
    expected_jarm = "27d40d40d29d40d1dc42d43d00041d4689ee210389f4f6b4b5b1b93f92252d"
    addr_family = socket.AF_INET
    test_name = "google_com_443_noproxy_ipv4"
    # Pin all randomness so the TLS exchange matches the recorded session.
    mocker.patch(
        "os.urandom",
        return_value=b"\x17]\x18r\xb2\xe7\x14L\x82\x9anR\xe59{D\xb9\xf8\xb2P\x9cd\xb5\x03g3<\x99)\x176n",
    )
    mocker.patch("random.choice", return_value=b"\x5a\x5a")
    # Pin DNS resolution to the recorded address.
    mocker.patch(
        "socket.getaddrinfo",
        return_value=[(addr_family, socket.SOCK_STREAM, socket.IPPROTO_TCP, "", (addr, port))],
    )
    Mocket.enable(test_name, "./tests/data")
    result = asyncio.run(
        Scanner.scan_async(host, port, address_family=addr_family, concurrency=1)
    )
    assert result == (expected_jarm, host, port)
def test_scanner_google_httpproxy_param_ipv4(mocker):
    """Async scan through an HTTP proxy passed explicitly as a parameter."""
    host = "google.com"
    port = 443
    expected_jarm = "27d40d40d29d40d1dc42d43d00041d4689ee210389f4f6b4b5b1b93f92252d"
    addr_family = socket.AF_INET
    test_name = "google_com_443_httpproxy_param_ipv4"
    proxy_url = "http://user:pass@127.0.0.1:3128"
    # Module-level counter shared with the recorded session: each proxied
    # connection gets a distinct User-Agent so Mocket can match requests.
    global conn_idx
    conn_idx = 0

    def get_user_agent():
        global conn_idx
        print(f"Called at {conn_idx}")
        headers = {"User-Agent": f"pyJARM/UnitTest/{test_name}/{conn_idx}"}
        conn_idx += 1
        return headers

    # Pin all randomness so the TLS exchange matches the recorded session.
    mocker.patch(
        "os.urandom",
        return_value=b"\x17]\x18r\xb2\xe7\x14L\x82\x9anR\xe59{D\xb9\xf8\xb2P\x9cd\xb5\x03g3<\x99)\x176n",
    )
    mocker.patch("random.choice", return_value=b"\x5a\x5a")
    mocker.patch.object(Proxy, "get_http_headers", side_effect=get_user_agent)
    Mocket.enable(test_name, "./tests/data")
    result = asyncio.run(
        Scanner.scan_async(
            host, port, proxy=proxy_url, address_family=addr_family, concurrency=1
        )
    )
    assert result == (expected_jarm, host, port)
def test_scanner_google_httpproxy_env_ipv4(mocker):
    """Async scan picking up the HTTP proxy from the HTTPS_PROXY env var."""
    host = "google.com"
    port = 443
    expected_jarm = "27d40d40d29d40d1dc42d43d00041d4689ee210389f4f6b4b5b1b93f92252d"
    addr_family = socket.AF_INET
    test_name = "google_com_443_httpproxy_env_ipv4"
    os.environ["HTTPS_PROXY"] = "http://user:pass@127.0.0.1:3128"
    # Module-level counter shared with the recorded session: each proxied
    # connection gets a distinct User-Agent so Mocket can match requests.
    global conn_idx
    conn_idx = 0

    def get_user_agent():
        global conn_idx
        print(f"Called at {conn_idx}")
        headers = {"User-Agent": f"pyJARM/UnitTest/{test_name}/{conn_idx}"}
        conn_idx += 1
        return headers

    # Pin all randomness so the TLS exchange matches the recorded session.
    mocker.patch(
        "os.urandom",
        return_value=b"\x17]\x18r\xb2\xe7\x14L\x82\x9anR\xe59{D\xb9\xf8\xb2P\x9cd\xb5\x03g3<\x99)\x176n",
    )
    mocker.patch("random.choice", return_value=b"\x5a\x5a")
    mocker.patch.object(Proxy, "get_http_headers", side_effect=get_user_agent)
    Mocket.enable(test_name, "./tests/data")
    result = asyncio.run(
        Scanner.scan_async(host, port, address_family=addr_family, concurrency=1)
    )
    assert result == (expected_jarm, host, port)
def test_scanner_google_ignoreproxy_env_ipv4(mocker):
    """Async scan with proxy='ignore' overriding the HTTPS_PROXY env var."""
    host = "google.com"
    port = 443
    expected_jarm = "27d40d40d29d40d1dc42d43d00041d4689ee210389f4f6b4b5b1b93f92252d"
    addr_family = socket.AF_INET
    test_name = "google_com_443_ignoreproxy_env_ipv4"
    os.environ["HTTPS_PROXY"] = "http://user:pass@127.0.0.1:3128"
    proxy_mode = "ignore"
    # Module-level counter shared with the recorded session: each proxied
    # connection gets a distinct User-Agent so Mocket can match requests.
    global conn_idx
    conn_idx = 0

    def get_user_agent():
        global conn_idx
        print(f"Called at {conn_idx}")
        headers = {"User-Agent": f"pyJARM/UnitTest/{test_name}/{conn_idx}"}
        conn_idx += 1
        return headers

    # Pin all randomness so the TLS exchange matches the recorded session.
    mocker.patch(
        "os.urandom",
        return_value=b"\x17]\x18r\xb2\xe7\x14L\x82\x9anR\xe59{D\xb9\xf8\xb2P\x9cd\xb5\x03g3<\x99)\x176n",
    )
    mocker.patch("random.choice", return_value=b"\x5a\x5a")
    mocker.patch.object(Proxy, "get_http_headers", side_effect=get_user_agent)
    Mocket.enable(test_name, "./tests/data")
    result = asyncio.run(
        Scanner.scan_async(
            host, port, proxy=proxy_mode, address_family=addr_family, concurrency=1
        )
    )
    assert result == (expected_jarm, host, port)
| 30.783951
| 105
| 0.671145
| 663
| 4,987
| 4.835596
| 0.15083
| 0.039301
| 0.03743
| 0.026201
| 0.943543
| 0.943543
| 0.930443
| 0.930443
| 0.930443
| 0.930443
| 0
| 0.113693
| 0.197514
| 4,987
| 161
| 106
| 30.975155
| 0.687406
| 0
| 0
| 0.767442
| 0
| 0.03876
| 0.315019
| 0.196711
| 0
| 0
| 0
| 0
| 0.03876
| 1
| 0.062016
| false
| 0.023256
| 0.046512
| 0
| 0.131783
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
218ecb262f844770804c70a0ccbdad0aa2018e19
| 17,355
|
py
|
Python
|
anatool/experiment/timeweb.py
|
spacelis/anatool
|
06e7c95918222735f6b1a7339e9270c7692ebf74
|
[
"MIT"
] | 1
|
2015-03-29T11:47:34.000Z
|
2015-03-29T11:47:34.000Z
|
anatool/experiment/timeweb.py
|
spacelis/anatool
|
06e7c95918222735f6b1a7339e9270c7692ebf74
|
[
"MIT"
] | null | null | null |
anatool/experiment/timeweb.py
|
spacelis/anatool
|
06e7c95918222735f6b1a7339e9270c7692ebf74
|
[
"MIT"
] | null | null | null |
#!python
# -*- coding: utf-8 -*-
"""File: timeweb.py
Description:
History:
0.1.0 The first version.
"""
__version__ = '0.1.0'
__author__ = 'SpaceLis'
from matplotlib import pyplot as plt
from anatool.analysis.timemodel import TimeModel
from anatool.analysis.textmodel import LanguageModel
from anatool.analysis.ranking import ranke, linearjoin, randranke
from anatool.analysis.evaluation import batcheval, wilcoxontest, placetotalrank
from anatool.dm.dataset import Dataset, loadrows, place_name
from anatool.dm.db import GEOTWEET
import seaborn as sns
# Global seaborn/matplotlib styling applied to every plot in this module.
sns.set_palette("deep", desat=.6)
sns.set_style("white")
sns.set_context(font_scale=1.5, rc={"figure.figsize": (3, 2), 'axes.grid': False, 'axes.linewidth': 1,})
def cmptimeweb(cities, numtwts, numtest):
""" compare the time model + web model to original pure text model
"""
lmranks = [list() for i in range(len(numtwts))]
tmranks = [list() for i in range(len(numtwts))]
wmranks = list()
randranks = list()
lmtmranks = [list() for i in range(len(numtwts))]
wmlmranks = [list() for i in range(len(numtwts))]
wmlmtmranks = [list() for i in range(len(numtwts))]
test = Dataset()
for places in cities:
lms = [dict() for i in range(len(numtwts))]
tms = [dict() for i in range(len(numtwts))]
wms = dict()
tst = Dataset()
for pid in places:
twtp = loadrows(GEOTWEET, ('place_id', 'text', 'created_at'),
('place_id=\'{0}\''.format(pid),), 'sample',
'order by rand() limit {0}'.format(max(numtwts) + numtest))
for i in range(len(numtwts)):
lms[i][pid] = LanguageModel(twtp['text'][:numtwts[i]])
tms[i][pid] = TimeModel(twtp['created_at'][:numtwts[i]])
web = loadrows(GEOTWEET, ('place_id', 'web'),
('place_id=\'{0}\''.format(pid),), 'web',
'order by rand() limit 30')
wms[pid] = LanguageModel(web['web'])
# test data
for i in range(max(numtwts), max(numtwts) + numtest):
tst.append({'label': pid,
'lm': LanguageModel([twtp['text'][i],]),
'tm': TimeModel([twtp['created_at'][i],])})
test.extend(tst)
# rank
for item in tst:
for i in range(len(numtwts)):
lmranks[i].append(ranke(lms[i], item['lm']))
tmranks[i].append(ranke(tms[i], item['tm']))
wmranks.append(ranke(wms, item['lm']))
randranks.append(randranke(places))
for i in range(len(numtwts)):
for ranklm, ranktm in zip(lmranks[i], tmranks[i]):
lmtmranks[i].append(linearjoin([ranklm, ranktm], [0.5, 0.5]))
for ranklm, rankwm in zip(lmranks[i], wmranks):
wmlmranks[i].append(linearjoin([ranklm, rankwm], [0.5, 0.5]))
for ranklm, ranktm, rankwm in zip(lmranks[i], tmranks[i], wmranks):
wmlmtmranks[i].append(\
linearjoin([ranklm, ranktm, rankwm], [0.33, 0.33, 0.33]))
# plot
candls = ['-', '--']
mks = ['o', '^', '*', 'v', 's']
#for i in range(len(numtwts)):
#lmeval = batcheval(lmranks[i], test['label'])
#plt.plot(lmeval['pos'], lmeval['rate'],
#label='tweet(s={0})'.format(numtwts[i]),
#ls=candls[i%2], marker=mks[i/2])
#for i in range(len(numtwts)):
#for plc in placetotalrank(lmranks[i], test)['label'][-10:]:
#print place_name(plc), plc
#print placetotalrank(lmranks[i], test)['totalrank'][-10:]
#print wilcoxontest(lmranks[i], lmranks[i-1], test)
#plt.legend(loc='lower right')
#---------------------------------------------------------------
for i in range(len(numtwts)):
lmeval = batcheval(lmranks[i], test['label'])
plt.plot(lmeval['pos'], lmeval['rate'],
label='tweet(s={0})'.format(numtwts[i]),
ls=candls[i], marker='o')
wmlmeval = batcheval(wmlmranks[i], test['label'])
plt.plot(wmlmeval['pos'], wmlmeval['rate'],
label='tweet(s={0})+web'.format(numtwts[i]),
ls=candls[i], marker='^')
print wilcoxontest(lmranks[i], wmlmranks[i], test)
for plc in placetotalrank(wmlmranks[i], test)['label'][-10:]:
print place_name(plc), plc
print placetotalrank(wmlmranks[i], test)['totalrank'][-10:]
wmeval = batcheval(wmranks, test['label'])
for plc in placetotalrank(wmranks, test)['label'][-10:]:
print place_name(plc), plc
print placetotalrank(wmranks, test)['totalrank'][-10:]
plt.plot(wmeval['pos'], wmeval['rate'],
label='web',
ls=':')
plt.plot(lmeval['pos'], [float(r) / max(lmeval['pos']) for r in lmeval['pos']],
ls='-.', marker='s',
label='Random Baseline')
#---------------------------------------------------------------
#for i in range(len(numtwts)):
#plt.subplot(121 + i)
#plt.title('$s={0}$'.format(numtwts[i]))
#lmeval = batcheval(lmranks[i], test['label'])
#plt.plot(lmeval['pos'], lmeval['rate'],
#label='tweet',
#ls=candls[i], marker='o')
#lmtmeval = batcheval(lmtmranks[i], test['label'])
#plt.plot(lmtmeval['pos'], lmtmeval['rate'],
#label='tweet+time',
#ls=candls[i], marker='^')
#wmlmtmeval = batcheval(wmlmtmranks[i], test['label'])
#plt.plot(wmlmtmeval['pos'], wmlmtmeval['rate'],
#label='tweet+time+web',
#ls=candls[i], marker='*')
#plt.legend(loc='lower right')
#plt.ylabel('Rate containing Reference POI')
#plt.xlabel('Top $p$ places')
#plt.show()
#---------------------------------------------------------------
#i=0
#plt.subplot(121 + i)
#plt.title('$s={0}$'.format(numtwts[i]))
#tmeval = batcheval(tmranks[i], test['label'])
#plt.plot(tmeval['pos'], tmeval['rate'],
#label='time',
#ls=candls[i], marker='o')
#lmeval = batcheval(lmranks[i], test['label'])
#plt.plot(lmeval['pos'], lmeval['rate'],
#label='tweet',
#ls=candls[i], marker='^')
#lmtmeval = batcheval(lmtmranks[i], test['label'])
#plt.plot(lmtmeval['pos'], lmtmeval['rate'],
#label='tweet+time',
#ls=candls[i], marker='*')
#for plc in placetotalrank(tmranks[i], test)['label'][-10:]:
#print place_name(plc), plc
#print placetotalrank(tmranks[i], test)['totalrank'][-10:]
#for plc in placetotalrank(lmtmranks[i], test)['label'][-10:]:
#print place_name(plc), plc
#print placetotalrank(lmtmranks[i], test)['totalrank'][-10:]
#print wilcoxontest(lmranks[i], lmtmranks[i], test)
#plt.legend(loc='lower right')
#plt.ylabel('Rate containing Reference POI')
#plt.xlabel('Top $p$ places')
#i=1
#plt.subplot(121 + i)
#plt.title('$s={0}$'.format(numtwts[i]))
#tmeval = batcheval(tmranks[i], test['label'])
#plt.plot(tmeval['pos'], tmeval['rate'],
#label='time',
#ls=candls[i], marker='o')
#wmlmeval = batcheval(wmlmranks[i], test['label'])
#plt.plot(wmlmeval['pos'], wmlmeval['rate'],
#label='tweet + web',
#ls=candls[i], marker='^')
#wmlmtmeval = batcheval(wmlmtmranks[i], test['label'])
#plt.plot(wmlmtmeval['pos'], wmlmtmeval['rate'],
#label='tweet+time+web',
#ls=candls[i], marker='*')
#for plc in placetotalrank(wmlmranks[i], test)['label'][-10:]:
#print place_name(plc), plc
#print placetotalrank(wmlmranks[i], test)['totalrank'][-10:]
#for plc in placetotalrank(wmlmtmranks[i], test)['label'][-10:]:
#print place_name(plc), plc
#print placetotalrank(wmlmtmranks[i], test)['totalrank'][-10:]
#print wilcoxontest(wmlmranks[i], wmlmtmranks[i], test)
plt.legend(loc='lower right')
plt.ylabel('Rate containing Reference POI')
plt.xlabel('Top $p$ places')
plt.show()
def cmpsparsecombine(cities, numtwts, numtest):
    """ the combined model performance under the influence of sparseness
    """
    # One rank list per training-set size in `numtwts`.
    lmranks = [list() for i in range(len(numtwts))]
    tmranks = [list() for i in range(len(numtwts))]
    wmranks = list()
    randranks = list()
    lmtmranks = [list() for i in range(len(numtwts))]
    wmlmranks = [list() for i in range(len(numtwts))]
    wmlmtmranks = [list() for i in range(len(numtwts))]
    test = Dataset()
    for places in cities:
        lms = [dict() for i in range(len(numtwts))]
        tms = [dict() for i in range(len(numtwts))]
        wms = dict()
        tst = Dataset()
        for pid in places:
            # Sample enough tweets for the largest training size plus tests.
            twtp = loadrows(GEOTWEET, ('place_id', 'text', 'created_at'),
                    ('place_id=\'{0}\''.format(pid),), 'sample',
                    'order by rand() limit {0}'.format(max(numtwts) + numtest))
            for i in range(len(numtwts)):
                lms[i][pid] = LanguageModel(twtp['text'][:numtwts[i]])
                tms[i][pid] = TimeModel(twtp['created_at'][:numtwts[i]])
            web = loadrows(GEOTWEET, ('place_id', 'web'),
                    ('place_id=\'{0}\''.format(pid),), 'web',
                    'order by rand() limit 30')
            wms[pid] = LanguageModel(web['web'])
            # test data
            for i in range(max(numtwts), max(numtwts) + numtest):
                tst.append({'label': pid,
                    'lm': LanguageModel([twtp['text'][i],]),
                    'tm': TimeModel([twtp['created_at'][i],])})
        test.extend(tst)
        # rank
        for item in tst:
            for i in range(len(numtwts)):
                lmranks[i].append(ranke(lms[i], item['lm']))
                tmranks[i].append(ranke(tms[i], item['tm']))
            wmranks.append(ranke(wms, item['lm']))
            randranks.append(randranke(places))
    # Linear combinations of the individual rankings.
    for i in range(len(numtwts)):
        for ranklm, ranktm in zip(lmranks[i], tmranks[i]):
            lmtmranks[i].append(linearjoin([ranklm, ranktm], [0.5, 0.5]))
        for ranklm, rankwm in zip(lmranks[i], wmranks):
            wmlmranks[i].append(linearjoin([ranklm, rankwm], [0.5, 0.5]))
        for ranklm, ranktm, rankwm in zip(lmranks[i], tmranks[i], wmranks):
            wmlmtmranks[i].append(\
                linearjoin([ranklm, ranktm, rankwm], [0.33, 0.33, 0.33]))
    # plot
    candls = ['-', '--']
    mks = ['o', '^', '*', 'v', 's']
    # Left subplot: first training size, time vs tweet vs tweet+time.
    i=0
    plt.subplot(121 + i)
    plt.title('$s={0}$'.format(numtwts[i]))
    tmeval = batcheval(tmranks[i], test['label'])
    plt.plot(tmeval['pos'], tmeval['rate'],
            label='time',
            ls=candls[i], marker='o')
    lmeval = batcheval(lmranks[i], test['label'])
    plt.plot(lmeval['pos'], lmeval['rate'],
            label='tweet',
            ls=candls[i], marker='^')
    lmtmeval = batcheval(lmtmranks[i], test['label'])
    plt.plot(lmtmeval['pos'], lmtmeval['rate'],
            label='tweet+time',
            ls=candls[i], marker='*')
    for plc in placetotalrank(tmranks[i], test)['label'][-10:]:
        print place_name(plc), plc
    print placetotalrank(tmranks[i], test)['totalrank'][-10:]
    for plc in placetotalrank(lmtmranks[i], test)['label'][-10:]:
        print place_name(plc), plc
    print placetotalrank(lmtmranks[i], test)['totalrank'][-10:]
    print wilcoxontest(lmranks[i], lmtmranks[i], test)
    plt.plot(lmeval['pos'], [float(r) / max(lmeval['pos']) for r in lmeval['pos']],
            ls='-.', marker='s',
            label='Random Baseline')
    plt.legend(loc='lower right')
    plt.ylabel('Rate containing Reference POI')
    plt.xlabel('Top $p$ places')
    # Right subplot: second training size, time vs tweet+web vs tweet+time+web.
    i=1
    plt.subplot(121 + i)
    plt.title('$s={0}$'.format(numtwts[i]))
    tmeval = batcheval(tmranks[i], test['label'])
    plt.plot(tmeval['pos'], tmeval['rate'],
            label='time',
            ls=candls[i], marker='o')
    wmlmeval = batcheval(wmlmranks[i], test['label'])
    plt.plot(wmlmeval['pos'], wmlmeval['rate'],
            label='tweet + web',
            ls=candls[i], marker='^')
    wmlmtmeval = batcheval(wmlmtmranks[i], test['label'])
    plt.plot(wmlmtmeval['pos'], wmlmtmeval['rate'],
            label='tweet+time+web',
            ls=candls[i], marker='*')
    for plc in placetotalrank(wmlmranks[i], test)['label'][-10:]:
        print place_name(plc), plc
    print placetotalrank(wmlmranks[i], test)['totalrank'][-10:]
    for plc in placetotalrank(wmlmtmranks[i], test)['label'][-10:]:
        print place_name(plc), plc
    print placetotalrank(wmlmtmranks[i], test)['totalrank'][-10:]
    print wilcoxontest(wmlmranks[i], wmlmtmranks[i], test)
    plt.plot(lmeval['pos'], [float(r) / max(lmeval['pos']) for r in lmeval['pos']],
            ls='-.', marker='s',
            label='Random Baseline')
    plt.legend(loc='lower right')
    plt.ylabel('Rate containing Reference POI')
    plt.xlabel('Top $p$ places')
    plt.show()
def cmpsparse(cities, numtwts, numtest):
    """ Compare the model performance trained with different amount of tweets
    """
    # One rank list per training-set size in `numtwts`.
    lmranks = [list() for i in range(len(numtwts))]
    randranks = list()
    lmtmranks = [list() for i in range(len(numtwts))]
    test = Dataset()
    for places in cities:
        lms = [dict() for i in range(len(numtwts))]
        tst = Dataset()
        for pid in places:
            # Sample enough tweets for the largest training size plus tests.
            twtp = loadrows(GEOTWEET, ('place_id', 'text', 'created_at'),
                    ('place_id=\'{0}\''.format(pid),), 'sample',
                    'order by rand() limit {0}'.format(max(numtwts) + numtest))
            for i in range(len(numtwts)):
                lms[i][pid] = LanguageModel(twtp['text'][:numtwts[i]])
            # test data
            for i in range(max(numtwts), max(numtwts) + numtest):
                tst.append({'label': pid,
                    'lm': LanguageModel([twtp['text'][i],]),
                    })
        test.extend(tst)
        # rank
        for item in tst:
            for i in range(len(numtwts)):
                lmranks[i].append(ranke(lms[i], item['lm']))
            randranks.append(randranke(places))
    # plot
    candls = ['-', '--']
    mks = ['o', '^', '*', 'v', 's']
    for i, n in enumerate(numtwts):
        lmeval = batcheval(lmranks[i], test['label'])
        plt.plot(lmeval['pos'], lmeval['rate'],
                label='tweet(s={0})'.format(n),
                marker=mks[i])
    # Random baseline: rate grows linearly with the number of candidates.
    plt.plot(lmeval['pos'], [float(r) / max(lmeval['pos']) for r in lmeval['pos']],
            ls='-.', marker='s',
            label='Random Baseline')
    plt.legend(loc='lower right')
    plt.ylabel('Rate containing Reference POI')
    plt.xlabel('Top $p$ places')
    plt.show()
def richrank(cities, names):
    """Rank held-out tweets per city with a 100-tweet LanguageModel and plot
    the top-p hit rate for each named city."""
    candls = ['-', '--']
    mks = ['o', '^', '*']
    for idx in range(len(cities)):
        lms = dict()
        test = Dataset()
        for pid in cities[idx]:
            # 100 tweets for training, 10 held out for testing.
            twtp = loadrows(GEOTWEET, ('place_id', 'text', 'created_at'),
                    ('place_id=\'{0}\''.format(pid),), 'sample',
                    'order by rand() limit 110')
            lms[pid] = LanguageModel(twtp['text'][:100])
            for cnt in range(100, 110):
                test.append({'label': twtp['place_id'][cnt],
                    'lm': LanguageModel([twtp['text'][cnt],])})
        lmranks = list()
        randranks = list()
        for twtlm in test:
            lmranks.append(ranke(lms, twtlm['lm']))
            randranks.append(randranke(cities[idx]))
        lmeval = batcheval(lmranks, test['label'])
        # rate[1] is P@1; 0.1 is the 10-place random-guess baseline.
        print names[idx], 'P@1', (lmeval['rate'][1] - 0.1)
        # NOTE: `idx/2` relies on Python 2 integer (floor) division.
        plt.plot(lmeval['pos'], lmeval['rate'], ls=candls[idx%2], marker=mks[idx/2],
                label='{0}($s=100$)'.format(names[idx]))
    # Random baseline: rate grows linearly with the number of candidates.
    plt.plot(lmeval['pos'], [float(r) / max(lmeval['pos']) for r in lmeval['pos']],
            ls='-.', marker='s',
            label='Random Baseline')
    plt.legend(loc='lower right')
    # NOTE(review): "referece" is a typo in the axis label; left unchanged here.
    plt.ylabel('Rate containing referece POI')
    plt.xlabel('Top $p$ places')
    plt.show()
def cntdist():
    """Plot the tweet-count distribution over POIs (log-log) from cntdist.csv.

    Expects one integer count per line in ./cntdist.csv.
    """
    with open('cntdist.csv') as fin:
        cnts = [int(cnt.strip()) for cnt in fin]
    plt.loglog(range(len(cnts)), cnts)
    plt.xlabel('POIs ordered by # of tweets')
    plt.ylabel('# of tweets')
    plt.show()
def run():
    """ Test this module

    Loads the place-id lists for four cities from ./data/ and runs one of
    the comparison experiments (the others are kept commented for reuse).
    """
    cities = list()
    for city in ['ch10_web.lst', 'la10_web.lst', 'ny10_web.lst', 'sf10_web.lst']:
        with open('data/' + city) as fin:
            # One place id per line.
            cities.append([p.strip() for p in fin])
    # cmpsparse(cities, [100, 25, 10, 5], 10)
    # cmpsparsecombine(cities, [100, 5], 10)
    cmptimeweb(cities, [100, 5], 10)
    # richrank(cities, ['Chicago', 'Los Angeles', 'New York', 'San Francisco'])
# richrank(cities, ['Chicago', 'Los Angeles', 'New York', 'San Francisco'])
# Script entry point.
if __name__ == '__main__':
    run()
| 40.173611
| 105
| 0.529761
| 2,048
| 17,355
| 4.461426
| 0.105469
| 0.026267
| 0.021013
| 0.038525
| 0.802123
| 0.795228
| 0.783955
| 0.778155
| 0.767976
| 0.764474
| 0
| 0.014903
| 0.276981
| 17,355
| 431
| 106
| 40.266821
| 0.713261
| 0.182714
| 0
| 0.704797
| 0
| 0
| 0.107836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.02952
| null | null | 0.059041
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
219f6ad2f2cec2bcb1fd88f7fe0356787c764f64
| 439
|
py
|
Python
|
tests/parser/true_negation.arity.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/true_negation.arity.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/true_negation.arity.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
# Parser round-trip fixture: a program using true (classical) negation on
# arity-0/1 predicates. The expected parser output is identical to the input.
# NOTE(review): `input` shadows the builtin; presumably the test harness reads
# these exact variable names, so they are kept as-is.
input = """
% test 1
-p :- p(1).
q :- -q(1).
r(1) :- -r.
-s(1) :- s.
% test 2
t(1) :- not -t.
-u(1) :- not u.
% test 3
f(2).
g(1).
-f(X,X) :- g(X).
% test 4
mana(a).
-nemo.
nemo(X) :- mana(X).
"""
# Expected output: the parser must reproduce the program verbatim.
output = """
% test 1
-p :- p(1).
q :- -q(1).
r(1) :- -r.
-s(1) :- s.
% test 2
t(1) :- not -t.
-u(1) :- not u.
% test 3
f(2).
g(1).
-f(X,X) :- g(X).
% test 4
mana(a).
-nemo.
nemo(X) :- mana(X).
"""
| 10.209302
| 20
| 0.353075
| 90
| 439
| 1.722222
| 0.222222
| 0.051613
| 0.077419
| 0.090323
| 0.929032
| 0.929032
| 0.929032
| 0.929032
| 0.929032
| 0.929032
| 0
| 0.079208
| 0.309795
| 439
| 42
| 21
| 10.452381
| 0.432343
| 0
| 0
| 0.944444
| 0
| 0
| 0.922693
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
21e056e2f0efe73d54dc1fad0839d2fd244737f1
| 143
|
py
|
Python
|
keras/utils/vis_utils.py
|
ikingye/keras
|
1a3ee8441933fc007be6b2beb47af67998d50737
|
[
"MIT"
] | 5
|
2020-11-30T22:26:03.000Z
|
2020-12-01T22:34:25.000Z
|
keras/utils/vis_utils.py
|
ikingye/keras
|
1a3ee8441933fc007be6b2beb47af67998d50737
|
[
"MIT"
] | 10
|
2020-12-01T22:55:29.000Z
|
2020-12-11T18:31:46.000Z
|
keras/utils/vis_utils.py
|
ikingye/keras
|
1a3ee8441933fc007be6b2beb47af67998d50737
|
[
"MIT"
] | 15
|
2020-11-30T22:12:22.000Z
|
2020-12-09T01:32:48.000Z
|
"""Utilities related to model visualization."""
from tensorflow.keras.utils import model_to_dot
from tensorflow.keras.utils import plot_model
| 28.6
| 47
| 0.825175
| 20
| 143
| 5.75
| 0.6
| 0.243478
| 0.330435
| 0.417391
| 0.521739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097902
| 143
| 4
| 48
| 35.75
| 0.891473
| 0.286713
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
21f6b8e8f8168e1e01dce22f031be2e9c201c978
| 29,232
|
py
|
Python
|
pybind/slxos/v17s_1_02/brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17s_1_02/brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17s_1_02/brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class on_board(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface-ext - based on the path /brocade_interface_ext_rpc/get-media-detail/output/interface/on-board. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__speed','__connector','__encoding','__vendor_name','__vendor_oui','__vendor_pn','__vendor_rev',)
_yang_name = 'on-board'
_rest_name = 'on-board'
_pybind_generated_by = 'container'
    def __init__(self, *args, **kwargs):
        """Initialise the auto-generated on-board container.

        Resolves the path helper and extmethods (explicit kwarg, inherited
        from the parent, or disabled), builds the YANG leaf wrappers, and
        optionally copies values from a single supplied object.
        """
        # Path helper: explicit kwarg wins; otherwise inherit from parent.
        path_helper_ = kwargs.pop("path_helper", None)
        if path_helper_ is False:
            self._path_helper = False
        elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
            self._path_helper = path_helper_
        elif hasattr(self, "_parent"):
            path_helper_ = getattr(self._parent, "_path_helper", False)
            self._path_helper = path_helper_
        else:
            self._path_helper = False
        # Extension methods: same resolution order as the path helper.
        extmethods = kwargs.pop("extmethods", None)
        if extmethods is False:
            self._extmethods = False
        elif extmethods is not None and isinstance(extmethods, dict):
            self._extmethods = extmethods
        elif hasattr(self, "_parent"):
            extmethods = getattr(self._parent, "_extmethods", None)
            self._extmethods = extmethods
        else:
            self._extmethods = False
        # Auto-generated leaf wrappers (string and enumeration leaves).
        self.__vendor_rev = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-rev", rest_name="vendor-rev", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
        self.__encoding = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'sonet-scrambled': {'value': 9}, u'4b5b': {'value': 6}, u'rz': {'value': 1}, u'8b10b': {'value': 4}, u'nrz': {'value': 2}, u'sonet': {'value': 3}, u'manchester': {'value': 7}, u'unknown': {'value': 10}, u'64b66b': {'value': 5}, u'ieee-802-3ab': {'value': 8}},), is_leaf=True, yang_name="encoding", rest_name="encoding", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
        self.__vendor_oui = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-oui", rest_name="vendor-oui", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
        self.__connector = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'no-separable-connector': {'value': 36}, u'mpo-parallel-optic': {'value': 12}, u'style-2-copper': {'value': 3}, u'mpo': {'value': 13}, u'fiber-jack': {'value': 6}, u'unknown': {'value': 35}, u'bnc-tnc': {'value': 4}, u'style-1-copper': {'value': 2}, u'mu': {'value': 9}, u'cat-5-copper-cable': {'value': 34}, u'copper-pigtail': {'value': 33}, u'optical-pigtail': {'value': 11}, u'coaxial': {'value': 5}, u'hssdc-ii': {'value': 32}, u'sc': {'value': 1}, u'sg': {'value': 10}, u'mt-rj': {'value': 8}, u'lc': {'value': 7}},), is_leaf=True, yang_name="connector", rest_name="connector", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
        self.__vendor_pn = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-pn", rest_name="vendor-pn", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
        self.__vendor_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-name", rest_name="vendor-name", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
        self.__speed = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'8Gbps': {'value': 9}, u'nil': {'value': 1}, u'40Gbps': {'value': 5}, u'1Gbps': {'value': 3}, u'auto': {'value': 2}, u'25Gbps': {'value': 12}, u'10Gbps': {'value': 4}, u'4Gbps': {'value': 8}, u'100Gbps': {'value': 11}, u'100Mbps': {'value': 6}, u'16Gbps': {'value': 10}, u'2Gbps': {'value': 7}},), is_leaf=True, yang_name="speed", rest_name="speed", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='line-speed', is_config=True)
        # Optional copy-construction from a single compatible object.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Skip elements that were never changed on the source object.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_interface_ext_rpc', u'get-media-detail', u'output', u'interface', u'on-board']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'get-media-detail', u'output', u'interface', u'on-board']
    def _get_speed(self):
        """
    Getter method for speed, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/speed (line-speed)

    YANG Description: The actual line speed of this interface.
    """
        # Returns the YANGDynClass wrapper, not a plain value.
        return self.__speed
    def _set_speed(self, v, load=False):
        """
    Setter method for speed, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/speed (line-speed)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_speed is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_speed() directly.

    YANG Description: The actual line speed of this interface.
    """
        # Unwrap a typed value into its underlying representation first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap through the generated restricted type to validate `v`.
            t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'8Gbps': {'value': 9}, u'nil': {'value': 1}, u'40Gbps': {'value': 5}, u'1Gbps': {'value': 3}, u'auto': {'value': 2}, u'25Gbps': {'value': 12}, u'10Gbps': {'value': 4}, u'4Gbps': {'value': 8}, u'100Gbps': {'value': 11}, u'100Mbps': {'value': 6}, u'16Gbps': {'value': 10}, u'2Gbps': {'value': 7}},), is_leaf=True, yang_name="speed", rest_name="speed", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='line-speed', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """speed must be of a type compatible with line-speed""",
                'defined-type': "brocade-interface-ext:line-speed",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'8Gbps': {'value': 9}, u'nil': {'value': 1}, u'40Gbps': {'value': 5}, u'1Gbps': {'value': 3}, u'auto': {'value': 2}, u'25Gbps': {'value': 12}, u'10Gbps': {'value': 4}, u'4Gbps': {'value': 8}, u'100Gbps': {'value': 11}, u'100Mbps': {'value': 6}, u'16Gbps': {'value': 10}, u'2Gbps': {'value': 7}},), is_leaf=True, yang_name="speed", rest_name="speed", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='line-speed', is_config=True)""",
            })
        self.__speed = t
        # Notify the parent framework hook, if present, that a value changed.
        if hasattr(self, '_set'):
            self._set()
def _unset_speed(self):
    # Discard any configured value by rebuilding the generated default
    # YANGDynClass leaf for 'speed' (pyangbind-generated; keep in sync with _set_speed).
    self.__speed = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'8Gbps': {'value': 9}, u'nil': {'value': 1}, u'40Gbps': {'value': 5}, u'1Gbps': {'value': 3}, u'auto': {'value': 2}, u'25Gbps': {'value': 12}, u'10Gbps': {'value': 4}, u'4Gbps': {'value': 8}, u'100Gbps': {'value': 11}, u'100Mbps': {'value': 6}, u'16Gbps': {'value': 10}, u'2Gbps': {'value': 7}},), is_leaf=True, yang_name="speed", rest_name="speed", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='line-speed', is_config=True)
def _get_connector(self):
    """
    Getter for the ``connector`` leaf, mapped from the YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/connector
    (enumeration).
    YANG Description: This specifies the type of connector
    connected to the interface.
    """
    current_value = self.__connector
    return current_value
def _set_connector(self, v, load=False):
    """
    Setter method for connector, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/connector (enumeration)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_connector is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_connector() directly.
    YANG Description: This specifies the type of connector
    connected to the interface.
    """
    # Unwrap values that carry their own coercion helper (pyangbind wrapper types).
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce v against the generated enumeration; an incompatible
        # value surfaces as TypeError or ValueError from YANGDynClass.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'no-separable-connector': {'value': 36}, u'mpo-parallel-optic': {'value': 12}, u'style-2-copper': {'value': 3}, u'mpo': {'value': 13}, u'fiber-jack': {'value': 6}, u'unknown': {'value': 35}, u'bnc-tnc': {'value': 4}, u'style-1-copper': {'value': 2}, u'mu': {'value': 9}, u'cat-5-copper-cable': {'value': 34}, u'copper-pigtail': {'value': 33}, u'optical-pigtail': {'value': 11}, u'coaxial': {'value': 5}, u'hssdc-ii': {'value': 32}, u'sc': {'value': 1}, u'sg': {'value': 10}, u'mt-rj': {'value': 8}, u'lc': {'value': 7}},), is_leaf=True, yang_name="connector", rest_name="connector", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated pyangbind error payload describing the expected type.
        raise ValueError({
            'error-string': """connector must be of a type compatible with enumeration""",
            'defined-type': "brocade-interface-ext:enumeration",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'no-separable-connector': {'value': 36}, u'mpo-parallel-optic': {'value': 12}, u'style-2-copper': {'value': 3}, u'mpo': {'value': 13}, u'fiber-jack': {'value': 6}, u'unknown': {'value': 35}, u'bnc-tnc': {'value': 4}, u'style-1-copper': {'value': 2}, u'mu': {'value': 9}, u'cat-5-copper-cable': {'value': 34}, u'copper-pigtail': {'value': 33}, u'optical-pigtail': {'value': 11}, u'coaxial': {'value': 5}, u'hssdc-ii': {'value': 32}, u'sc': {'value': 1}, u'sg': {'value': 10}, u'mt-rj': {'value': 8}, u'lc': {'value': 7}},), is_leaf=True, yang_name="connector", rest_name="connector", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)""",
        })
    self.__connector = t
    # Invoke the change hook when the surrounding class defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_connector(self):
    # Discard any configured value by rebuilding the generated default
    # YANGDynClass leaf for 'connector' (keep in sync with _set_connector).
    self.__connector = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'no-separable-connector': {'value': 36}, u'mpo-parallel-optic': {'value': 12}, u'style-2-copper': {'value': 3}, u'mpo': {'value': 13}, u'fiber-jack': {'value': 6}, u'unknown': {'value': 35}, u'bnc-tnc': {'value': 4}, u'style-1-copper': {'value': 2}, u'mu': {'value': 9}, u'cat-5-copper-cable': {'value': 34}, u'copper-pigtail': {'value': 33}, u'optical-pigtail': {'value': 11}, u'coaxial': {'value': 5}, u'hssdc-ii': {'value': 32}, u'sc': {'value': 1}, u'sg': {'value': 10}, u'mt-rj': {'value': 8}, u'lc': {'value': 7}},), is_leaf=True, yang_name="connector", rest_name="connector", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
def _get_encoding(self):
    """
    Getter for the ``encoding`` leaf, mapped from the YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/encoding
    (enumeration).
    YANG Description: This indicates the type of encoding used to
    transmit the data on this interface.
    """
    current_value = self.__encoding
    return current_value
def _set_encoding(self, v, load=False):
    """
    Setter method for encoding, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/encoding (enumeration)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_encoding is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_encoding() directly.
    YANG Description: This indicates the type of encoding used to
    transmit the data on this interface.
    """
    # Unwrap values that carry their own coercion helper (pyangbind wrapper types).
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce v against the generated enumeration; an incompatible
        # value surfaces as TypeError or ValueError from YANGDynClass.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'sonet-scrambled': {'value': 9}, u'4b5b': {'value': 6}, u'rz': {'value': 1}, u'8b10b': {'value': 4}, u'nrz': {'value': 2}, u'sonet': {'value': 3}, u'manchester': {'value': 7}, u'unknown': {'value': 10}, u'64b66b': {'value': 5}, u'ieee-802-3ab': {'value': 8}},), is_leaf=True, yang_name="encoding", rest_name="encoding", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated pyangbind error payload describing the expected type.
        raise ValueError({
            'error-string': """encoding must be of a type compatible with enumeration""",
            'defined-type': "brocade-interface-ext:enumeration",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'sonet-scrambled': {'value': 9}, u'4b5b': {'value': 6}, u'rz': {'value': 1}, u'8b10b': {'value': 4}, u'nrz': {'value': 2}, u'sonet': {'value': 3}, u'manchester': {'value': 7}, u'unknown': {'value': 10}, u'64b66b': {'value': 5}, u'ieee-802-3ab': {'value': 8}},), is_leaf=True, yang_name="encoding", rest_name="encoding", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)""",
        })
    self.__encoding = t
    # Invoke the change hook when the surrounding class defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_encoding(self):
    # Discard any configured value by rebuilding the generated default
    # YANGDynClass leaf for 'encoding' (keep in sync with _set_encoding).
    self.__encoding = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'sonet-scrambled': {'value': 9}, u'4b5b': {'value': 6}, u'rz': {'value': 1}, u'8b10b': {'value': 4}, u'nrz': {'value': 2}, u'sonet': {'value': 3}, u'manchester': {'value': 7}, u'unknown': {'value': 10}, u'64b66b': {'value': 5}, u'ieee-802-3ab': {'value': 8}},), is_leaf=True, yang_name="encoding", rest_name="encoding", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
def _get_vendor_name(self):
    """
    Getter for the ``vendor_name`` leaf, mapped from the YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/vendor_name
    (string).
    YANG Description: This indicates the Vendor of this interface.
    """
    current_value = self.__vendor_name
    return current_value
def _set_vendor_name(self, v, load=False):
    """
    Setter method for vendor_name, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/vendor_name (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_vendor_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_vendor_name() directly.
    YANG Description: This indicates the Vendor of this interface.
    """
    # Unwrap values that carry their own coercion helper (pyangbind wrapper types).
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce v as a YANG string leaf; incompatible values raise.
        t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="vendor-name", rest_name="vendor-name", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated pyangbind error payload describing the expected type.
        raise ValueError({
            'error-string': """vendor_name must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-name", rest_name="vendor-name", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)""",
        })
    self.__vendor_name = t
    # Invoke the change hook when the surrounding class defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_vendor_name(self):
    # Discard any configured value by rebuilding the generated default
    # YANGDynClass leaf for 'vendor-name' (keep in sync with _set_vendor_name).
    self.__vendor_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-name", rest_name="vendor-name", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
def _get_vendor_oui(self):
    """
    Getter for the ``vendor_oui`` leaf, mapped from the YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/vendor_oui
    (string).
    YANG Description: This indicates the Vendor IEEE company ID.
    """
    current_value = self.__vendor_oui
    return current_value
def _set_vendor_oui(self, v, load=False):
    """
    Setter method for vendor_oui, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/vendor_oui (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_vendor_oui is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_vendor_oui() directly.
    YANG Description: This indicates the Vendor IEEE company ID.
    """
    # Unwrap values that carry their own coercion helper (pyangbind wrapper types).
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce v as a YANG string leaf; incompatible values raise.
        t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="vendor-oui", rest_name="vendor-oui", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated pyangbind error payload describing the expected type.
        raise ValueError({
            'error-string': """vendor_oui must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-oui", rest_name="vendor-oui", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)""",
        })
    self.__vendor_oui = t
    # Invoke the change hook when the surrounding class defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_vendor_oui(self):
    # Discard any configured value by rebuilding the generated default
    # YANGDynClass leaf for 'vendor-oui' (keep in sync with _set_vendor_oui).
    self.__vendor_oui = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-oui", rest_name="vendor-oui", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
def _get_vendor_pn(self):
    """
    Getter for the ``vendor_pn`` leaf, mapped from the YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/vendor_pn
    (string).
    YANG Description: This indicates the Part number.
    """
    current_value = self.__vendor_pn
    return current_value
def _set_vendor_pn(self, v, load=False):
    """
    Setter method for vendor_pn, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/vendor_pn (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_vendor_pn is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_vendor_pn() directly.
    YANG Description: This indicates the Part number.
    """
    # Unwrap values that carry their own coercion helper (pyangbind wrapper types).
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce v as a YANG string leaf; incompatible values raise.
        t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="vendor-pn", rest_name="vendor-pn", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated pyangbind error payload describing the expected type.
        raise ValueError({
            'error-string': """vendor_pn must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-pn", rest_name="vendor-pn", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)""",
        })
    self.__vendor_pn = t
    # Invoke the change hook when the surrounding class defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_vendor_pn(self):
    # Discard any configured value by rebuilding the generated default
    # YANGDynClass leaf for 'vendor-pn' (keep in sync with _set_vendor_pn).
    self.__vendor_pn = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-pn", rest_name="vendor-pn", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
def _get_vendor_rev(self):
    """
    Getter for the ``vendor_rev`` leaf, mapped from the YANG path
    /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/vendor_rev
    (string).
    YANG Description: This indicates the Revision level.
    """
    current_value = self.__vendor_rev
    return current_value
def _set_vendor_rev(self, v, load=False):
    """
    Setter method for vendor_rev, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/on_board/vendor_rev (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_vendor_rev is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_vendor_rev() directly.
    YANG Description: This indicates the Revision level.
    """
    # Unwrap values that carry their own coercion helper (pyangbind wrapper types).
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce v as a YANG string leaf; incompatible values raise.
        t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="vendor-rev", rest_name="vendor-rev", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated pyangbind error payload describing the expected type.
        raise ValueError({
            'error-string': """vendor_rev must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-rev", rest_name="vendor-rev", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)""",
        })
    self.__vendor_rev = t
    # Invoke the change hook when the surrounding class defines one.
    if hasattr(self, '_set'):
        self._set()
def _unset_vendor_rev(self):
    # Discard any configured value by rebuilding the generated default
    # YANGDynClass leaf for 'vendor-rev' (keep in sync with _set_vendor_rev).
    self.__vendor_rev = YANGDynClass(base=unicode, is_leaf=True, yang_name="vendor-rev", rest_name="vendor-rev", parent=self, choice=(u'interface-identifier', u'on-board'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='string', is_config=True)
# Expose each generated leaf as a plain attribute backed by the private
# getter/setter pair above. __builtin__.property is used so a local name
# `property` elsewhere in this generated module cannot shadow the builtin.
speed = __builtin__.property(_get_speed, _set_speed)
connector = __builtin__.property(_get_connector, _set_connector)
encoding = __builtin__.property(_get_encoding, _set_encoding)
vendor_name = __builtin__.property(_get_vendor_name, _set_vendor_name)
vendor_oui = __builtin__.property(_get_vendor_oui, _set_vendor_oui)
vendor_pn = __builtin__.property(_get_vendor_pn, _set_vendor_pn)
vendor_rev = __builtin__.property(_get_vendor_rev, _set_vendor_rev)
# YANG 'choice' membership for these leaves, and the element map that
# pyangbind walks when serialising this container.
__choices__ = {u'interface-identifier': {u'on-board': [u'speed', u'connector', u'encoding', u'vendor_name', u'vendor_oui', u'vendor_pn', u'vendor_rev']}}
_pyangbind_elements = {'speed': speed, 'connector': connector, 'encoding': encoding, 'vendor_name': vendor_name, 'vendor_oui': vendor_oui, 'vendor_pn': vendor_pn, 'vendor_rev': vendor_rev, }
| 80.087671
| 1,073
| 0.692187
| 3,999
| 29,232
| 4.846462
| 0.063266
| 0.062742
| 0.074506
| 0.037408
| 0.873484
| 0.850627
| 0.841804
| 0.827718
| 0.82359
| 0.804706
| 0
| 0.01429
| 0.152573
| 29,232
| 364
| 1,074
| 80.307692
| 0.768085
| 0.170053
| 0
| 0.45933
| 0
| 0.033493
| 0.39777
| 0.15869
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114833
| false
| 0
| 0.038278
| 0
| 0.277512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
801641ec8ca21b888e1b44f93a2206239e808d61
| 14,101
|
py
|
Python
|
AutotestWebD/all_models/models/A0012_admin.py
|
yangjourney/sosotest
|
2e88099a829749910ca325253c9b1a2e368d21a0
|
[
"MIT"
] | 422
|
2019-08-18T05:04:20.000Z
|
2022-03-31T06:49:19.000Z
|
AutotestWebD/all_models/models/A0012_admin.py
|
LinSongJian1985/sosotest
|
091863dee531b5726650bb63efd6f169267cbeb4
|
[
"MIT"
] | 10
|
2019-10-24T09:55:38.000Z
|
2021-09-29T17:28:43.000Z
|
AutotestWebD/all_models/models/A0012_admin.py
|
LinSongJian1985/sosotest
|
091863dee531b5726650bb63efd6f169267cbeb4
|
[
"MIT"
] | 202
|
2019-08-18T05:04:27.000Z
|
2022-03-30T05:57:18.000Z
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
from all_models.models.A0001_user import *
from all_models.models.A0002_config import *
from all_models.models.A0003_attribute import *
from all_models.models.A0006_testcase import *
import django.utils.timezone
import datetime
# Backend admin team (user group).
class TbAdminTeam(models.Model):
    """Backend admin team; rows are soft-deleted via ``state``."""
    teamName = models.CharField(db_column='teamName', max_length=100, verbose_name='小组名称')
    teamKey = models.CharField(db_column='teamKey', max_length=100, unique=True, default='', verbose_name='小组key')
    teamDesc = models.CharField(db_column='teamDesc', max_length=100, verbose_name='小组描述')
    state = models.IntegerField(default=1, verbose_name='状态 0删除 1有效')
    addBy = models.CharField(db_column='addBy', max_length=25, null=True, blank=True, verbose_name='创建者登录名')
    modBy = models.CharField(db_column='modBy', max_length=25, null=True, blank=True, verbose_name='修改者登录名')
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name='创建时间')
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name='修改时间')

    class Meta:
        db_table = 'tb_admin_team'
# Backend admin user account.
class TbAdminUser(models.Model):
    """Backend console user; ``superManager`` marks super administrators."""
    loginName = models.CharField(db_column='loginName', max_length=100, unique=True, verbose_name='登录名')
    passWord = models.CharField(db_column='passWord', max_length=100, verbose_name='密码')
    userName = models.CharField(db_column='userName', max_length=100, verbose_name='用户名')
    email = models.CharField(max_length=50, default='', verbose_name='用户邮箱')
    superManager = models.IntegerField(db_column='superManager', default=0, verbose_name='是否为超级管理员,0否,1是')
    state = models.IntegerField(default=1, verbose_name='状态 0删除 1有效')
    addBy = models.CharField(db_column='addBy', max_length=25, null=True, blank=True, verbose_name='创建者登录名')
    modBy = models.CharField(db_column='modBy', max_length=25, null=True, blank=True, verbose_name='修改者登录名')
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name='创建时间')
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name='修改时间')

    class Meta:
        db_table = 'tb_admin_user'
# Backend admin role.
class TbAdminRole(models.Model):
    """Named backend role, identified by the unique ``roleKey``."""
    roleName = models.CharField(db_column='roleName', max_length=100, verbose_name='角色名')
    roleKey = models.CharField(db_column='roleKey', max_length=100, unique=True, default='', verbose_name='角色key')
    state = models.IntegerField(default=1, verbose_name='状态 0删除 1有效')
    addBy = models.CharField(db_column='addBy', max_length=25, null=True, blank=True, verbose_name='创建者登录名')
    modBy = models.CharField(db_column='modBy', max_length=25, null=True, blank=True, verbose_name='修改者登录名')
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name='创建时间')
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name='修改时间')

    class Meta:
        db_table = 'tb_admin_role'
# 权限
# class TbAdminPermission(models.Model):
# permissionName = models.CharField(max_length=100, db_column="permissionName", verbose_name="权限名称")
# permissionKey = models.CharField(max_length=100, db_column="permissionKey", unique=True, verbose_name="权限key", default="")
# isDefaultPermission = models.IntegerField(default=0, verbose_name="状态 0不是默认的 1是默认的")
# state = models.IntegerField(default=1, verbose_name="状态 0删除 1有效")
# addBy = models.CharField(max_length=25, db_column='addBy', null=True, blank=True, verbose_name="创建者登录名")
# modBy = models.CharField(max_length=25, db_column='modBy', null=True, blank=True, verbose_name="修改者登录名")
# addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name="创建时间")
# modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name="修改时间")
#
# class Meta:
# db_table = 'tb_admin_permissions'
# Backend management permission definition.
class TbAdminManagePermission(models.Model):
    """A backend permission, identified by the unique ``permissionKey``."""
    permissionName = models.CharField(db_column='permissionName', max_length=100, verbose_name='权限名称')
    permissionKey = models.CharField(db_column='permissionKey', max_length=100, unique=True, default='', verbose_name='权限key')
    permissionValue = models.CharField(db_column='permissionValue', max_length=200, default='', verbose_name='权限值')
    isDefaultPermission = models.IntegerField(default=0, verbose_name='状态 0不是默认的 1是默认的')
    state = models.IntegerField(default=1, verbose_name='状态 0删除 1有效')
    addBy = models.CharField(db_column='addBy', max_length=25, null=True, blank=True, verbose_name='创建者登录名')
    modBy = models.CharField(db_column='modBy', max_length=25, null=True, blank=True, verbose_name='修改者登录名')
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name='创建时间')
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name='修改时间')

    class Meta:
        db_table = 'tb_admin_manage_permission'
# Backend user <-> management-permission relation.
class TbAdminManageUserPermissionRelation(models.Model):
    """Links a backend login name to a management ``permissionKey``."""
    loginName = models.CharField(db_column='loginName', max_length=20, verbose_name='登录账号')
    permissionKey = models.CharField(db_column='permissionKey', max_length=100, default='', verbose_name='权限key')
    state = models.IntegerField(default=1, verbose_name='状态 0删除 1有效')
    addBy = models.CharField(db_column='addBy', max_length=25, null=True, blank=True, verbose_name='创建者登录名')
    modBy = models.CharField(db_column='modBy', max_length=25, null=True, blank=True, verbose_name='修改者登录名')
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name='创建时间')
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name='修改时间')

    class Meta:
        db_table = 'tb_admin_manage_user_permission_relation'
# Backend team <-> permission relation.
class TbAdminTeamPermissionRelation(models.Model):
    """Links a ``teamKey`` to a ``permissionKey``."""
    teamKey = models.CharField(db_column='teamKey', max_length=100, default='', verbose_name='小组key')
    permissionKey = models.CharField(db_column='permissionKey', max_length=100, default='', verbose_name='权限key')
    state = models.IntegerField(default=1, verbose_name='状态 0删除 1有效')
    addBy = models.CharField(db_column='addBy', max_length=25, null=True, blank=True, verbose_name='创建者登录名')
    modBy = models.CharField(db_column='modBy', max_length=25, null=True, blank=True, verbose_name='修改者登录名')
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name='创建时间')
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name='修改时间')

    class Meta:
        db_table = 'tb_admin_team_permission_relation'
# Backend user <-> permission relation.
class TbAdminUserPermissionRelation(models.Model):
    """Links a backend login name to a ``permissionKey``."""
    loginName = models.CharField(db_column='loginName', max_length=20, verbose_name='登录账号')
    permissionKey = models.CharField(db_column='permissionKey', max_length=100, default='', verbose_name='权限key')
    state = models.IntegerField(default=1, verbose_name='状态 0删除 1有效')
    addBy = models.CharField(db_column='addBy', max_length=25, null=True, blank=True, verbose_name='创建者登录名')
    modBy = models.CharField(db_column='modBy', max_length=25, null=True, blank=True, verbose_name='修改者登录名')
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name='创建时间')
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name='修改时间')

    class Meta:
        db_table = 'tb_admin_user_permission_relation'
# Backend user <-> team membership relation.
class TbAdminUserTeamRelation(models.Model):
    """Links a backend login name to a ``teamKey``."""
    loginName = models.CharField(db_column='loginName', max_length=100, default='', verbose_name='登录名')
    teamKey = models.CharField(db_column='teamKey', max_length=100, default='', verbose_name='小组key')
    state = models.IntegerField(default=1, verbose_name='状态 0删除 1有效')
    addBy = models.CharField(db_column='addBy', max_length=25, null=True, blank=True, verbose_name='创建者登录名')
    modBy = models.CharField(db_column='modBy', max_length=25, null=True, blank=True, verbose_name='修改者登录名')
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name='创建时间')
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name='修改时间')

    class Meta:
        db_table = 'tb_admin_user_team_relation'
# Backend role <-> permission relation.
class TbAdminRolePermissionRelation(models.Model):
    # NOTE(review): unique=True on BOTH roleKey and permissionKey means each role
    # and each permission can appear in at most one row, which prevents a true
    # many-to-many mapping. Sibling relation tables (e.g. TbAdminUserRoleRelation)
    # carry no unique constraints. Looks like a schema bug — confirm intent and
    # existing data before changing, since dropping unique alters the DB schema.
    roleKey = models.CharField(max_length=100, db_column="roleKey", unique=True, verbose_name="角色key", default="")
    permissionKey = models.CharField(max_length=100, db_column="permissionKey", unique=True, verbose_name="权限key",default="")
    state = models.IntegerField(default=1, verbose_name="状态 0删除 1有效")
    addBy = models.CharField(max_length=25, db_column='addBy', null=True, blank=True, verbose_name="创建者登录名")
    modBy = models.CharField(max_length=25, db_column='modBy', null=True, blank=True, verbose_name="修改者登录名")
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name="创建时间")
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name="修改时间")
    class Meta:
        db_table = 'tb_admin_role_permission_relation'
# Backend user <-> role relation (scoped by team).
class TbAdminUserRoleRelation(models.Model):
    """Links a backend login name to a ``roleKey`` within a ``teamKey``."""
    roleKey = models.CharField(db_column='roleKey', max_length=100, default='', verbose_name='角色key')
    loginName = models.CharField(db_column='loginName', max_length=100, default='', verbose_name='登录名')
    teamKey = models.CharField(db_column='teamKey', max_length=100, default='', verbose_name='小组key')
    state = models.IntegerField(default=1, verbose_name='状态 0删除 1有效')
    addBy = models.CharField(db_column='addBy', max_length=25, null=True, blank=True, verbose_name='创建者登录名')
    modBy = models.CharField(db_column='modBy', max_length=25, null=True, blank=True, verbose_name='修改者登录名')
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name='创建时间')
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name='修改时间')

    class Meta:
        db_table = 'tb_admin_user_role_relation'
# #后台接口和页面关联表
# class TbAdminInterfaceModuleRelation(models.Model):
# url = models.CharField(max_length=255, db_column="url", verbose_name="url", default="")
# moduleName = models.CharField(max_length=100, db_column="moduleName", verbose_name="接口所属页面", default="")
# state = models.IntegerField(default=1, verbose_name="状态 0删除 1有效")
# addBy = models.CharField(max_length=25, db_column='addBy', null=True, blank=True, verbose_name="创建者登录名")
# modBy = models.CharField(max_length=25, db_column='modBy', null=True, blank=True, verbose_name="修改者登录名")
# addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name="创建时间")
# modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name="修改时间")
#
# class Meta:
# db_table = 'tb_admin_interface_module_relation'
# Backend interface (URL) <-> permission relation.
class TbAdminInterfacePermissionRelation(models.Model):
    """Maps a backend URL and action to a ``permissionKey``."""
    permissionName = models.CharField(db_column='permissionName', max_length=255, default='')  # in theory this is unique as well
    permissionKey = models.CharField(db_column='permissionKey', max_length=100, unique=True, default='', verbose_name='权限key')  # used when linking permissions
    url = models.CharField(db_column='url', max_length=255, default='', verbose_name='url')
    permission = models.CharField(db_column='permission', max_length=100, default='', verbose_name='权限')  # used for permission checks: run delete check edit copy add
    isDefault = models.IntegerField(default=0, verbose_name='是否为默认权限 0否 1是')
    state = models.IntegerField(default=1, verbose_name='状态 0删除 1有效')
    addBy = models.CharField(db_column='addBy', max_length=25, null=True, blank=True, verbose_name='创建者登录名')
    modBy = models.CharField(db_column='modBy', max_length=25, null=True, blank=True, verbose_name='修改者登录名')
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name='创建时间')
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name='修改时间')

    class Meta:
        db_table = 'tb_admin_interface_permission_relation'
# Platform-level backend permission definition.
class TbAdminPlatformPermission(models.Model):
    """A platform permission, identified by the unique ``permissionKey``."""
    permissionName = models.CharField(db_column='permissionName', max_length=255, default='', verbose_name='权限Name')
    permissionKey = models.CharField(db_column='permissionKey', max_length=100, unique=True, default='', verbose_name='权限key')
    state = models.IntegerField(default=1, verbose_name='状态 0删除 1有效')
    addBy = models.CharField(db_column='addBy', max_length=25, null=True, blank=True, verbose_name='创建者登录名')
    modBy = models.CharField(db_column='modBy', max_length=25, null=True, blank=True, verbose_name='修改者登录名')
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name='创建时间')
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name='修改时间')

    class Meta:
        db_table = 'tb_admin_platform_permission'
# Backend user <-> platform-permission relation.
class TbAdminPlatformPermissionUserRelation(models.Model):
    """Links a backend login name to a platform ``permissionKey``."""
    loginName = models.CharField(db_column='loginName', max_length=100, verbose_name='登录名')
    permissionKey = models.CharField(db_column='permissionKey', max_length=100, default='', verbose_name='权限key')
    state = models.IntegerField(default=1, verbose_name='状态 0删除 1有效')
    addBy = models.CharField(db_column='addBy', max_length=25, null=True, blank=True, verbose_name='创建者登录名')
    modBy = models.CharField(db_column='modBy', max_length=25, null=True, blank=True, verbose_name='修改者登录名')
    addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name='创建时间')
    modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name='修改时间')

    class Meta:
        db_table = 'tb_admin_platform_permission_user_relation'
| 62.393805
| 145
| 0.751223
| 1,864
| 14,101
| 5.464592
| 0.104077
| 0.12419
| 0.10161
| 0.153151
| 0.818378
| 0.809935
| 0.806303
| 0.78225
| 0.78225
| 0.77744
| 0
| 0.019326
| 0.115666
| 14,101
| 226
| 146
| 62.393805
| 0.797514
| 0.162967
| 0
| 0.641892
| 1
| 0
| 0.133742
| 0.02782
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.006757
| 0.054054
| 0
| 0.912162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8018641fda38d6d50d92794e66133dea174fa4e4
| 51
|
py
|
Python
|
shhs/parser/__init__.py
|
bdh-team-12/sleep-predictions-through-deep-learning
|
7664cdffc0a0b0e732bffc95fd01e3ea27687025
|
[
"MIT"
] | 7
|
2019-02-23T17:57:25.000Z
|
2021-03-19T13:32:28.000Z
|
shhs/parser/__init__.py
|
bdh-team-12/sleep-predictions-through-deep-learning
|
7664cdffc0a0b0e732bffc95fd01e3ea27687025
|
[
"MIT"
] | 7
|
2019-03-02T16:55:57.000Z
|
2019-04-27T20:11:12.000Z
|
shhs/parser/__init__.py
|
bdh-team-12/sleep-predictions-through-deep-learning
|
7664cdffc0a0b0e732bffc95fd01e3ea27687025
|
[
"MIT"
] | null | null | null |
from . import xml_profusion
from . import xml_nsrr
| 17
| 27
| 0.803922
| 8
| 51
| 4.875
| 0.625
| 0.512821
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 51
| 2
| 28
| 25.5
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
805549c81d9bd0b0783fb83453c3f31892f8f851
| 6,501
|
py
|
Python
|
simon_auto.py
|
ShangqunYu/Rainbow_RBF-DQN
|
3449f7808e7a7399d90cb79b19e7c2360159897c
|
[
"MIT"
] | null | null | null |
simon_auto.py
|
ShangqunYu/Rainbow_RBF-DQN
|
3449f7808e7a7399d90cb79b19e7c2360159897c
|
[
"MIT"
] | null | null | null |
simon_auto.py
|
ShangqunYu/Rainbow_RBF-DQN
|
3449f7808e7a7399d90cb79b19e7c2360159897c
|
[
"MIT"
] | null | null | null |
import os

# Batch driver for RBF-DQN ablation runs.  The original file spelled out all
# 30 commands by hand; they are generated here instead, in exactly the same
# order: for each environment, 5 "vanilla" seeds then 5 "per" seeds.

# (hyper_parameter_name, experiment_name) per MuJoCo environment.
_ENVIRONMENTS = [
    ("30", "./results/Hopper"),
    ("40", "./results/HalfCheetah"),
    ("50", "./results/Ant"),
]
# (run_title, value passed to --per); every other Rainbow component is off.
_CONFIGS = [("vanilla", "False"), ("per", "True")]
_SEEDS = range(5)

for hyper_name, experiment_name in _ENVIRONMENTS:
    for run_title, per_flag in _CONFIGS:
        for seed in _SEEDS:
            # Each run is launched synchronously via the shell, as before.
            os.system(
                "python experiments/experiment.py"
                f" --hyper_parameter_name {hyper_name}"
                f" --seed {seed}"
                f" --experiment_name \"{experiment_name}\""
                f" --run_title \"{run_title}\""
                " --double False"
                f" --per {per_flag}"
                " --nstep 1"
                " --dueling False"
                " --noisy_layers False"
            )
| 171.078947
| 222
| 0.740348
| 902
| 6,501
| 5.169623
| 0.037694
| 0.051469
| 0.090071
| 0.160841
| 0.998284
| 0.998284
| 0.998284
| 0.998284
| 0.998284
| 0.998284
| 0
| 0.020363
| 0.093524
| 6,501
| 37
| 223
| 175.702703
| 0.770915
| 0
| 0
| 0
| 0
| 0
| 0.805261
| 0.21689
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.032258
| 0
| 0.032258
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
1d234b2718742a91c0b8199118854b76de780ac4
| 4,938
|
py
|
Python
|
test/programytest/rdf/test_remove.py
|
cdoebler1/AIML2
|
ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a
|
[
"MIT"
] | 345
|
2016-11-23T22:37:04.000Z
|
2022-03-30T20:44:44.000Z
|
test/programytest/rdf/test_remove.py
|
MikeyBeez/program-y
|
00d7a0c7d50062f18f0ab6f4a041068e119ef7f0
|
[
"MIT"
] | 275
|
2016-12-07T10:30:28.000Z
|
2022-02-08T21:28:33.000Z
|
test/programytest/rdf/test_remove.py
|
VProgramMist/modified-program-y
|
f32efcafafd773683b3fe30054d5485fe9002b7d
|
[
"MIT"
] | 159
|
2016-11-28T18:59:30.000Z
|
2022-03-20T18:02:44.000Z
|
import unittest
from programy.rdf.collection import RDFCollection
class RDFCollectionRemoveTests(unittest.TestCase):
    """Tests for RDFCollection.remove() filtering tuples by subject, predicate
    and/or object.

    Note: the local variable holding all tuples was named ``all`` in the
    original, shadowing the builtin; it is renamed ``tuples`` here.  Behavior
    of every assertion is unchanged.
    """

    def add_data(self, collection):
        # Populate the collection with a fixed set of five animal facts.
        collection.add_entity("MONKEY", "LEGS", "2", "ANIMALS")
        collection.add_entity("MONKEY", "HASFUR", "true", "ANIMALS")
        collection.add_entity("ZEBRA", "LEGS", "4", "ANIMALS")
        collection.add_entity("BIRD", "LEGS", "2", "ANIMALS")
        collection.add_entity("ELEPHANT", "TRUNK", "true", "ANIMALS")

    def test_remove_subject(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        tuples = collection.all_as_tuples()
        remains = collection.remove(tuples, subject='MONKEY')
        self.assertIsNotNone(remains)
        self.assertEqual(3, len(remains))
        self.assertTrue(["ZEBRA", "LEGS", "4"] in remains)
        self.assertTrue(["BIRD", "LEGS", "2"] in remains)
        self.assertTrue(["ELEPHANT", "TRUNK", "true"] in remains)

    def test_remove_subject_predicate(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        tuples = collection.all_as_tuples()
        remains = collection.remove(tuples, subject='MONKEY', predicate="LEGS")
        self.assertIsNotNone(remains)
        self.assertEqual(4, len(remains))
        self.assertTrue(["MONKEY", "HASFUR", "true"] in remains)
        self.assertTrue(["ZEBRA", "LEGS", "4"] in remains)
        self.assertTrue(["BIRD", "LEGS", "2"] in remains)
        self.assertTrue(["ELEPHANT", "TRUNK", "true"] in remains)

    def test_remove_subject_object(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        tuples = collection.all_as_tuples()
        remains = collection.remove(tuples, subject='MONKEY', obj="2")
        self.assertIsNotNone(remains)
        self.assertEqual(4, len(remains))
        # NOTE(review): these membership checks target the full tuple list,
        # not ``remains`` as the sibling tests do — possibly a copy-paste
        # slip, kept as-is to preserve behavior; confirm against remove().
        self.assertTrue(["MONKEY", "HASFUR", "true"] in tuples)
        self.assertTrue(["ZEBRA", "LEGS", "4"] in tuples)
        self.assertTrue(["BIRD", "LEGS", "2"] in tuples)
        self.assertTrue(["ELEPHANT", "TRUNK", "true"] in tuples)

    def test_remove_predicate(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        tuples = collection.all_as_tuples()
        remains = collection.remove(tuples, predicate='LEGS')
        self.assertIsNotNone(remains)
        self.assertEqual(2, len(remains))
        self.assertTrue(["MONKEY", "HASFUR", "true"] in remains)
        self.assertTrue(["ELEPHANT", "TRUNK", "true"] in remains)

    def test_remove_predicate_object(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        tuples = collection.all_as_tuples()
        remains = collection.remove(tuples, predicate='LEGS', obj="2")
        self.assertIsNotNone(remains)
        self.assertEqual(3, len(remains))
        # NOTE(review): membership checked against the full tuple list, as in
        # test_remove_subject_object above — kept to preserve behavior.
        self.assertTrue(["MONKEY", "HASFUR", "true"] in tuples)
        self.assertTrue(["ZEBRA", "LEGS", "4"] in tuples)
        self.assertTrue(["ELEPHANT", "TRUNK", "true"] in tuples)

    def test_remove_object(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        tuples = collection.all_as_tuples()
        remains = collection.remove(tuples, obj='2')
        self.assertIsNotNone(remains)
        self.assertEqual(3, len(remains))
        self.assertTrue(["MONKEY", "HASFUR", "true"] in remains)
        self.assertTrue(["ZEBRA", "LEGS", "4"] in remains)
        self.assertTrue(["ELEPHANT", "TRUNK", "true"] in remains)

    def test_remove_subject_predicate_object(self):
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        tuples = collection.all_as_tuples()
        remains = collection.remove(tuples, subject='MONKEY', predicate="LEGS", obj="2")
        self.assertIsNotNone(remains)
        self.assertEqual(4, len(remains))
        self.assertTrue(["MONKEY", "HASFUR", "true"] in remains)
        self.assertTrue(["ZEBRA", "LEGS", "4"] in remains)
        self.assertTrue(["BIRD", "LEGS", "2"] in remains)
        self.assertTrue(["ELEPHANT", "TRUNK", "true"] in remains)

    def test_remove_nothing(self):
        # With no filter arguments, remove() should leave all five tuples.
        collection = RDFCollection()
        self.assertIsNotNone(collection)
        self.add_data(collection)
        tuples = collection.all_as_tuples()
        remains = collection.remove(tuples)
        self.assertIsNotNone(remains)
        self.assertEqual(5, len(remains))
        self.assertTrue(["MONKEY", "LEGS", "2"] in remains)
        self.assertTrue(["MONKEY", "HASFUR", "true"] in remains)
        self.assertTrue(["ZEBRA", "LEGS", "4"] in remains)
        self.assertTrue(["BIRD", "LEGS", "2"] in remains)
        self.assertTrue(["ELEPHANT", "TRUNK", "true"] in remains)
| 34.055172
| 85
| 0.630215
| 525
| 4,938
| 5.830476
| 0.08381
| 0.111402
| 0.157792
| 0.112708
| 0.90624
| 0.883045
| 0.857563
| 0.844169
| 0.844169
| 0.844169
| 0
| 0.007299
| 0.223167
| 4,938
| 144
| 86
| 34.291667
| 0.790667
| 0
| 0
| 0.712871
| 0
| 0
| 0.104496
| 0
| 0
| 0
| 0
| 0
| 0.514851
| 1
| 0.089109
| false
| 0
| 0.019802
| 0
| 0.118812
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1d467debfa270eda0b3cda05db6f92123f925981
| 203
|
py
|
Python
|
tests/parser/aggregates.count.assignment.2.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.count.assignment.2.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.count.assignment.2.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
# Parser test fixture: `input` is the ASP program fed to the system and
# `output` is the expected result text — here identical, i.e. the program is
# presumably expected to be echoed back unchanged (TODO confirm harness
# semantics).  `input` shadows the builtin of the same name, but the test
# harness appears to read these module-level globals by name, so the names
# must not change.
input = """
a(2).
b(1).
c(2,2).
p(2).
q(2).
bug :- p(M),q(N), #count{ V:a(M),b(N),c(M,V) } = N.
"""
output = """
a(2).
b(1).
c(2,2).
p(2).
q(2).
bug :- p(M),q(N), #count{ V:a(M),b(N),c(M,V) } = N.
"""
| 10.684211
| 51
| 0.369458
| 54
| 203
| 1.388889
| 0.259259
| 0.053333
| 0.08
| 0.106667
| 0.853333
| 0.853333
| 0.853333
| 0.853333
| 0.853333
| 0.853333
| 0
| 0.071856
| 0.17734
| 203
| 18
| 52
| 11.277778
| 0.377246
| 0
| 0
| 0.875
| 0
| 0.125
| 0.847291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
1d60a06a5c5fcfacb41562e03525ea718fe5ba1c
| 84
|
py
|
Python
|
codewars/8kyu/doha22/kata8/hello_world/hello_world.py
|
doha22/Training_one
|
0cd7cf86c7da0f6175834146296b763d1841766b
|
[
"MIT"
] | null | null | null |
codewars/8kyu/doha22/kata8/hello_world/hello_world.py
|
doha22/Training_one
|
0cd7cf86c7da0f6175834146296b763d1841766b
|
[
"MIT"
] | 2
|
2019-01-22T10:53:42.000Z
|
2019-01-31T08:02:48.000Z
|
codewars/8kyu/doha22/kata8/hello_world/hello_world.py
|
doha22/Training_one
|
0cd7cf86c7da0f6175834146296b763d1841766b
|
[
"MIT"
] | 13
|
2019-01-22T10:37:42.000Z
|
2019-01-25T13:30:43.000Z
|
def greet():
    """Return the classic 'hello world!' greeting."""
    message = "hello world!"
    return message
def greet2():
    """Return the classic 'hello world!' greeting (duplicate kata solution)."""
    greeting = "hello world!"
    return greeting
| 14
| 25
| 0.583333
| 10
| 84
| 4.9
| 0.6
| 0.44898
| 0.653061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.27381
| 84
| 6
| 26
| 14
| 0.786885
| 0
| 0
| 0.5
| 0
| 0
| 0.282353
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
d529e7d2264947af3e335119f04f8f5f8c11b353
| 2,622
|
py
|
Python
|
insights/tests/client/data_collector/test_write_metadata.py
|
lhuett/insights-core
|
1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8
|
[
"Apache-2.0"
] | null | null | null |
insights/tests/client/data_collector/test_write_metadata.py
|
lhuett/insights-core
|
1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8
|
[
"Apache-2.0"
] | null | null | null |
insights/tests/client/data_collector/test_write_metadata.py
|
lhuett/insights-core
|
1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8
|
[
"Apache-2.0"
] | null | null | null |
import six
import mock
from insights.client.constants import InsightsConstants as constants
from insights.client.config import InsightsConfig
from insights.client.data_collector import DataCollector
from mock.mock import patch
@patch('insights.client.data_collector.os.remove')
@patch('insights.client.data_collector.InsightsArchive')
def test_egg_release_file_read_and_written(archive, remove):
    '''
    Verify the egg release file is read from file and
    written to the archive
    '''
    # Patch the builtin open() under its Python-2/3 qualified name.
    open_name = 'builtins.open' if six.PY3 else '__builtin__.open'
    with patch(open_name, create=True) as mock_open:
        mock_open.side_effect = [mock.mock_open(read_data='/testvalue').return_value]
        config = InsightsConfig()
        collector = DataCollector(config)
        collector._write_egg_release()
        remove.assert_called_once_with(constants.egg_release_file)
        collector.archive.add_metadata_to_archive.assert_called_once_with('/testvalue', '/egg_release')
@patch('insights.client.data_collector.os.remove')
@patch('insights.client.data_collector.InsightsArchive')
def test_egg_release_file_read_and_written_no_delete(archive, remove):
    '''
    Verify the egg release file is read from file and
    written to the archive, even if the file cannot be deleted
    '''
    # Patch the builtin open() under its Python-2/3 qualified name.
    open_name = 'builtins.open' if six.PY3 else '__builtin__.open'
    # Simulate the release file being undeletable.
    remove.side_effect = OSError('test')
    with patch(open_name, create=True) as mock_open:
        mock_open.side_effect = [mock.mock_open(read_data='/testvalue').return_value]
        config = InsightsConfig()
        collector = DataCollector(config)
        collector._write_egg_release()
        remove.assert_called_once_with(constants.egg_release_file)
        collector.archive.add_metadata_to_archive.assert_called_once_with('/testvalue', '/egg_release')
@patch('insights.client.data_collector.os.remove')
@patch('insights.client.data_collector.InsightsArchive')
def test_egg_release_file_read_and_written_no_read(archive, remove):
    '''
    Verify that when the egg release file cannot be read,
    a blank string is written to the archive
    '''
    # Patch the builtin open() under its Python-2/3 qualified name.
    open_name = 'builtins.open' if six.PY3 else '__builtin__.open'
    remove.side_effect = OSError('test')
    with patch(open_name, create=True) as mock_open:
        # Reading the release file fails entirely.
        mock_open.side_effect = IOError('test')
        config = InsightsConfig()
        collector = DataCollector(config)
        collector._write_egg_release()
        remove.assert_called_once_with(constants.egg_release_file)
        collector.archive.add_metadata_to_archive.assert_called_once_with('', '/egg_release')
| 35.432432
| 95
| 0.720824
| 351
| 2,622
| 5.065527
| 0.190883
| 0.084364
| 0.070866
| 0.106299
| 0.816648
| 0.816648
| 0.816648
| 0.816648
| 0.816648
| 0.816648
| 0
| 0.001399
| 0.182304
| 2,622
| 73
| 96
| 35.917808
| 0.827892
| 0.105263
| 0
| 0.78
| 0
| 0
| 0.189248
| 0.112762
| 0
| 0
| 0
| 0
| 0.12
| 1
| 0.06
| false
| 0
| 0.12
| 0
| 0.18
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d54bd4782f667cebf7fa62d033d086f4f181e818
| 16,222
|
py
|
Python
|
qbay_test/frontend/test_updateprofile.py
|
HamizJamil/group22-cisc327
|
b92c2a81fbf8d4d7b4be360bf242fff98e0c9bfd
|
[
"MIT"
] | null | null | null |
qbay_test/frontend/test_updateprofile.py
|
HamizJamil/group22-cisc327
|
b92c2a81fbf8d4d7b4be360bf242fff98e0c9bfd
|
[
"MIT"
] | 1
|
2022-03-01T19:09:51.000Z
|
2022-03-01T19:09:51.000Z
|
qbay_test/frontend/test_updateprofile.py
|
HamizJamil/group22-cisc327
|
b92c2a81fbf8d4d7b4be360bf242fff98e0c9bfd
|
[
"MIT"
] | null | null | null |
from seleniumbase import BaseCase
from qbay_test.conftest import base_url
from qbay.models import User
class FrontEndUpdateProfileTest(BaseCase):
# Smoke Test - Register Update user and verify access to update profile
def test_update_profile1(self):
self.open(base_url + '/registration') # open up the page
self.type("#user_name", "profiletest") # insert the text fields
self.type("#user_email", "update@gmail.com")
self.type("#user_pass", "ABC@abc")
self.find_element("#Submit").click() # click save to submit
# verifying successful registration
new_user = User.query.filter_by(email="update@gmail.com").first()
assert new_user is not None
self.open(base_url + '/')
self.find_element("#navbarDropdownMenuLink1").click()
self.find_element("#updateprofile").click()
# Getting current page title
self.open(base_url + '/updateprofile')
assert self.assert_title("Update Profile")
# Set of Input Partitioning Tests
# Correct input test
def test_update_profile2(self, *_):
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click login
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", "profiletest")
self.type("#shipping_address", "Queens University, "
"99 University Ave, Kingston, ON")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").all()
assert updated is not None
# incorrect username with space prefix
def test_update_profile3(self, *_):
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", " profiletest")
self.type("#shipping_address", "Queens University")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.user_name != " profiletest"
# incorrect username with space suffix
def test_update_profile4(self, *_):
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", "profiletest ")
self.type("#shipping_address", "Queens University")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.user_name != "profiletest "
# incorrect username less than 2 characters
def test_update_profile5(self, *_):
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", "p")
self.type("#shipping_address", "Queens University")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.user_name != "p"
# incorrect username longer than 20 characters
def test_update_profile6(self, *_):
longer_than_20 = "p" * 22
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", longer_than_20)
self.type("#shipping_address", "Queens University")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.user_name != longer_than_20
# incorrect empty username
def test_update_profile7(self, *_):
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", "")
self.type("#shipping_address", "Queens University")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.user_name != ""
# incorrect username non-alphanumeric (special character)
def test_update_profile8(self, *_):
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", "prof!letest")
self.type("#shipping_address", "Queens University")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.user_name != "prof!letest"
# incorrect shipping address empty
def test_update_profile9(self, *_):
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
# veryfing that it redirects to homepage
# verifying a product is successfully commited
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", "profiletest")
self.type("#shipping_address", "")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.shipping_address != ""
# incorrect shipping address non-alphanumeric (special character!)
def test_update_profile10(self, *_):
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
# veryfing that it redirects to homepage
# verifying a product is successfully commited
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", "profiletest")
self.type("#shipping_address", "Queens University, ! "
"99 University Ave, Kingston, ON")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.shipping_address != "Queens University, !" \
" 99 University Ave, Kingston, ON"
# correct postal code conversion: lower case to uppercase with no space
def test_update_profile11(self, *_):
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", "profiletest")
self.type("#shipping_address", "Queens University, "
"99 University Ave, Kingston, ON")
self.type("#postal_code", "K8l 3n6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.postal_code == "K8L3N6"
assert updated is not None
# assert updated.user_name == "profiletest"
# assert updated.shipping_address == "Queens University," \
# " 99 University Ave, Kingston, ON"
# assert updated.postal_code == "K8L3N6"
# incorrect invalid postal code
def test_update_profile12(self, *_):
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
# veryfing that it redirects to homepage
# verifying a product is successfully commited
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", "profiletest")
self.type("#shipping_address", "Queens University, "
"99 University Ave, Kingston, ON")
self.type("#postal_code", "K2AA5Z9")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.user_name == "profiletest"
assert updated.shipping_address == "Queens University," \
" 99 University Ave, Kingston, ON"
assert updated.postal_code != "K2AA5Z9"
# Set of Boundary Testings
# 13Correct username within the boundary: 20 characters
def test_update_profile13(self, *_):
username_20 = "p" * 20
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", username_20)
self.type("#shipping_address", "Queens University")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.user_name == username_20
# Correct username within the boundray: 3 characters
def test_update_profile14(self, *_):
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", "ppp")
self.type("#shipping_address", "Queens University")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.user_name == "ppp"
# Incorrect username out of range: 21 characters
def test_update_profile15(self, *_):
username_21 = "p" * 21
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", username_21)
self.type("#shipping_address", "Queens University")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.user_name != username_21
# Incorrect username out of range: 2 character
def test_update_profile16(self, *_):
self.open(base_url + '/login') # open up the page
self.type("#user_email", "update@gmail.com") # insert the text fields
self.type("#user_pass", "ABC@abc")
self.find_element("#login").click() # click save to submit
self.open(base_url + '/updateprofile')
self.type("#user_email", "update@gmail.com")
self.type("#user_name", "pp")
self.type("#shipping_address", "Queens University")
self.type("#postal_code", "K7L 3N6")
self.find_element("#Submit").click()
updated = User.query.filter_by(email="update@gmail.com").first()
assert updated.user_name != "pp"
# Correct Postal code: Correct length = 6, Follows X9X9X9
def test_update_profile17(self, *_):
    """Valid 6-character postal code in X9X9X9 form is accepted.

    Submits "K7L3N6" (no space) and checks it is persisted verbatim.
    """
    self.open(base_url + '/login')  # open the login page
    self.type("#user_email", "update@gmail.com")  # fill in the credentials
    self.type("#user_pass", "ABC@abc")
    self.find_element("#login").click()  # submit the login form
    self.open(base_url + '/updateprofile')
    self.type("#user_email", "update@gmail.com")
    self.type("#user_name", "profiletest")
    self.type("#shipping_address", "Queens University, "
                                   "99 University Ave, Kingston, ON")
    self.type("#postal_code", "K7L3N6")  # compact form, length 6
    self.find_element("#Submit").click()
    updated = User.query.filter_by(email="update@gmail.com").first()
    assert updated.postal_code == "K7L3N6"
# Correct postal code: Correct length = 7, Follows X9X 9X9
def test_update_profile18(self, *_):
    """Postal code with spaces (X9X 9X9 form) is accepted and normalized.

    The typed value " K7L 3N6" carries a leading and an internal space,
    while the assertion expects "K7L3N6" -- presumably the application
    strips whitespace before storing (TODO confirm against the handler).
    """
    self.open(base_url + '/login')  # open the login page
    self.type("#user_email", "update@gmail.com")  # fill in the credentials
    self.type("#user_pass", "ABC@abc")
    self.find_element("#login").click()  # submit the login form
    self.open(base_url + '/updateprofile')
    self.type("#user_email", "update@gmail.com")
    self.type("#user_name", "profiletest")
    self.type("#shipping_address", "Queens University, "
                                   "99 University Ave, Kingston, ON")
    self.type("#postal_code", " K7L 3N6")  # spaced form; expected to be normalized
    self.find_element("#Submit").click()
    updated = User.query.filter_by(email="update@gmail.com").first()
    assert updated.postal_code == "K7L3N6"
# Incorrect postal code: correct length wrong order 9X9X9X
def test_update_profile19(self, *_):
    """Postal code with digits/letters in the wrong order must be rejected.

    "3N6K7L" has the right length but the 9X9X9X pattern instead of
    X9X9X9; it must not be persisted.
    """
    self.open(base_url + '/login')  # open the login page
    self.type("#user_email", "update@gmail.com")  # fill in the credentials
    self.type("#user_pass", "ABC@abc")
    self.find_element("#login").click()  # submit the login form
    self.open(base_url + '/updateprofile')
    self.type("#user_email", "update@gmail.com")
    self.type("#user_name", "profiletest")
    self.type("#shipping_address", "Queens University, "
                                   "99 University Ave, Kingston, ON")
    self.type("#postal_code", "3N6K7L")  # digit-first variant: invalid
    self.find_element("#Submit").click()
    # The rejected value must not have been persisted.
    updated = User.query.filter_by(email="update@gmail.com").first()
    assert updated.postal_code != "3N6K7L"
| 42.802111
| 79
| 0.616262
| 1,980
| 16,222
| 4.89596
| 0.081818
| 0.091603
| 0.092841
| 0.109759
| 0.856097
| 0.821849
| 0.821849
| 0.808232
| 0.808232
| 0.805653
| 0
| 0.013614
| 0.239305
| 16,222
| 378
| 80
| 42.915344
| 0.771961
| 0.161756
| 0
| 0.766917
| 0
| 0
| 0.295699
| 0.001777
| 0
| 0
| 0
| 0
| 0.086466
| 1
| 0.071429
| false
| 0.071429
| 0.011278
| 0
| 0.086466
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
d556cf05121d796cc434ac49cb8f86e49a68a675
| 18,412
|
py
|
Python
|
ffd_visualization.py
|
lsDrizzle/FreeFormDeformation-SketchDetection
|
da9e0afad8d4721e01f792d2d1838feed9a61bb8
|
[
"MIT"
] | 8
|
2021-09-25T06:00:53.000Z
|
2022-03-12T03:31:04.000Z
|
ffd_visualization.py
|
wxdrizzle/FreeFormDeformation-SketchDetection
|
8b9da9b8e40ba9c98f012e03eb0388f4a7d5c613
|
[
"MIT"
] | null | null | null |
ffd_visualization.py
|
wxdrizzle/FreeFormDeformation-SketchDetection
|
8b9da9b8e40ba9c98f012e03eb0388f4a7d5c613
|
[
"MIT"
] | 1
|
2021-02-22T21:26:57.000Z
|
2021-02-22T21:26:57.000Z
|
from manimlib.imports import *
CON_POINT_RANGE = 7
# B spline basis function
def B_0(u):
    """First cubic uniform B-spline blending polynomial, for u in [0, 1]."""
    assert 0 <= u <= 1
    cube = (1. - u) ** 3.
    return cube / 6.
def B_1(u):
    """Second cubic uniform B-spline blending polynomial, for u in [0, 1]."""
    assert 0 <= u <= 1
    numerator = 3 * u ** 3 - 6 * u ** 2 + 4
    return numerator / 6
def B_2(u):
    """Third cubic uniform B-spline blending polynomial, for u in [0, 1]."""
    assert 0 <= u <= 1
    numerator = -3 * u ** 3 + 3 * u ** 2 + 3 * u + 1
    return numerator / 6
def B_3(u):
    """Fourth cubic uniform B-spline blending polynomial, for u in [0, 1]."""
    assert 0 <= u <= 1
    value = u ** 3
    return value / 6
def naive_transformation(pos_3d, mesh, delta, B, K):
    """Map one 3-D point through a cubic B-spline free-form deformation.

    pos_3d -- 3-component point; only x/y are deformed, z comes back as 0
    mesh   -- control-point array, channels first (at least 2 channels);
              the x/y lattice is indexed by the two trailing axes
    delta  -- control-lattice spacing in mesh coordinates
    B, K   -- affine offset/scale between scene and mesh coordinates
    """
    def basis(u):
        # The four cubic uniform B-spline blending polynomials at u.
        assert 0 <= u <= 1
        return np.array([(1. - u) ** 3. / 6.,
                         (3 * u ** 3 - 6 * u ** 2 + 4) / 6,
                         (-3 * u ** 3 + 3 * u ** 2 + 3 * u + 1) / 6,
                         u ** 3 / 6])

    xy = (pos_3d[0:2] - B) / K          # scene -> mesh coordinates
    lattice = xy / delta
    cell = np.floor(lattice)
    uv = lattice - cell                  # fractional position inside the cell
    base = cell - 1.                     # first of the 4 influencing rows/cols

    # 4x4 tensor-product weights: weights[i, j] = basis(u)[i] * basis(v)[j].
    weights = np.outer(basis(uv[0]), basis(uv[1]))
    r0 = int(base[0] + 1)
    c0 = int(base[1] + 1)
    patch = mesh[:, r0:r0 + 4, c0:c0 + 4]
    weighted = weights * patch           # broadcasts over the channel axis

    result = np.zeros(3)
    result[0:2] = [(weighted[0].sum() * K) + B,
                   (weighted[1].sum() * K) + B]
    return result
class FFDSquare(Scene):
    """Manim scene: animate a grid of colored squares under a B-spline
    free-form deformation (given by a deformed control mesh) and restore it.

    mesh       -- control points, channels first; lattice side = shape[1]
    mesh_trans -- deformed control points, same shape as mesh
    delta      -- control-lattice spacing in mesh coordinates
    """
    def __init__(self, mesh, mesh_trans, delta, **scene_kwargs):
        # Attributes must exist before super().__init__, which runs the scene.
        self.mesh = mesh
        self.mesh_trans = mesh_trans
        self.delta = delta
        self.mesh_size = self.mesh.shape[1] - 3
        # Affine map between mesh coordinates and scene coordinates.
        self.K = CON_POINT_RANGE / (self.mesh_size + 1) / delta
        self.B = CON_POINT_RANGE * (1 / (self.mesh_size + 1) - 0.5)
        # Tiny epsilon keeps sampled points strictly inside the valid lattice.
        self.GEOMETRY_SIZE = self.K * delta * (self.mesh_size - 1) - 0.0001
        self.AXIS_MIN = -0.5 * self.GEOMETRY_SIZE
        self.CONFIG = {
            "x_min": self.AXIS_MIN,
            "x_max": -self.AXIS_MIN,
            "y_min": self.AXIS_MIN,
            "y_max": -self.AXIS_MIN,
            "background_line_style": {
                "stroke_color": "#FFFFFF",
            },
            "x_line_frequency": 0.5 * self.K * delta,
            "y_line_frequency": 0.5 * self.K * delta,
        }
        super().__init__(**scene_kwargs)

    def construct(self):
        # Dots at every (scene-space) control point, before and after deformation.
        control_points = VGroup(*[Dot(point=[self.mesh[0, i, j] * self.K + self.B,
                                             self.mesh[1, i, j] * self.K + self.B,
                                             0])
                                  for i in range(self.mesh_size + 2) for j in range(self.mesh_size + 2)])
        control_points_trans = VGroup(*[Dot(point=[self.mesh_trans[0, i, j] * self.K + self.B,
                                                   self.mesh_trans[1, i, j] * self.K + self.B,
                                                   0])
                                        for i in range(self.mesh_size + 2) for j in range(self.mesh_size + 2)])
        # Horizontal + vertical segments joining neighbouring control points.
        control_lines = VGroup(
            *[Line(np.append(self.mesh[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh[:, i + 1, j] * self.K + self.B, 0))
              for i in range(self.mesh_size + 1) for j in range(self.mesh_size + 2)],
            *[Line(np.append(self.mesh[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh[:, i, j + 1] * self.K + self.B, 0))
              for i in range(self.mesh_size + 2) for j in range(self.mesh_size + 1)]
        )
        control_lines_trans = VGroup(
            *[Line(np.append(self.mesh_trans[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh_trans[:, i + 1, j] * self.K + self.B, 0))
              for i in range(self.mesh_size + 1) for j in range(self.mesh_size + 2)],
            *[Line(np.append(self.mesh_trans[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh_trans[:, i, j + 1] * self.K + self.B, 0))
              for i in range(self.mesh_size + 2) for j in range(self.mesh_size + 1)]
        )
        control_lines.set_color("#70c3ff")
        control_lines_trans.set_color("#70c3ff")
        grid = NumberPlane(**self.CONFIG)
        # Tile the deformable region with a square_num x square_num checkerboard.
        square_num = 10.
        square_side_length = self.GEOMETRY_SIZE / square_num
        # 0.4 (not 0.5) end margin avoids emitting an extra row/column from
        # floating-point drift in np.arange.
        squares = VGroup(
            *[Square(side_length=square_side_length, fill_opacity=1).shift(x * RIGHT + y * UP)
              for x in np.arange(self.AXIS_MIN + 0.5 * square_side_length,
                                 self.AXIS_MIN + self.GEOMETRY_SIZE - 0.4 * square_side_length,
                                 square_side_length)
              for y in np.arange(self.AXIS_MIN + 0.5 * square_side_length,
                                 self.AXIS_MIN + self.GEOMETRY_SIZE - 0.4 * square_side_length,
                                 square_side_length)])
        squares.set_color_by_gradient(RED, ORANGE, YELLOW, GREEN, BLUE, PURPLE)
        self.add(squares, grid, control_lines, control_points)
        # Remember the undeformed state so the animation can be reversed.
        squares.save_state()
        grid.save_state()
        control_points.save_state()
        control_lines.save_state()
        grid.prepare_for_nonlinear_transform()
        # Deform everything toward the transformed mesh...
        self.play(
            Transform(control_points, control_points_trans),
            Transform(control_lines, control_lines_trans),
            ApplyPointwiseFunction(lambda p: naive_transformation(p, self.mesh_trans, self.delta, self.B, self.K),
                                   squares),
            ApplyPointwiseFunction(lambda p: naive_transformation(p, self.mesh_trans, self.delta, self.B, self.K),
                                   grid),
            run_time=1,
        )
        # ...then snap back to the saved undeformed state.
        self.play(
            Restore(grid, run_time=1),
            Restore(squares, run_time=1),
            Restore(control_points, run_time=1),
            Restore(control_lines, run_time=1)
        )
class FFDDots(Scene):
    """Manim scene: same free-form deformation as FFDSquare, but visualized
    with a regular field of colored dots instead of squares.

    mesh       -- control points, channels first; lattice side = shape[1]
    mesh_trans -- deformed control points, same shape as mesh
    delta      -- control-lattice spacing in mesh coordinates
    """
    def __init__(self, mesh, mesh_trans, delta, **scene_kwargs):
        # Attributes must exist before super().__init__, which runs the scene.
        self.mesh = mesh
        self.mesh_trans = mesh_trans
        self.delta = delta
        self.mesh_size = self.mesh.shape[1] - 3
        # Affine map between mesh coordinates and scene coordinates.
        self.K = CON_POINT_RANGE / (self.mesh_size + 1) / delta
        self.B = CON_POINT_RANGE * (1 / (self.mesh_size + 1) - 0.5)
        # Local only (unlike FFDSquare, which stores self.GEOMETRY_SIZE).
        GEOMETRY_SIZE = self.K * delta * (self.mesh_size - 1) - 0.0001
        self.AXIS_MIN = -0.5 * GEOMETRY_SIZE
        self.CONFIG = {
            "x_min": self.AXIS_MIN,
            "x_max": -self.AXIS_MIN,
            "y_min": self.AXIS_MIN,
            "y_max": -self.AXIS_MIN,
            "background_line_style": {
                "stroke_color": "#FFFFFF",
            },
            "x_line_frequency": 0.5 * self.K * delta,
            "y_line_frequency": 0.5 * self.K * delta,
        }
        super().__init__(**scene_kwargs)

    def construct(self):
        # Segments joining neighbouring control points, original and deformed.
        control_lines = VGroup(
            *[Line(np.append(self.mesh[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh[:, i + 1, j] * self.K + self.B, 0))
              for i in range(self.mesh_size + 1) for j in range(self.mesh_size + 2)],
            *[Line(np.append(self.mesh[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh[:, i, j + 1] * self.K + self.B, 0))
              for i in range(self.mesh_size + 2) for j in range(self.mesh_size + 1)]
        )
        control_lines_trans = VGroup(
            *[Line(np.append(self.mesh_trans[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh_trans[:, i + 1, j] * self.K + self.B, 0))
              for i in range(self.mesh_size + 1) for j in range(self.mesh_size + 2)],
            *[Line(np.append(self.mesh_trans[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh_trans[:, i, j + 1] * self.K + self.B, 0))
              for i in range(self.mesh_size + 2) for j in range(self.mesh_size + 1)]
        )
        control_grid = VGroup(control_lines, control_lines_trans)
        control_grid.set_color("#70c3ff")
        grid = NumberPlane(**self.CONFIG)
        # Dots placed every half lattice cell; 0.24 (not 0.25) end margin
        # avoids an extra row/column from np.arange floating-point drift.
        dot_radius = 0.5 * 0.25 * self.delta * self.K
        dots = VGroup(
            *[Dot(radius=dot_radius, fill_opacity=1).shift(x * RIGHT + y * UP)
              for x in np.arange(self.AXIS_MIN + 0.25 * self.delta * self.K,
                                 -self.AXIS_MIN - 0.24 * self.delta * self.K,
                                 0.5 * self.delta * self.K)
              for y in np.arange(self.AXIS_MIN + 0.25 * self.delta * self.K,
                                 -self.AXIS_MIN - 0.24 * self.delta * self.K,
                                 0.5 * self.delta * self.K)])
        dots.set_color_by_gradient(RED, ORANGE, YELLOW, GREEN, BLUE, PURPLE)
        self.add(dots, grid, control_lines)
        # Remember the undeformed state so the animation can be reversed.
        dots.save_state()
        grid.save_state()
        control_lines.save_state()
        grid.prepare_for_nonlinear_transform()
        # Deform toward the transformed mesh, then restore.
        self.play(
            Transform(control_lines, control_lines_trans),
            ApplyPointwiseFunction(lambda p: naive_transformation(p, self.mesh_trans, self.delta, self.B, self.K),
                                   dots),
            ApplyPointwiseFunction(lambda p: naive_transformation(p, self.mesh_trans, self.delta, self.B, self.K),
                                   grid),
            run_time=1,
        )
        self.play(Restore(grid, run_time=1),
                  Restore(dots, run_time=1),
                  Restore(control_lines, run_time=1))
class FFDVectorsWithGrid(Scene):
    """Manim scene: show the displacement field of a B-spline free-form
    deformation as a field of vectors, over the deforming background grid.

    mesh       -- control points, channels first; lattice side = shape[1]
    mesh_trans -- deformed control points, same shape as mesh
    delta      -- control-lattice spacing in mesh coordinates
    """
    def __init__(self, mesh, mesh_trans, delta, **scene_kwargs):
        # Attributes must exist before super().__init__, which runs the scene.
        self.mesh = mesh
        self.mesh_trans = mesh_trans
        self.delta = delta
        self.mesh_size = self.mesh.shape[1] - 3
        # Affine map between mesh coordinates and scene coordinates.
        self.K = CON_POINT_RANGE / (self.mesh_size + 1) / delta
        self.B = CON_POINT_RANGE * (1 / (self.mesh_size + 1) - 0.5)
        # Fix: GEOMETRY_SIZE and AXIS_MIN were previously computed twice
        # (once as a local, then again as attributes); compute them once.
        self.GEOMETRY_SIZE = self.K * delta * (self.mesh_size - 1) - 0.0001
        self.AXIS_MIN = -0.5 * self.GEOMETRY_SIZE
        self.CONFIG = {
            "x_min": self.AXIS_MIN,
            "x_max": -self.AXIS_MIN,
            "y_min": self.AXIS_MIN,
            "y_max": -self.AXIS_MIN,
            "background_line_style": {
                "stroke_color": "#FFFFFF",
            },
            "x_line_frequency": 0.5 * self.K * delta,
            "y_line_frequency": 0.5 * self.K * delta,
            "max_stroke_width_to_length_ratio": 10,
        }
        super().__init__(**scene_kwargs)

    def construct(self):
        control_line_width = 1.5
        # Segments joining neighbouring control points, original and deformed.
        control_lines = VGroup(
            *[Line(np.append(self.mesh[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh[:, i + 1, j] * self.K + self.B, 0), stroke_width=control_line_width)
              for i in range(self.mesh_size + 1) for j in range(self.mesh_size + 2)],
            *[Line(np.append(self.mesh[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh[:, i, j + 1] * self.K + self.B, 0), stroke_width=control_line_width)
              for i in range(self.mesh_size + 2) for j in range(self.mesh_size + 1)]
        )
        control_lines_trans = VGroup(
            *[Line(np.append(self.mesh_trans[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh_trans[:, i + 1, j] * self.K + self.B, 0), stroke_width=control_line_width)
              for i in range(self.mesh_size + 1) for j in range(self.mesh_size + 2)],
            *[Line(np.append(self.mesh_trans[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh_trans[:, i, j + 1] * self.K + self.B, 0), stroke_width=control_line_width)
              for i in range(self.mesh_size + 2) for j in range(self.mesh_size + 1)]
        )
        control_grid = VGroup(control_lines, control_lines_trans)
        control_grid.set_color("#70c3ff")
        grid = NumberPlane(**self.CONFIG)
        # Sample points every half lattice cell; 0.24 (not 0.25) end margin
        # avoids an extra row/column from np.arange floating-point drift.
        points = [x * RIGHT + y * UP
                  for x in np.arange(self.AXIS_MIN + 0.25 * self.delta * self.K,
                                     -self.AXIS_MIN - 0.24 * self.delta * self.K,
                                     0.5 * self.delta * self.K)
                  for y in np.arange(self.AXIS_MIN + 0.25 * self.delta * self.K,
                                     -self.AXIS_MIN - 0.24 * self.delta * self.K,
                                     0.5 * self.delta * self.K)
                  ]
        # Zero-length vectors morph into the true displacement vectors.
        vectors = VGroup(*[Vector([0, 0, 0]).shift(point) for point in points])
        scale_factor = 1  # visual magnification of the displacements
        # NOTE(review): **self.CONFIG forwards the plane config to Vector;
        # presumably only max_stroke_width_to_length_ratio matters -- confirm.
        vectors_trans = VGroup(*[Vector(scale_factor *
                                        (naive_transformation(point, self.mesh_trans, self.delta, self.B, self.K) - point),
                                        **self.CONFIG).shift(point)
                                 for point in points])
        vectors_trans.set_color_by_gradient(RED, ORANGE, YELLOW, GREEN, BLUE)
        self.add(vectors, grid, control_lines)
        # Remember the undeformed state so the animation can be reversed.
        vectors.save_state()
        grid.save_state()
        control_lines.save_state()
        grid.prepare_for_nonlinear_transform()
        # Deform toward the transformed mesh, then restore.
        self.play(
            Transform(control_lines, control_lines_trans),
            Transform(vectors, vectors_trans),
            ApplyPointwiseFunction(lambda p: naive_transformation(p, self.mesh_trans, self.delta, self.B, self.K),
                                   grid),
            run_time=1,
        )
        self.play(Restore(grid, run_time=1),
                  Restore(vectors, run_time=1),
                  Restore(control_lines, run_time=1))
class FFDVectors(Scene):
    """Manim scene: show the displacement field of a B-spline free-form
    deformation as vectors, with the control lattice but no background grid.

    mesh       -- control points, channels first; lattice side = shape[1]
    mesh_trans -- deformed control points, same shape as mesh
    delta      -- control-lattice spacing in mesh coordinates
    """
    def __init__(self, mesh, mesh_trans, delta, **scene_kwargs):
        # Attributes must exist before super().__init__, which runs the scene.
        self.mesh = mesh
        self.mesh_trans = mesh_trans
        self.delta = delta
        self.mesh_size = self.mesh.shape[1] - 3
        # Affine map between mesh coordinates and scene coordinates.
        self.K = CON_POINT_RANGE / (self.mesh_size + 1) / delta
        self.B = CON_POINT_RANGE * (1 / (self.mesh_size + 1) - 0.5)
        # Fix: GEOMETRY_SIZE and AXIS_MIN were previously computed twice
        # (once as a local, then again as attributes); compute them once.
        self.GEOMETRY_SIZE = self.K * delta * (self.mesh_size - 1) - 0.0001
        self.AXIS_MIN = -0.5 * self.GEOMETRY_SIZE
        self.CONFIG = {
            "x_min": self.AXIS_MIN,
            "x_max": -self.AXIS_MIN,
            "y_min": self.AXIS_MIN,
            "y_max": -self.AXIS_MIN,
            "background_line_style": {
                "stroke_color": "#FFFFFF",
            },
            "x_line_frequency": 0.5 * self.K * delta,
            "y_line_frequency": 0.5 * self.K * delta,
            "max_stroke_width_to_length_ratio": 10,
        }
        super().__init__(**scene_kwargs)

    def construct(self):
        # Dots at every (scene-space) control point, before and after deformation.
        control_points = VGroup(*[Dot(point=[self.mesh[0, i, j] * self.K + self.B,
                                             self.mesh[1, i, j] * self.K + self.B,
                                             0], radius=0.05 * self.delta * self.K)
                                  for i in range(self.mesh_size + 2) for j in range(self.mesh_size + 2)])
        control_points_trans = VGroup(*[Dot(point=[self.mesh_trans[0, i, j] * self.K + self.B,
                                                   self.mesh_trans[1, i, j] * self.K + self.B,
                                                   0], radius=0.05 * self.delta * self.K)
                                        for i in range(self.mesh_size + 2) for j in range(self.mesh_size + 2)])
        control_line_width = 1.5
        # Segments joining neighbouring control points, original and deformed.
        control_lines = VGroup(
            *[Line(np.append(self.mesh[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh[:, i + 1, j] * self.K + self.B, 0), stroke_width=control_line_width)
              for i in range(self.mesh_size + 1) for j in range(self.mesh_size + 2)],
            *[Line(np.append(self.mesh[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh[:, i, j + 1] * self.K + self.B, 0), stroke_width=control_line_width)
              for i in range(self.mesh_size + 2) for j in range(self.mesh_size + 1)]
        )
        control_lines_trans = VGroup(
            *[Line(np.append(self.mesh_trans[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh_trans[:, i + 1, j] * self.K + self.B, 0), stroke_width=control_line_width)
              for i in range(self.mesh_size + 1) for j in range(self.mesh_size + 2)],
            *[Line(np.append(self.mesh_trans[:, i, j] * self.K + self.B, 0),
                   np.append(self.mesh_trans[:, i, j + 1] * self.K + self.B, 0), stroke_width=control_line_width)
              for i in range(self.mesh_size + 2) for j in range(self.mesh_size + 1)]
        )
        control_grid = VGroup(control_lines, control_lines_trans)
        control_grid.set_color("#70c3ff")
        # Sample points every half lattice cell; 0.24 (not 0.25) end margin
        # avoids an extra row/column from np.arange floating-point drift.
        points = [x * RIGHT + y * UP
                  for x in
                  np.arange(self.AXIS_MIN + 0.25 * self.delta * self.K,
                            -self.AXIS_MIN - 0.24 * self.delta * self.K,
                            0.5 * self.delta * self.K)
                  for y in
                  np.arange(self.AXIS_MIN + 0.25 * self.delta * self.K,
                            -self.AXIS_MIN - 0.24 * self.delta * self.K,
                            0.5 * self.delta * self.K)
                  ]
        # Zero-length vectors morph into the (scaled) displacement vectors.
        vectors = VGroup(*[Vector([0, 0, 0]).shift(point) for point in points])
        scale_factor = 2  # visual magnification of the displacements
        # NOTE(review): **self.CONFIG forwards the plane config to Vector;
        # presumably only max_stroke_width_to_length_ratio matters -- confirm.
        vectors_trans = VGroup(*[Vector(scale_factor *
                                        (naive_transformation(point, self.mesh_trans, self.delta, self.B, self.K) - point),
                                        **self.CONFIG).shift(point)
                                 for point in points])
        vectors_trans.set_color_by_gradient(RED, ORANGE, YELLOW, GREEN, BLUE)
        self.add(vectors, control_lines, control_points)
        # Remember the undeformed state so the animation can be reversed.
        vectors.save_state()
        control_lines.save_state()
        control_points.save_state()
        # Deform toward the transformed mesh, then restore.
        self.play(
            Transform(control_lines, control_lines_trans),
            Transform(control_points, control_points_trans),
            Transform(vectors, vectors_trans),
            run_time=1,
        )
        self.play(Restore(vectors, run_time=1),
                  Restore(control_points, run_time=1),
                  Restore(control_lines, run_time=1))
| 45.461728
| 123
| 0.519715
| 2,596
| 18,412
| 3.478428
| 0.05624
| 0.107198
| 0.077076
| 0.082835
| 0.917054
| 0.90897
| 0.888151
| 0.858472
| 0.858472
| 0.846733
| 0
| 0.040617
| 0.344775
| 18,412
| 404
| 124
| 45.574257
| 0.707891
| 0.001249
| 0
| 0.707042
| 0
| 0
| 0.0254
| 0.00805
| 0
| 0
| 0
| 0
| 0.011268
| 1
| 0.03662
| false
| 0
| 0.002817
| 0
| 0.064789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d55825e8cbafe2317df581c1159a30f97c73e99e
| 2,741
|
py
|
Python
|
pyaz/webapp/webjob/triggered/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/webapp/webjob/triggered/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/webapp/webjob/triggered/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
'''
Allows management operations of triggered webjobs on a web app.
'''
from .... pyaz_utils import _call_az
def list(name, resource_group, slot=None):
    '''
    List all triggered webjobs hosted on a web app.

    NOTE: intentionally shadows the builtin `list` to mirror the CLI verb.
    The parameter names are significant: `locals()` is forwarded verbatim
    as the CLI argument mapping, so do not rename them or add locals.

    Required Parameters:
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`

    Optional Parameters:
    - slot -- the name of the slot. Defaults to the production slot if not specified
    '''
    return _call_az("az webapp webjob triggered list", locals())
def remove(name, resource_group, webjob_name, slot=None):
    '''
    Delete a specific triggered webjob hosted on a web app.

    The parameter names are significant: `locals()` is forwarded verbatim
    as the CLI argument mapping, so do not rename them or add locals.

    Required Parameters:
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - webjob_name -- The name of the webjob

    Optional Parameters:
    - slot -- the name of the slot. Defaults to the production slot if not specified
    '''
    return _call_az("az webapp webjob triggered remove", locals())
def run(name, resource_group, webjob_name, slot=None):
    '''
    Run a specific triggered webjob hosted on a web app.

    The parameter names are significant: `locals()` is forwarded verbatim
    as the CLI argument mapping, so do not rename them or add locals.

    Required Parameters:
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - webjob_name -- The name of the webjob

    Optional Parameters:
    - slot -- the name of the slot. Defaults to the production slot if not specified
    '''
    return _call_az("az webapp webjob triggered run", locals())
def log(name, resource_group, webjob_name, slot=None):
    '''
    Get history of a specific triggered webjob hosted on a web app.

    The parameter names are significant: `locals()` is forwarded verbatim
    as the CLI argument mapping, so do not rename them or add locals.

    Required Parameters:
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - webjob_name -- The name of the webjob

    Optional Parameters:
    - slot -- the name of the slot. Defaults to the production slot if not specified
    '''
    return _call_az("az webapp webjob triggered log", locals())
| 42.828125
| 163
| 0.711054
| 396
| 2,741
| 4.858586
| 0.143939
| 0.046778
| 0.051455
| 0.074844
| 0.888254
| 0.888254
| 0.888254
| 0.83368
| 0.83368
| 0.83368
| 0
| 0
| 0.207588
| 2,741
| 63
| 164
| 43.507937
| 0.88582
| 0.745713
| 0
| 0
| 0
| 0
| 0.231343
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0.111111
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
d55a3048e635cc22fd4ae432615ff19c9ff90231
| 220
|
py
|
Python
|
bank.py
|
StiffWriter044/Bank_project
|
dea5232069597af3405f88b260493582779866f6
|
[
"MIT"
] | null | null | null |
bank.py
|
StiffWriter044/Bank_project
|
dea5232069597af3405f88b260493582779866f6
|
[
"MIT"
] | null | null | null |
bank.py
|
StiffWriter044/Bank_project
|
dea5232069597af3405f88b260493582779866f6
|
[
"MIT"
] | null | null | null |
class Banca:
    """A bank identified by its name, holding its clients and accounts."""

    def __init__(self, nome_banca):
        self.nome_banca = nome_banca  # bank name
        # Fix: these were class attributes, so every Banca instance shared
        # the SAME two lists (mutable class-attribute bug). Make them
        # per-instance so each bank tracks its own clients and accounts.
        self.clienti = []         # clients of this bank
        self.conti_correnti = []  # current accounts of this bank

    def __repr__(self):
        return "Banca({0})".format(self.nome_banca)
| 24.444444
| 51
| 0.577273
| 25
| 220
| 4.56
| 0.52
| 0.315789
| 0.342105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006536
| 0.304545
| 220
| 9
| 51
| 24.444444
| 0.738562
| 0
| 0
| 0
| 0
| 0
| 0.045249
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
d5780ffee056a9d48016e20dc611e4b1cc5b3820
| 29,315
|
py
|
Python
|
graphsage/minibatch.py
|
enricovian/GraphSAGE
|
0cdda29dbc075fb8f3441c15638d1b06de992a57
|
[
"MIT"
] | null | null | null |
graphsage/minibatch.py
|
enricovian/GraphSAGE
|
0cdda29dbc075fb8f3441c15638d1b06de992a57
|
[
"MIT"
] | null | null | null |
graphsage/minibatch.py
|
enricovian/GraphSAGE
|
0cdda29dbc075fb8f3441c15638d1b06de992a57
|
[
"MIT"
] | null | null | null |
from __future__ import division
from __future__ import print_function
import numpy as np
np.random.seed(123)
class EdgeMinibatchIterator(object):
    """ This minibatch iterator iterates over batches of sampled edges or
    random pairs of co-occuring edges.

    G -- networkx graph
    id2idx -- dict mapping node ids to index in feature tensor
    placeholders -- tensorflow placeholders object
    context_pairs -- if not none, then a list of co-occuring node pairs (from random walks)
    batch_size -- size of the minibatches
    max_degree -- maximum size of the downsampled adjacency lists
    n2v_retrain -- signals that the iterator is being used to add new embeddings to a n2v model
    fixed_n2v -- signals that the iterator is being used to retrain n2v with only existing nodes as context
    """
    def __init__(self, G, id2idx,
                 placeholders, context_pairs=None, batch_size=100, max_degree=25,
                 n2v_retrain=False, fixed_n2v=False,
                 **kwargs):
        self.G = G
        self.nodes = G.nodes()
        self.id2idx = id2idx
        self.placeholders = placeholders
        self.batch_size = batch_size
        self.max_degree = max_degree
        self.batch_num = 0  # index of the next minibatch to serve

        # Shuffle the node order once up front (overwrites self.nodes above).
        self.nodes = np.random.permutation(G.nodes())
        self.adj, self.deg = self.construct_adj()
        self.test_adj = self.construct_test_adj()
        # Train on random-walk co-occurrence pairs when given, else raw edges.
        if context_pairs is None:
            edges = G.edges()
        else:
            edges = context_pairs
        self.train_edges = self.edges = np.random.permutation(edges)
        if not n2v_retrain:
            self.train_edges = self._remove_isolated(self.train_edges)
            # Validation edges are those flagged 'train_removed' on the graph.
            self.val_edges = [e for e in G.edges() if G[e[0]][e[1]]['train_removed']]
        else:
            if fixed_n2v:
                self.train_edges = self.val_edges = self._n2v_prune(self.edges)
            else:
                self.train_edges = self.val_edges = self.edges

        # NOTE(review): G.node is the networkx 1.x attribute API -- confirm
        # the pinned networkx version (2.4+ removed it).
        print(len([n for n in G.nodes() if not G.node[n]['test'] and not G.node[n]['val']]), 'train nodes')
        print(len([n for n in G.nodes() if G.node[n]['test'] or G.node[n]['val']]), 'test nodes')
        self.val_set_size = len(self.val_edges)

    def _n2v_prune(self, edges):
        # Keep only edges whose target is a pure training node.
        is_val = lambda n : self.G.node[n]["val"] or self.G.node[n]["test"]
        return [e for e in edges if not is_val(e[1])]

    def _remove_isolated(self, edge_list):
        """Drop edges with missing endpoints or zero-degree training endpoints."""
        new_edge_list = []
        missing = 0  # count of edges referencing nodes absent from G
        for n1, n2 in edge_list:
            if not n1 in self.G.node or not n2 in self.G.node:
                missing += 1
                continue
            # Skip edges touching a node with no usable training neighbors.
            if (self.deg[self.id2idx[n1]] == 0 or self.deg[self.id2idx[n2]] == 0) \
                    and (not self.G.node[n1]['test'] or self.G.node[n1]['val']) \
                    and (not self.G.node[n2]['test'] or self.G.node[n2]['val']):
                continue
            else:
                new_edge_list.append((n1,n2))
        print("Unexpected missing:", missing)
        return new_edge_list

    def construct_adj(self):
        """Build the fixed-width training adjacency table and degree vector.

        Rows are padded with the sentinel index len(id2idx) (one past the
        last real node index); neighbors are down/up-sampled to max_degree.
        """
        adj = len(self.id2idx)*np.ones((len(self.id2idx)+1, self.max_degree))
        deg = np.zeros((len(self.id2idx),))
        for nodeid in self.G.nodes():
            # Test/val nodes keep an all-sentinel row and degree 0.
            if self.G.node[nodeid]['test'] or self.G.node[nodeid]['val']:
                continue
            # Only neighbors reachable through non-removed training edges.
            neighbors = np.array([self.id2idx[neighbor]
                                  for neighbor in self.G.neighbors(nodeid)
                                  if (not self.G[nodeid][neighbor]['train_removed'])])
            deg[self.id2idx[nodeid]] = len(neighbors)
            if len(neighbors) == 0:
                continue
            # Sample without replacement to shrink, with replacement to pad.
            if len(neighbors) > self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
            elif len(neighbors) < self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
            adj[self.id2idx[nodeid], :] = neighbors
        return adj, deg

    def construct_test_adj(self):
        """Like construct_adj, but over ALL nodes and ALL edges (no filtering)."""
        adj = len(self.id2idx)*np.ones((len(self.id2idx)+1, self.max_degree))
        for nodeid in self.G.nodes():
            neighbors = np.array([self.id2idx[neighbor]
                                  for neighbor in self.G.neighbors(nodeid)])
            if len(neighbors) == 0:
                continue
            if len(neighbors) > self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
            elif len(neighbors) < self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
            adj[self.id2idx[nodeid], :] = neighbors
        return adj

    def end(self):
        """True when every training edge has been served this epoch."""
        return self.batch_num * self.batch_size >= len(self.train_edges)

    def batch_feed_dict(self, batch_edges):
        """Build the TF feed dict for a batch of (node1, node2) edges."""
        batch1 = []
        batch2 = []
        for node1, node2 in batch_edges:
            batch1.append(self.id2idx[node1])
            batch2.append(self.id2idx[node2])

        feed_dict = dict()
        feed_dict.update({self.placeholders['batch_size'] : len(batch_edges)})
        feed_dict.update({self.placeholders['batch1']: batch1})
        feed_dict.update({self.placeholders['batch2']: batch2})

        return feed_dict

    def next_minibatch_feed_dict(self):
        """Return the feed dict for the next training minibatch (advances state)."""
        start_idx = self.batch_num * self.batch_size
        self.batch_num += 1
        end_idx = min(start_idx + self.batch_size, len(self.train_edges))
        batch_edges = self.train_edges[start_idx : end_idx]
        return self.batch_feed_dict(batch_edges)

    def num_training_batches(self):
        # NOTE(review): always adds 1, so the final batch may be empty when
        # len(train_edges) is an exact multiple of batch_size.
        return len(self.train_edges) // self.batch_size + 1

    def val_feed_dict(self, size=None):
        """Feed dict for all validation edges, or a random sample of `size`."""
        edge_list = self.val_edges
        if size is None:
            return self.batch_feed_dict(edge_list)
        else:
            ind = np.random.permutation(len(edge_list))
            val_edges = [edge_list[i] for i in ind[:min(size, len(ind))]]
            return self.batch_feed_dict(val_edges)

    def incremental_val_feed_dict(self, size, iter_num):
        """Feed dict for validation slice `iter_num`; also returns a
        finished flag and the edges in the slice."""
        edge_list = self.val_edges
        val_edges = edge_list[iter_num*size:min((iter_num+1)*size,
                                                len(edge_list))]
        return self.batch_feed_dict(val_edges), (iter_num+1)*size >= len(self.val_edges), val_edges

    def incremental_embed_feed_dict(self, size, iter_num):
        """Feed dict over node self-pairs (n, n) for embedding export,
        sliced like incremental_val_feed_dict."""
        node_list = self.nodes
        val_nodes = node_list[iter_num*size:min((iter_num+1)*size,
                                                len(node_list))]
        val_edges = [(n,n) for n in val_nodes]
        return self.batch_feed_dict(val_edges), (iter_num+1)*size >= len(node_list), val_edges

    def label_val(self):
        """Split G's edges into (train_edges, val_edges) by endpoint flags."""
        train_edges = []
        val_edges = []
        for n1, n2 in self.G.edges():
            # An edge is validation if EITHER endpoint is val/test.
            if (self.G.node[n1]['val'] or self.G.node[n1]['test']
                    or self.G.node[n2]['val'] or self.G.node[n2]['test']):
                val_edges.append((n1,n2))
            else:
                train_edges.append((n1,n2))
        return train_edges, val_edges

    def shuffle(self):
        """ Re-shuffle the training set.
        Also reset the batch number.
        """
        self.train_edges = np.random.permutation(self.train_edges)
        self.nodes = np.random.permutation(self.nodes)
        self.batch_num = 0
class NodeMinibatchIterator(object):
"""
This minibatch iterator iterates over nodes for supervised learning.
G -- networkx graph
id2idx -- dict mapping node ids to integer values indexing feature tensor
placeholders -- standard tensorflow placeholders object for feeding
label_map -- map from node ids to class values (integer or list)
num_classes -- number of output classes
batch_size -- size of the minibatches
max_degree -- maximum size of the downsampled adjacency lists
"""
def __init__(self, G, id2idx,
placeholders, label_map, num_classes,
batch_size=100, max_degree=25,
**kwargs):
self.G = G
self.nodes = G.nodes()
self.id2idx = id2idx
self.placeholders = placeholders
self.batch_size = batch_size
self.max_degree = max_degree
self.batch_num = 0
self.label_map = label_map
self.num_classes = num_classes
self.adj, self.deg = self.construct_adj()
self.test_adj = self.construct_test_adj()
self.val_nodes = [n for n in self.G.nodes() if self.G.node[n]['val']]
self.test_nodes = [n for n in self.G.nodes() if self.G.node[n]['test']]
self.no_train_nodes_set = set(self.val_nodes + self.test_nodes)
self.train_nodes = set(G.nodes()).difference(self.no_train_nodes_set)
# don't train on nodes that only have edges to test set
self.train_nodes = [n for n in self.train_nodes if self.deg[id2idx[n]] > 0]
def _make_label_vec(self, node):
label = self.label_map[node]
if isinstance(label, list):
label_vec = np.array(label)
else:
label_vec = np.zeros((self.num_classes))
class_ind = self.label_map[node]
label_vec[class_ind] = 1
return label_vec
def construct_adj(self):
adj = len(self.id2idx)*np.ones((len(self.id2idx)+1, self.max_degree))
deg = np.zeros((len(self.id2idx),))
for nodeid in self.G.nodes():
if self.G.node[nodeid]['test'] or self.G.node[nodeid]['val']:
continue
neighbors = np.array([self.id2idx[neighbor]
for neighbor in self.G.neighbors(nodeid)
if (not self.G[nodeid][neighbor]['train_removed'])])
deg[self.id2idx[nodeid]] = len(neighbors)
if len(neighbors) == 0:
continue
if len(neighbors) > self.max_degree:
neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
elif len(neighbors) < self.max_degree:
neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
adj[self.id2idx[nodeid], :] = neighbors
return adj, deg
def construct_test_adj(self):
adj = len(self.id2idx)*np.ones((len(self.id2idx)+1, self.max_degree))
for nodeid in self.G.nodes():
neighbors = np.array([self.id2idx[neighbor]
for neighbor in self.G.neighbors(nodeid)])
if len(neighbors) == 0:
continue
if len(neighbors) > self.max_degree:
neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
elif len(neighbors) < self.max_degree:
neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
adj[self.id2idx[nodeid], :] = neighbors
return adj
def end(self):
return self.batch_num * self.batch_size >= len(self.train_nodes)
def batch_feed_dict(self, batch_nodes, val=False):
batch1id = batch_nodes
batch1 = [self.id2idx[n] for n in batch1id]
labels = np.vstack([self._make_label_vec(node) for node in batch1id])
feed_dict = dict()
feed_dict.update({self.placeholders['batch_size'] : len(batch1)})
feed_dict.update({self.placeholders['batch']: batch1})
feed_dict.update({self.placeholders['labels']: labels})
return feed_dict, labels
def node_val_feed_dict(self, size=None, test=False):
if test:
val_nodes = self.test_nodes
else:
val_nodes = self.val_nodes
if not size is None:
val_nodes = np.random.choice(val_nodes, size, replace=True)
# add a dummy neighbor
ret_val = self.batch_feed_dict(val_nodes)
return ret_val[0], ret_val[1]
def incremental_node_val_feed_dict(self, size, iter_num, test=False):
    """Feed dict for the iter_num-th `size`-sized slice of val/test nodes.

    Returns (feed_dict, labels, finished, node_subset) where `finished`
    is True once the slice reaches the end of the pool.
    """
    pool = self.test_nodes if test else self.val_nodes
    lo = iter_num * size
    hi = min(lo + size, len(pool))
    subset = pool[lo:hi]
    # add a dummy neighbor
    feed_dict, labels = self.batch_feed_dict(subset)
    finished = (iter_num + 1) * size >= len(pool)
    return feed_dict, labels, finished, subset
def num_training_batches(self):
    """Upper bound on the number of minibatches in one epoch.

    Always rounds up by one extra batch (even when the node count is an
    exact multiple of batch_size); callers stop via end() instead.
    """
    full_batches, _ = divmod(len(self.train_nodes), self.batch_size)
    return full_batches + 1
def next_minibatch_feed_dict(self):
    """Advance the batch pointer and build the next training feed dict."""
    lo = self.batch_num * self.batch_size
    hi = min(lo + self.batch_size, len(self.train_nodes))
    self.batch_num += 1
    return self.batch_feed_dict(self.train_nodes[lo:hi])
def incremental_embed_feed_dict(self, size, iter_num):
    """Feed dict for the iter_num-th slice of ALL nodes (embedding dump).

    Returns ((feed_dict, labels), finished, node_subset).
    """
    all_nodes = self.nodes
    lo = iter_num * size
    window = all_nodes[lo:min(lo + size, len(all_nodes))]
    finished = (iter_num + 1) * size >= len(all_nodes)
    return self.batch_feed_dict(window), finished, window
def shuffle(self):
    """Start a fresh epoch: reset the batch pointer and permute the
    training nodes in place.
    """
    self.batch_num = 0
    self.train_nodes = np.random.permutation(self.train_nodes)
class SupervisedEdgeMinibatchIterator(object):
    """Minibatch iterator over batches of sampled edges or random pairs of
    co-occurring nodes (e.g. from random walks).

    NB: the methods without suffix '_sup' or '_unsup' consider all nodes
    regardless of the labelling (eventually returning an all-0 class vector
    for unlabeled entries).  Instead '_sup' methods consider exclusively
    labeled nodes and '_unsup' methods exclusively unlabeled ones.

    Parameters
    ----------
    G : networkx graph
    id2idx : dict mapping node ids to index in the feature tensor
    placeholders : tensorflow placeholders object
    label_map : map from node ids to class values (integer or list)
    num_classes : number of output classes
    context_pairs : if not None, a list of co-occurring node pairs
        (from random walks) used in place of the training-graph edges
    batch_size : size of the minibatches
    max_degree : maximum size of the downsampled adjacency lists
    n2v_retrain : signals that the iterator is being used to add new
        embeddings to a n2v model
    fixed_n2v : signals that the iterator is being used to retrain n2v
        with only existing nodes as context
    complete_validation : if True the validation graph contains train
        nodes as well
    """

    def __init__(self, G, id2idx, placeholders, label_map, num_classes,
                 context_pairs=None, batch_size=100, max_degree=25,
                 n2v_retrain=False, fixed_n2v=False,
                 complete_validation=True, **kwargs):
        self.G = G
        self.id2idx = id2idx
        self.placeholders = placeholders
        self.batch_size = batch_size
        self.max_degree = max_degree
        # Independent batch pointers for the mixed / supervised /
        # unsupervised training streams.
        self.batch_num = 0
        self.batch_num_sup = 0
        self.batch_num_unsup = 0
        self.label_map = label_map
        self.num_classes = num_classes
        self.labeled_nodes = [n for n in G.nodes() if G.node[n]['labeled']]
        self.unlabeled_nodes = [n for n in G.nodes() if not G.node[n]['labeled']]
        self.nodes = np.random.permutation(G.nodes())
        self.adj, self.deg = self.construct_adj()
        self.test_adj = self.construct_test_adj()
        # Group the labeled nodes by their (argmax) class; used to build the
        # same-class "label adjacency" tables below.
        classes_dict = {}
        for node in self.labeled_nodes:
            classes_dict.setdefault(np.argmax(self.label_map[node]), []).append(node)
        self.label_adj, self.label_deg = self.construct_label_adj(classes_dict)
        self.test_label_adj = self.construct_test_label_adj(classes_dict)
        train_nodes = [n for n in G.nodes()
                       if not G.node[n]['test'] and not G.node[n]['val']]
        test_nodes = [n for n in G.nodes()
                      if G.node[n]['test'] or G.node[n]['val']]
        if context_pairs is None:
            # No random-walk co-occurrences supplied: fall back to the edges
            # of the subgraph induced by the training nodes.
            G_train = G.subgraph(train_nodes)
            train_edges = [e for e in G_train.edges()]
        else:
            train_edges = context_pairs
        self.train_edges = np.random.permutation(train_edges)
        # Remove edges referring to missing nodes.
        self.train_edges, missing = self._remove_isolated(self.train_edges)
        print("Unexpected missing nodes:", missing)
        # Split the training edges by whether their FIRST endpoint is labeled.
        self.train_edges_sup = [edge for edge in self.train_edges
                                if G.node[edge[0]]['labeled']]
        self.train_edges_unsup = [edge for edge in self.train_edges
                                  if not G.node[edge[0]]['labeled']]
        # If complete_validation is true, the validation graph contains train
        # nodes as well.
        if complete_validation:
            self.val_edges = G.edges()
        else:
            self.val_edges = [e for e in G.edges() if e[0] in test_nodes]
        # Put the validation nodes always as first element
        # (DOES MESS UP DIRECTED GRAPHS!)
        self.val_edges.extend([(e[1], e[0]) for e in G.edges() if e[1] in test_nodes])
        self.val_edges_sup = [edge for edge in self.val_edges
                              if G.node[edge[0]]['labeled']]
        self.val_edges_unsup = [edge for edge in self.val_edges
                                if not G.node[edge[0]]['labeled']]
        self.val_set_size = len(self.val_edges)
        print(len(self.train_edges), 'train edges -', len(self.train_edges_sup),
              'supervised and', len(self.train_edges_unsup), 'unsupervised')
        print(len(self.val_edges), 'validation edges -', len(self.val_edges_sup),
              'supervised and', len(self.val_edges_unsup), 'unsupervised')
        print(len(train_nodes), 'train nodes -',
              len([n for n in train_nodes if G.node[n]['labeled']]), 'labeled and',
              len([n for n in train_nodes if not G.node[n]['labeled']]), 'unlabeled')
        print(len(test_nodes), 'test nodes -',
              len([n for n in test_nodes if G.node[n]['labeled']]), 'labeled and',
              len([n for n in test_nodes if not G.node[n]['labeled']]), 'unlabeled')

    def _n2v_prune(self, edges):
        """Drop edges whose second endpoint is a val/test node."""
        is_val = lambda n: self.G.node[n]["val"] or self.G.node[n]["test"]
        return [e for e in edges if not is_val(e[1])]

    def _remove_isolated(self, edge_list):
        """Filter out edges that touch zero-degree (train-isolated) nodes and
        count edges whose endpoints are missing from the graph entirely.

        Returns (kept_edges, num_missing).
        """
        new_edge_list = []
        missing = 0
        for n1, n2 in edge_list:
            if not n1 in self.G.node or not n2 in self.G.node:
                missing += 1
                continue
            # NOTE(review): the parenthesization of the two 'and' clauses
            # below looks suspicious -- it probably intends
            # `not (test or val)` per endpoint.  Kept exactly as in the
            # original to preserve behavior; TODO confirm intent.
            if (self.deg[self.id2idx[n1]] == 0 or self.deg[self.id2idx[n2]] == 0) \
                    and (not self.G.node[n1]['test'] or self.G.node[n1]['val']) \
                    and (not self.G.node[n2]['test'] or self.G.node[n2]['val']):
                continue
            else:
                new_edge_list.append((n1, n2))
        return new_edge_list, missing

    def _make_label_vec(self, node):
        """Return the label vector for `node`: the list itself when labels
        are already vectors, otherwise a one-hot vector of num_classes.
        """
        label = self.label_map[node]
        if isinstance(label, list):
            label_vec = np.array(label)
        else:
            label_vec = np.zeros((self.num_classes))
            class_ind = self.label_map[node]
            label_vec[class_ind] = 1
        return label_vec

    def construct_adj(self):
        """Build the train-time fixed-degree adjacency table and the degree
        vector, considering only train nodes and non-removed edges.

        Returns (adj, deg) where adj is (N+1, max_degree) with the last row
        acting as a dummy neighbor for padding.
        """
        adj = len(self.id2idx) * np.ones((len(self.id2idx) + 1, self.max_degree))
        deg = np.zeros((len(self.id2idx),))
        for nodeid in self.G.nodes():
            if self.G.node[nodeid]['test'] or self.G.node[nodeid]['val']:
                continue
            neighbors = np.array([self.id2idx[neighbor]
                                  for neighbor in self.G.neighbors(nodeid)
                                  if (not self.G[nodeid][neighbor]['train_removed'])])
            deg[self.id2idx[nodeid]] = len(neighbors)
            if len(neighbors) == 0:
                continue  # isolated: keep the dummy-neighbor padding
            if len(neighbors) > self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
            elif len(neighbors) < self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
            adj[self.id2idx[nodeid], :] = neighbors
        return adj, deg

    def construct_test_adj(self):
        """Build the test-time fixed-degree adjacency table over all nodes
        and all edges (no train/val/test filtering).
        """
        adj = len(self.id2idx) * np.ones((len(self.id2idx) + 1, self.max_degree))
        for nodeid in self.G.nodes():
            neighbors = np.array([self.id2idx[neighbor]
                                  for neighbor in self.G.neighbors(nodeid)])
            if len(neighbors) == 0:
                continue
            if len(neighbors) > self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
            elif len(neighbors) < self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
            adj[self.id2idx[nodeid], :] = neighbors
        return adj

    def construct_label_adj(self, classes_dict):
        """Return a matrix associating every labeled train node with nodes of
        the same class, plus its same-class degree vector.

        Nodes not belonging to any class simply keep their neighborhood row,
        since the table starts as a copy of the adjacency matrix.
        """
        # BUGFIX: copy instead of aliasing -- the original wrote label rows
        # straight into self.adj, corrupting the plain adjacency table.
        adj = self.adj.copy()
        deg = np.zeros((len(self.id2idx),))
        for nodeid in self.labeled_nodes:
            if self.G.node[nodeid]['test'] or self.G.node[nodeid]['val']:
                continue
            # BUGFIX: collect feature-tensor INDICES of same-class peers
            # (excluding the node itself).  The original built a boolean
            # mask (`id != nodeid`), filling the rows with 0/1 garbage.
            peers = np.array([self.id2idx[other]
                              for other in classes_dict[np.argmax(self.label_map[nodeid])]
                              if other != nodeid])
            deg[self.id2idx[nodeid]] = len(peers)
            if len(peers) == 0:
                continue
            if len(peers) > self.max_degree:
                peers = np.random.choice(peers, self.max_degree, replace=False)
            elif len(peers) < self.max_degree:
                peers = np.random.choice(peers, self.max_degree, replace=True)
            adj[self.id2idx[nodeid], :] = peers
        return adj
| 44.082707
| 144
| 0.622719
| 4,090
| 29,315
| 4.270416
| 0.06088
| 0.036585
| 0.030516
| 0.040307
| 0.861789
| 0.835967
| 0.782835
| 0.753235
| 0.719455
| 0.699072
| 0
| 0.011577
| 0.269282
| 29,315
| 664
| 145
| 44.149096
| 0.803791
| 0.123077
| 0
| 0.708911
| 0
| 0
| 0.021397
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.10099
| false
| 0
| 0.005941
| 0.015842
| 0.211881
| 0.017822
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
89341c6e869cc856d2fb785ff4b6174c1ba75917
| 102,482
|
py
|
Python
|
pyqmri/operator.py
|
agahkarakuzu/PyQMRI
|
30871de4cc15dee573f9fa71990b1a4331a690f2
|
[
"Apache-2.0"
] | 1
|
2021-09-15T23:37:29.000Z
|
2021-09-15T23:37:29.000Z
|
pyqmri/operator.py
|
agahkarakuzu/PyQMRI
|
30871de4cc15dee573f9fa71990b1a4331a690f2
|
[
"Apache-2.0"
] | null | null | null |
pyqmri/operator.py
|
agahkarakuzu/PyQMRI
|
30871de4cc15dee573f9fa71990b1a4331a690f2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Module holding the classes for different linear Operators."""
from abc import ABC, abstractmethod
import pyopencl.array as clarray
import numpy as np
from pyqmri.transforms import PyOpenCLnuFFT as CLnuFFT
import pyqmri.streaming as streaming
class Operator(ABC):
    """Abstract base class for linear Operators used in the optimization.

    This class serves as the base class for all linear operators used in
    the various optimization algorithms. It requires to implement a forward
    and backward application in and out of place.

    Parameters
    ----------
    par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj),
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
    prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator.
    DTYPE : numpy.dtype, numpy.complex64
        Complex working precision.
    DTYPE_real : numpy.dtype, numpy.float32
        Real working precision.

    Attributes
    ----------
    NScan : int
        Number of total measurements (Scans)
    NC : int
        Number of complex coils
    NSlice : int
        Number of Slices
    dimX : int
        X dimension of the parameter maps
    dimY : int
        Y dimension of the parameter maps
    N : int
        Number of samples per readout
    Nproj : int
        Number of readouts
    unknowns_TGV : int
        Number of unknowns which should be regularized with TGV. It is assumed
        that these occur first in the unknown vector. Currently at least 1
        TGV unknown is required.
    unknowns_H1 : int
        Number of unknowns which should be regularized with H1. It is assumed
        that these occur after all TGV unknowns in the unknown vector.
        Currently this number can be zero which implies that no H1
        regularization is used.
    unknowns : int
        The sum of TGV and H1 unknowns.
    ctx : list of PyOpenCL.Context
        The context for the PyOpenCL computations. If streamed operations are
        used a list of ctx is required. One for each computation device.
    queue : list of PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels. If streamed operations
        are used a list of queues is required. Four for each computation
        device.
    dz : float
        The ratio between the physical X,Y dimensions vs the Z dimension.
        This allows for anisotropic regularization along the Z dimension.
    num_dev : int
        Number of compute devices
    NUFFT : PyQMRI.transforms.PyOpenCLnuFFT
        A PyOpenCLnuFFT object to perform forward and backward transformations
        from image to k-space and vice versa.
    prg : PyOpenCL.Program
        The PyOpenCL program containing all compiled kernels.
    DTYPE : numpy.dtype
        Complex working precision. Currently single precision only.
    DTYPE_real : numpy.dtype
        Real working precision. Currently single precision only.
    """

    def __init__(self, par, prg, DTYPE=np.complex64, DTYPE_real=np.float32):
        # Copy the fitting geometry and device handles out of the shared
        # parameter dict.
        self.NSlice = par["NSlice"]
        self.NScan = par["NScan"]
        self.dimX = par["dimX"]
        self.dimY = par["dimY"]
        self.N = par["N"]
        self.NC = par["NC"]
        self.Nproj = par["Nproj"]
        self.ctx = par["ctx"]
        self.queue = par["queue"]
        self.unknowns_TGV = par["unknowns_TGV"]
        self.unknowns_H1 = par["unknowns_H1"]
        self.unknowns = par["unknowns"]
        self._dz = par["dz"]
        # NOTE(review): derived via len(), so par["num_dev"] is presumably a
        # list of device ids -- confirm against callers.
        self.num_dev = len(par["num_dev"])
        self._tmp_result = []
        self.NUFFT = []
        self.prg = prg
        self.DTYPE = DTYPE
        self.DTYPE_real = DTYPE_real
        # Default (non-streamed): process all slices in one block, no overlap.
        self.par_slices = self.NSlice
        self._overlap = 0

    @abstractmethod
    def fwd(self, out, inp, **kwargs):
        """Forward operator application in-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
        out : PyOpenCL.Array
            The complex measurement space data which is the result of the
            computation.
        inp : PyOpenCL.Array
            The complex parameter space data which is used as input.

        Returns
        -------
        PyOpenCL.Event
            A PyOpenCL event to wait for.
        """
        ...

    @abstractmethod
    def adj(self, out, inp, **kwargs):
        """Adjoint operator application in-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
        out : PyOpenCL.Array
            The complex parameter space data which is the result of the
            computation.
        inp : PyOpenCL.Array
            The complex measurement space data which is used as input.

        Returns
        -------
        PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        ...

    @abstractmethod
    def fwdoop(self, inp, **kwargs):
        """Forward operator application out-of-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it as
        the result.

        Parameters
        ----------
        inp : PyOpenCL.Array
            The complex parameter space data which is used as input.

        Returns
        -------
        PyOpenCL.Array: A PyOpenCL array containing the result of the
            computation.
        """
        ...

    @abstractmethod
    def adjoop(self, inp, **kwargs):
        """Adjoint operator application out-of-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it as
        the result.

        Parameters
        ----------
        inp : PyOpenCL.Array
            The complex measurement space which is used as input.

        Returns
        -------
        PyOpenCL.Array: A PyOpenCL array containing the result of the
            computation.
        """
        ...

    @staticmethod
    def MRIOperatorFactory(par,
                           prg,
                           DTYPE,
                           DTYPE_real,
                           trafo=False,
                           imagespace=False,
                           SMS=False,
                           streamed=False):
        """MRI forward/adjoint operator factory method.

        Parameters
        ----------
        par : dict A python dict containing the necessary information to
            setup the object. Needs to contain the number of slices (NSlice),
            number of scans (NScan), image dimensions (dimX, dimY), number of
            coils (NC), sampling points (N) and read outs (NProj),
            a PyOpenCL queue (queue) and the complex coil
            sensitivities (C).
        prg : PyOpenCL.Program
            The PyOpenCL.Program object containing the necessary kernels to
            execute the linear Operator.
        DTYPE : numpy.dtype, numpy.complex64
            Complex working precision.
        DTYPE_real : numpy.dtype, numpy.float32
            Real working precision.
        trafo : bool, false
            Select between radial (True) or cartesian FFT (false).
        imagespace : bool, false
            Select between fitting in imagespace (True) or k-space (false).
        SMS : bool, false
            Select between simultaneous multi-slice reconstruction or standard.
        streamed : bool, false
            Use standard reconstruction (false) or streaming of memory blocks
            to the compute device (true). Only use this if data does not
            fit in one block.

        Returns
        -------
        PyQMRI.Operator
            A specialized instance of a PyQMRI.Operator to perform forward
            and adjoint operations for fitting.
        PyQMRI.NUFFT
            An instance of the used (nu-)FFT if k-space fitting is performed,
            None otherwise.
        """
        # Dispatch on the three orthogonal switches: streamed vs. in-memory,
        # imagespace vs. k-space, SMS vs. standard.  Note the streamed
        # variants take the full kernel list `prg`, the others `prg[0]`.
        if streamed:
            if imagespace:
                op = OperatorImagespaceStreamed(
                    par, prg,
                    DTYPE=DTYPE,
                    DTYPE_real=DTYPE_real)
                FT = None
            else:
                if SMS:
                    op = OperatorKspaceSMSStreamed(
                        par,
                        prg,
                        DTYPE=DTYPE,
                        DTYPE_real=DTYPE_real)
                else:
                    op = OperatorKspaceStreamed(
                        par,
                        prg,
                        trafo=trafo,
                        DTYPE=DTYPE,
                        DTYPE_real=DTYPE_real)
                FT = op.NUFFT
        else:
            if imagespace:
                op = OperatorImagespace(
                    par, prg[0],
                    DTYPE=DTYPE,
                    DTYPE_real=DTYPE_real)
                FT = None
            else:
                if SMS:
                    op = OperatorKspaceSMS(
                        par,
                        prg[0],
                        DTYPE=DTYPE,
                        DTYPE_real=DTYPE_real)
                else:
                    op = OperatorKspace(
                        par,
                        prg[0],
                        trafo=trafo,
                        DTYPE=DTYPE,
                        DTYPE_real=DTYPE_real)
                FT = op.NUFFT
        return op, FT

    @staticmethod
    def GradientOperatorFactory(par,
                                prg,
                                DTYPE,
                                DTYPE_real,
                                streamed=False):
        """Gradient forward/adjoint operator factory method.

        Parameters
        ----------
        par : dict A python dict containing the necessary information to
            setup the object. Needs to contain the number of slices (NSlice),
            number of scans (NScan), image dimensions (dimX, dimY), number of
            coils (NC), sampling points (N) and read outs (NProj),
            a PyOpenCL queue (queue) and the complex coil
            sensitivities (C).
        prg : PyOpenCL.Program
            The PyOpenCL.Program object containing the necessary kernels to
            execute the linear Operator.
        DTYPE : numpy.dtype, numpy.complex64
            Complex working precision.
        DTYPE_real : numpy.dtype, numpy.float32
            Real working precision.
        streamed : bool, false
            Use standard reconstruction (false) or streaming of memory blocks
            to the compute device (true). Only use this if data does not
            fit in one block.

        Returns
        -------
        PyQMRI.Operator
            A specialized instance of a PyQMRI.Operator to perform forward
            and adjoint gradient calculations.
        """
        if streamed:
            op = OperatorFiniteGradientStreamed(par,
                                                prg,
                                                DTYPE,
                                                DTYPE_real)
        else:
            op = OperatorFiniteGradient(par,
                                        prg[0],
                                        DTYPE,
                                        DTYPE_real)
        return op

    @staticmethod
    def SymGradientOperatorFactory(par,
                                   prg,
                                   DTYPE,
                                   DTYPE_real,
                                   streamed=False):
        """Symmetrized Gradient forward/adjoint operator factory method.

        Parameters
        ----------
        par : dict A python dict containing the necessary information to
            setup the object. Needs to contain the number of slices (NSlice),
            number of scans (NScan), image dimensions (dimX, dimY), number of
            coils (NC), sampling points (N) and read outs (NProj),
            a PyOpenCL queue (queue) and the complex coil
            sensitivities (C).
        prg : PyOpenCL.Program
            The PyOpenCL.Program object containing the necessary kernels to
            execute the linear Operator.
        DTYPE : numpy.dtype, numpy.complex64
            Complex working precision.
        DTYPE_real : numpy.dtype, numpy.float32
            Real working precision.
        streamed : bool, false
            Use standard reconstruction (false) or streaming of memory blocks
            to the compute device (true). Only use this if data does not
            fit in one block.

        Returns
        -------
        PyQMRI.Operator
            A specialized instance of a PyQMRI.Operator to perform forward
            and adjoint symmetrized gradient calculations.
        """
        if streamed:
            op = OperatorFiniteSymGradientStreamed(par,
                                                   prg,
                                                   DTYPE,
                                                   DTYPE_real)
        else:
            op = OperatorFiniteSymGradient(par,
                                           prg[0],
                                           DTYPE,
                                           DTYPE_real)
        return op

    def _defineoperator(self,
                        functions,
                        outp,
                        inp,
                        reverse_dir=False,
                        posofnorm=None,
                        slices=None):
        # Helper for the streamed subclasses: wrap the given kernel functions
        # into a streaming.Stream over `slices` slices (defaults to all).
        if slices is None:
            slices = self.NSlice
        return streaming.Stream(
            functions,
            outp,
            inp,
            self.par_slices,
            self._overlap,
            slices,
            self.queue,
            self.num_dev,
            reverse_dir,
            posofnorm,
            DTYPE=self.DTYPE)
class OperatorImagespace(Operator):
    """Imagespace based Operator.

    This class serves as linear operator between parameter and imagespace.
    Use this operator if you want to perform complex parameter fitting from
    complex image space data without the need of performing FFTs.

    Parameters
    ----------
    par : dict A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj),
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
    prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator.
    DTYPE : numpy.dtype, numpy.complex64
        Complex working precision.
    DTYPE_real : numpy.dtype, numpy.float32
        Real working precision.

    Attributes
    ----------
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    """

    def __init__(self, par, prg, DTYPE=np.complex64, DTYPE_real=np.float32):
        super().__init__(par, prg, DTYPE, DTYPE_real)
        # Non-streamed operator: work on the first (only) device.
        self.queue = self.queue[0]
        self.ctx = self.ctx[0]

    def fwd(self, out, inp, **kwargs):
        """Forward operator application in-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
        out : PyOpenCL.Array
            The complex measurement space data which is the result of the
            computation.
        inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
        wait_for : list of PyOpenCL.Event
            A list of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Event
            A PyOpenCL event to wait for.
        """
        # Idiomatic replacement for the original `if "wait_for" in
        # kwargs.keys(): ... else: wait_for = []` dance.
        wait_for = kwargs.get("wait_for", [])
        return self.prg.operator_fwd_imagespace(
            self.queue, (self.NSlice, self.dimY, self.dimX), None,
            out.data, inp[0].data, inp[2].data,
            np.int32(self.NScan),
            np.int32(self.unknowns),
            wait_for=inp[0].events + out.events + wait_for)

    def fwdoop(self, inp, **kwargs):
        """Forward operator application out-of-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it as
        the result.

        Parameters
        ----------
        inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
        wait_for : list of PyOpenCL.Event
            A list of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Array: A PyOpenCL array containing the result of the
            computation.
        """
        wait_for = kwargs.get("wait_for", [])
        tmp_result = clarray.empty(
            self.queue, (self.NScan, self.NSlice, self.dimY, self.dimX),
            self.DTYPE, "C")
        tmp_result.add_event(self.prg.operator_fwd_imagespace(
            self.queue, (self.NSlice, self.dimY, self.dimX), None,
            tmp_result.data, inp[0].data, inp[2].data,
            np.int32(self.NScan),
            np.int32(self.unknowns),
            wait_for=inp[0].events + wait_for))
        return tmp_result

    def adj(self, out, inp, **kwargs):
        """Adjoint operator application in-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
        out : PyOpenCL.Array
            The complex parameter space data which is the result of the
            computation.
        inp : PyOpenCL.Array
            The complex measurement space data which is used as input.
        wait_for : list of PyOpenCL.Event
            A list of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        wait_for = kwargs.get("wait_for", [])
        return self.prg.operator_ad_imagespace(
            out.queue, (self.NSlice, self.dimY, self.dimX), None,
            out.data, inp[0].data, inp[2].data,
            np.int32(self.NScan),
            np.int32(self.unknowns),
            wait_for=wait_for + inp[0].events + out.events)

    def adjoop(self, inp, **kwargs):
        """Adjoint operator application out-of-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it as
        the result.

        Parameters
        ----------
        inp : PyOpenCL.Array
            The complex measurement space which is used as input.
        wait_for : list of PyOpenCL.Event
            A list of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Array: A PyOpenCL array containing the result of the
            computation.
        """
        wait_for = kwargs.get("wait_for", [])
        out = clarray.empty(
            self.queue, (self.unknowns, self.NSlice, self.dimY, self.dimX),
            dtype=self.DTYPE)
        # Block until the kernel finishes, since the caller gets the array
        # rather than an event.
        self.prg.operator_ad_imagespace(
            out.queue, (self.NSlice, self.dimY, self.dimX), None,
            out.data, inp[0].data, inp[2].data,
            np.int32(self.NScan),
            np.int32(self.unknowns),
            wait_for=wait_for + inp[0].events + out.events).wait()
        return out

    def adjKyk1(self, out, inp, **kwargs):
        """Apply the linear operator from image space to parameter space.

        This method fully implements the combined linear operator
        consisting of the data part as well as the TGV regularization part.

        Parameters
        ----------
        out : PyOpenCL.Array
            The complex parameter space data which is the result of the
            computation.
        inp : PyOpenCL.Array
            The complex image space data which is used as input.
        wait_for : list of PyOpenCL.Event
            A list of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        wait_for = kwargs.get("wait_for", [])
        return self.prg.update_Kyk1_imagespace(
            self.queue, (self.NSlice, self.dimY, self.dimX), None,
            out.data, inp[0].data, inp[3].data, inp[1].data,
            np.int32(self.NScan),
            inp[4].data,
            np.int32(self.unknowns),
            self.DTYPE_real(self._dz),
            wait_for=(inp[0].events + out.events
                      + inp[1].events + wait_for))
class OperatorKspace(Operator):
    """k-Space based Operator.

    This class serves as linear operator between parameter and k-space.

    Use this operator if you want to perform complex parameter fitting from
    complex k-space data. The type of fft is defined through the NUFFT object.
    The NUFFT object can also be used for simple Cartesian FFTs.

    Parameters
    ----------
      par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
      prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator.
      DTYPE : numpy.dtype, numpy.complex64
        Complex working precission.
      DTYPE_real : numpy.dtype, numpy.float32
        Real working precission.
      trafo : bool, true
        Switch between cartesian (false) and non-cartesian FFT
        (True, default).

    Attributes
    ----------
      ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
      queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
      NUFFT : PyQMRI.PyOpenCLnuFFT
        The (nu) FFT used for fitting.
    """

    def __init__(self, par, prg, DTYPE=np.complex64,
                 DTYPE_real=np.float32, trafo=True):
        super().__init__(par, prg, DTYPE, DTYPE_real)
        # Non-streamed operator: work on a single context/queue.
        self.queue = self.queue[0]
        self.ctx = self.ctx[0]
        # Scratch buffer for the coil-weighted images: written by
        # operator_fwd before the FFT, and by FFTH before operator_ad.
        self._tmp_result = clarray.empty(
            self.queue, (self.NScan, self.NC,
                         self.NSlice, self.dimY, self.dimX),
            self.DTYPE, "C")
        if not trafo:
            # Cartesian sampling: data dimensions match image dimensions.
            self.Nproj = self.dimY
            self.N = self.dimX
        self.NUFFT = CLnuFFT.create(self.ctx,
                                    self.queue,
                                    par,
                                    radial=trafo,
                                    DTYPE=DTYPE,
                                    DTYPE_real=DTYPE_real)

    def fwd(self, out, inp, **kwargs):
        """Forward operator application in-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
          out : PyOpenCL.Array
            The complex measurement space data which is the result of the
            computation.
          inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Event
            A PyOpenCL event to wait for.
        """
        # Idiomatic keyword handling instead of "in kwargs.keys()".
        wait_for = kwargs.get("wait_for", [])
        self._tmp_result.add_event(
            self.prg.operator_fwd(
                self.queue,
                (self.NSlice, self.dimY, self.dimX),
                None,
                self._tmp_result.data, inp[0].data,
                inp[1].data,
                inp[2].data, np.int32(self.NC),
                np.int32(self.NScan),
                np.int32(self.unknowns),
                wait_for=(self._tmp_result.events + inp[0].events
                          + wait_for)))
        # Also wait on pending events of ``out`` before overwriting it,
        # consistent with the sibling SMS operator.
        return self.NUFFT.FFT(
            out,
            self._tmp_result,
            wait_for=(wait_for + self._tmp_result.events
                      + out.events))

    def fwdoop(self, inp, **kwargs):
        """Forward operator application out-of-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it
        as the result.

        Parameters
        ----------
          inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Array: A PyOpenCL array containing the result of the
          computation.
        """
        wait_for = kwargs.get("wait_for", [])
        self._tmp_result.add_event(
            self.prg.operator_fwd(
                self.queue,
                (self.NSlice, self.dimY, self.dimX),
                None,
                self._tmp_result.data, inp[0].data,
                inp[1].data,
                inp[2].data, np.int32(self.NC),
                np.int32(self.NScan),
                np.int32(self.unknowns),
                wait_for=(self._tmp_result.events + inp[0].events
                          + wait_for)))
        tmp_sino = clarray.empty(
            self.queue,
            (self.NScan, self.NC, self.NSlice, self.Nproj, self.N),
            self.DTYPE, "C")
        tmp_sino.add_event(
            self.NUFFT.FFT(tmp_sino, self._tmp_result))
        return tmp_sino

    def adj(self, out, inp, **kwargs):
        """Adjoint operator application in-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
          out : PyOpenCL.Array
            The complex parameter space data which is the result of the
            computation.
          inp : PyOpenCL.Array
            The complex measurement space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        wait_for = kwargs.get("wait_for", [])
        self._tmp_result.add_event(
            self.NUFFT.FFTH(
                self._tmp_result, inp[0], wait_for=(wait_for
                                                    + inp[0].events)))
        return self.prg.operator_ad(
            self.queue, (self.NSlice, self.dimY, self.dimX), None,
            out.data, self._tmp_result.data, inp[1].data,
            inp[2].data, np.int32(self.NC),
            np.int32(self.NScan),
            np.int32(self.unknowns),
            wait_for=self._tmp_result.events + out.events)

    def adjoop(self, inp, **kwargs):
        """Adjoint operator application out-of-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it
        as the result.

        Parameters
        ----------
          inp : PyOpenCL.Array
            The complex measurement space which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Array: A PyOpenCL array containing the result of the
          computation.
        """
        wait_for = kwargs.get("wait_for", [])
        self._tmp_result.add_event(
            self.NUFFT.FFTH(
                self._tmp_result, inp[0], wait_for=(wait_for
                                                    + inp[0].events)))
        out = clarray.empty(
            self.queue, (self.unknowns, self.NSlice, self.dimY, self.dimX),
            dtype=self.DTYPE)
        # Blocking call: .wait() guarantees a valid result on return.
        self.prg.operator_ad(
            out.queue, (self.NSlice, self.dimY, self.dimX), None,
            out.data, self._tmp_result.data, inp[1].data,
            inp[2].data, np.int32(self.NC),
            np.int32(self.NScan),
            np.int32(self.unknowns),
            wait_for=(self._tmp_result.events
                      + out.events)).wait()
        return out

    def adjKyk1(self, out, inp, **kwargs):
        """Apply the linear operator from parameter space to k-space.

        This method fully implements the combined linear operator
        consisting of the data part as well as the TGV regularization part.

        Parameters
        ----------
          out : PyOpenCL.Array
            The complex parameter space data which is used as input.
          inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        wait_for = kwargs.get("wait_for", [])
        self._tmp_result.add_event(
            self.NUFFT.FFTH(
                self._tmp_result, inp[0], wait_for=(wait_for
                                                    + inp[0].events)))
        return self.prg.update_Kyk1(
            self.queue, (self.NSlice, self.dimY, self.dimX), None,
            out.data, self._tmp_result.data, inp[2].data,
            inp[3].data, inp[1].data, np.int32(self.NC),
            np.int32(self.NScan),
            inp[4].data,
            np.int32(self.unknowns), self.DTYPE_real(self._dz),
            wait_for=(self._tmp_result.events +
                      out.events + inp[1].events))
class OperatorKspaceSMS(Operator):
    """k-Space based Operator for SMS reconstruction.

    This class serves as linear operator between parameter and k-space.
    It implements simultaneous-multi-slice (SMS) reconstruction.

    Use this operator if you want to perform complex parameter fitting from
    complex k-space data measured with SMS. Currently only Cartesian FFTs
    are supported.

    Parameters
    ----------
      par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
      prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator.
      DTYPE : numpy.dtype, numpy.complex64
        Complex working precission.
      DTYPE_real : numpy.dtype, numpy.float32
        Real working precission.

    Attributes
    ----------
      packs : int
        Number of SMS packs.
      ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
      queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
      NUFFT : PyQMRI.PyOpenCLnuFFT
        The (nu) FFT used for fitting.
    """

    def __init__(self, par, prg, DTYPE=np.complex64,
                 DTYPE_real=np.float32):
        super().__init__(par, prg, DTYPE, DTYPE_real)
        # Non-streamed operator: work on a single context/queue.
        self.queue = self.queue[0]
        self.ctx = self.ctx[0]
        # Total number of SMS packs across all groups.
        self.packs = par["packs"]*par["numofpacks"]
        # Scratch buffer for the coil-weighted images: written by
        # operator_fwd before the FFT, and by FFTH before operator_ad.
        self._tmp_result = clarray.empty(
            self.queue, (self.NScan, self.NC,
                         self.NSlice, self.dimY, self.dimX),
            self.DTYPE, "C")
        # SMS reconstruction is Cartesian only.
        self.Nproj = self.dimY
        self.N = self.dimX
        self.NUFFT = CLnuFFT.create(self.ctx,
                                    self.queue,
                                    par,
                                    radial=False,
                                    SMS=True,
                                    DTYPE=DTYPE,
                                    DTYPE_real=DTYPE_real)

    def fwd(self, out, inp, **kwargs):
        """Forward operator application in-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
          out : PyOpenCL.Array
            The complex measurement space data which is the result of the
            computation.
          inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Event
            A PyOpenCL event to wait for.
        """
        # Idiomatic keyword handling instead of "in kwargs.keys()".
        wait_for = kwargs.get("wait_for", [])
        self._tmp_result.add_event(
            self.prg.operator_fwd(
                self.queue,
                (self.NSlice, self.dimY, self.dimX),
                None,
                self._tmp_result.data, inp[0].data,
                inp[1].data,
                inp[2].data, np.int32(self.NC),
                np.int32(self.NScan),
                np.int32(self.unknowns),
                wait_for=(self._tmp_result.events + inp[0].events
                          + wait_for)))
        # Include the caller-supplied wait_for as well, consistent with
        # the non-SMS OperatorKspace.fwd.
        return self.NUFFT.FFT(
            out,
            self._tmp_result,
            wait_for=(wait_for + self._tmp_result.events
                      + out.events))

    def fwdoop(self, inp, **kwargs):
        """Forward operator application out-of-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it
        as the result.

        Parameters
        ----------
          inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Array: A PyOpenCL array containing the result of the
          computation.
        """
        wait_for = kwargs.get("wait_for", [])
        self._tmp_result.add_event(
            self.prg.operator_fwd(
                self.queue,
                (self.NSlice, self.dimY, self.dimX),
                None,
                self._tmp_result.data, inp[0].data,
                inp[1].data,
                inp[2].data, np.int32(self.NC),
                np.int32(self.NScan),
                np.int32(self.unknowns),
                wait_for=(self._tmp_result.events + inp[0].events
                          + wait_for)))
        # SMS data holds ``packs`` collapsed slice groups instead of
        # the full slice dimension.
        tmp_sino = clarray.empty(
            self.queue,
            (self.NScan, self.NC, self.packs, self.Nproj, self.N),
            self.DTYPE, "C")
        tmp_sino.add_event(
            self.NUFFT.FFT(tmp_sino, self._tmp_result))
        return tmp_sino

    def adj(self, out, inp, **kwargs):
        """Adjoint operator application in-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
          out : PyOpenCL.Array
            The complex parameter space data which is the result of the
            computation.
          inp : PyOpenCL.Array
            The complex measurement space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        wait_for = kwargs.get("wait_for", [])
        self._tmp_result.add_event(
            self.NUFFT.FFTH(
                self._tmp_result, inp[0], wait_for=(wait_for
                                                    + inp[0].events)))
        return self.prg.operator_ad(
            self.queue, (self.NSlice, self.dimY, self.dimX), None,
            out.data, self._tmp_result.data, inp[1].data,
            inp[2].data, np.int32(self.NC),
            np.int32(self.NScan),
            np.int32(self.unknowns),
            wait_for=self._tmp_result.events + out.events)

    def adjoop(self, inp, **kwargs):
        """Adjoint operator application out-of-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it
        as the result.

        Parameters
        ----------
          inp : PyOpenCL.Array
            The complex measurement space which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Array: A PyOpenCL array containing the result of the
          computation.
        """
        wait_for = kwargs.get("wait_for", [])
        self._tmp_result.add_event(
            self.NUFFT.FFTH(
                self._tmp_result, inp[0], wait_for=(wait_for
                                                    + inp[0].events)))
        out = clarray.empty(
            self.queue, (self.unknowns, self.NSlice, self.dimY, self.dimX),
            dtype=self.DTYPE)
        # Blocking call: .wait() guarantees a valid result on return.
        self.prg.operator_ad(
            out.queue, (self.NSlice, self.dimY, self.dimX), None,
            out.data, self._tmp_result.data, inp[1].data,
            inp[2].data, np.int32(self.NC),
            np.int32(self.NScan),
            np.int32(self.unknowns),
            wait_for=self._tmp_result.events + out.events).wait()
        return out

    def adjKyk1(self, out, inp, **kwargs):
        """Apply the linear operator from parameter space to k-space.

        This method fully implements the combined linear operator
        consisting of the data part as well as the TGV regularization part.

        Parameters
        ----------
          out : PyOpenCL.Array
            The complex parameter space data which is used as input.
          inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        wait_for = kwargs.get("wait_for", [])
        self._tmp_result.add_event(
            self.NUFFT.FFTH(
                self._tmp_result, inp[0], wait_for=(wait_for
                                                    + inp[0].events)))
        return self.prg.update_Kyk1(
            self.queue, (self.NSlice, self.dimY, self.dimX), None,
            out.data, self._tmp_result.data, inp[2].data,
            inp[3].data, inp[1].data, np.int32(self.NC),
            np.int32(self.NScan),
            inp[4].data,
            np.int32(self.unknowns), self.DTYPE_real(self._dz),
            wait_for=(self._tmp_result.events +
                      out.events + inp[1].events))
class OperatorImagespaceStreamed(Operator):
    """The streamed version of the Imagespace based Operator.

    This class serves as linear operator between parameter and imagespace.
    All calculations are performed in a streamed fashion.

    Use this operator if you want to perform complex parameter fitting from
    complex image space data without the need of performing FFTs.
    In contrast to non-streaming classes no out of place operations
    are implemented.

    Parameters
    ----------
      par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
      prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator.
      DTYPE : numpy.dtype, numpy.complex64
        Complex working precission.
      DTYPE_real : numpy.dtype, numpy.float32
        Real working precission.

    Attributes
    ----------
      overlap : int
        Number of slices that overlap between adjacent blocks.
      par_slices : int
        Number of slices per streamed block
      fwdstr : PyQMRI.Stream
        The streaming object to perform the forward evaluation
      adjstr : PyQMRI.Stream
        The streaming object to perform the adjoint evaluation
      adjstrKyk1 : PyQMRI.Stream
        The streaming object to perform the adjoint evaluation including z1
        of the algorithm.
      unknown_shape : tuple of int
        Size of the parameter maps
      data_shape : tuple of int
        Size of the data
    """

    def __init__(self, par, prg, DTYPE=np.complex64, DTYPE_real=np.float32):
        super().__init__(par, prg, DTYPE, DTYPE_real)
        # NOTE(review): this mutates the caller's ``par`` dict; downstream
        # code may read par["overlap"] afterwards — confirm before changing.
        par["overlap"] = 1
        self._overlap = par["overlap"]
        self.par_slices = par["par_slices"]
        self.unknown_shape = (self.NSlice, self.unknowns, self.dimY, self.dimX)
        # Image-space fitting needs no coil data; an empty shape list
        # marks the unused coil slot in the streamed-operator signatures.
        coil_shape = []
        model_grad_shape = (self.NSlice, self.unknowns,
                            self.NScan, self.dimY, self.dimX)
        self.data_shape = (self.NSlice, self.NScan, self.dimY, self.dimX)
        # Gradient has one extra trailing axis (presumably x/y/z + one more
        # component for TGV — confirm against the kernel definition).
        grad_shape = self.unknown_shape + (4,)
        # Streaming wrappers: [kernel], [output shape], [[input shapes]].
        self.fwdstr = self._defineoperator(
            [self._fwdstreamed],
            [self.data_shape],
            [[self.unknown_shape,
              coil_shape,
              model_grad_shape]])
        self.adjstrKyk1 = self._defineoperator(
            [self._adjstreamedKyk1],
            [self.unknown_shape],
            [[self.data_shape,
              grad_shape,
              coil_shape,
              model_grad_shape,
              self.unknown_shape]])
        self.adjstr = self._defineoperator(
            [self._adjstreamed],
            [self.unknown_shape],
            [[self.data_shape,
              coil_shape,
              model_grad_shape]])

    def fwd(self, out, inp, **kwargs):
        """Forward operator application in-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
          out : PyOpenCL.Array
            The complex measurement space data which is the result of the
            computation.
          inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Event
            A PyOpenCL event to wait for.
        """
        # Streamed evaluation is synchronous; kwargs (wait_for) are unused.
        self.fwdstr.eval(out, inp)

    def fwdoop(self, inp, **kwargs):
        """Forward operator application out-of-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it
        as the result.

        Parameters
        ----------
          inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Array: A PyOpenCL array containing the result of the
          computation.
        """
        # Host-side (numpy) scratch array; streamed eval fills it in place.
        tmp_result = np.zeros(self.data_shape, dtype=self.DTYPE)
        self.fwdstr.eval([tmp_result], inp)
        return tmp_result

    def adj(self, out, inp, **kwargs):
        """Adjoint operator application in-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
          out : PyOpenCL.Array
            The complex parameter space data which is the result of the
            computation.
          inp : PyOpenCL.Array
            The complex measurement space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        self.adjstr.eval(out, inp)

    def adjoop(self, inp, **kwargs):
        """Adjoint operator application out-of-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it
        as the result.

        Parameters
        ----------
          inp : PyOpenCL.Array
            The complex measurement space which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Array: A PyOpenCL array containing the result of the
          computation.
        """
        tmp_result = np.zeros(self.unknown_shape, dtype=self.DTYPE)
        self.adjstr.eval([tmp_result], inp)
        return tmp_result

    def adjKyk1(self, out, inp):
        """Apply the linear operator from parameter space to image space.

        This method fully implements the combined linear operator
        consisting of the data part as well as the TGV regularization part.

        Parameters
        ----------
          out : numpy.Array
            The complex parameter space data which is used as input.
          inp : numpy.Array
            The complex parameter space data which is used as input.
        """
        self.adjstrKyk1.eval(out, inp)

    def _fwdstreamed(self, outp, inp, par=None, idx=0, idxq=0,
                     bound_cond=0, wait_for=None):
        # Per-block forward kernel; idx selects the device, idxq the
        # sub-queue (queues appear to be grouped 4 per device — confirm).
        if wait_for is None:
            wait_for = []
        return (self.prg[idx].operator_fwd_imagespace(
            self.queue[4*idx+idxq],
            (self.par_slices+self._overlap, self.dimY, self.dimX), None,
            outp.data, inp[0].data,
            inp[2].data,
            np.int32(self.NScan), np.int32(self.unknowns),
            wait_for=outp.events+inp[0].events+inp[2].events+wait_for))

    def _adjstreamedKyk1(self, outp, inp, par=None, idx=0, idxq=0,
                         bound_cond=0, wait_for=None):
        # Per-block adjoint + Kyk1 update; bound_cond flags the first/last
        # block so the kernel can treat the volume boundary correctly.
        if wait_for is None:
            wait_for = []
        return self.prg[idx].update_Kyk1_imagespace(
            self.queue[4*idx+idxq],
            (self.par_slices+self._overlap, self.dimY, self.dimX), None,
            outp.data, inp[0].data,
            inp[3].data,
            inp[1].data,
            np.int32(self.NScan),
            par[0][idx].data, np.int32(self.unknowns),
            np.int32(bound_cond), self.DTYPE_real(self._dz),
            wait_for=(outp.events+inp[0].events+inp[1].events +
                      inp[3].events+wait_for))

    def _adjstreamed(self, outp, inp, par=None, idx=0, idxq=0,
                     bound_cond=0, wait_for=None):
        # Per-block plain adjoint kernel (no regularization part).
        if wait_for is None:
            wait_for = []
        return self.prg[idx].operator_ad_imagespace(
            self.queue[4*idx+idxq],
            (self.par_slices+self._overlap, self.dimY, self.dimX), None,
            outp.data, inp[0].data,
            inp[2].data,
            np.int32(self.NScan), np.int32(self.unknowns),
            wait_for=(outp.events+inp[0].events +
                      inp[2].events+wait_for))
class OperatorKspaceStreamed(Operator):
    """The streamed version of the k-space based Operator.

    This class serves as linear operator between parameter and k-space.
    All calculations are performed in a streamed fashion.

    Use this operator if you want to perform complex parameter fitting from
    complex k-space data without the need of performing FFTs.
    In contrast to non-streaming classes no out of place operations
    are implemented.

    Parameters
    ----------
      par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
      prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator.
      DTYPE : numpy.dtype, numpy.complex64
        Complex working precission.
      DTYPE_real : numpy.dtype, numpy.float32
        Real working precission.
      trafo : bool, true
        Switch between cartesian (false) and non-cartesian FFT
        (True, default).

    Attributes
    ----------
      overlap : int
        Number of slices that overlap between adjacent blocks.
      par_slices : int
        Number of slices per streamed block.
      fwdstr : PyQMRI.Stream
        The streaming object to perform the forward evaluation.
      adjstr : PyQMRI.Stream
        The streaming object to perform the adjoint evaluation.
      adjstrKyk1 : PyQMRI.Stream
        The streaming object to perform the adjoint evaluation including z1
        of the algorithm.
      NUFFT : list of PyQMRI.transforms.PyOpenCLnuFFT
        A list of NUFFT objects. One for each context.
      FTstr : PyQMRI.Stream
        A streamed version of the used (non-uniform) FFT, applied forward.
      unknown_shape : tuple of int
        Size of the parameter maps
      data_shape : tuple of int
        Size of the data
    """

    def __init__(self, par, prg,
                 DTYPE=np.complex64, DTYPE_real=np.float32, trafo=True):
        super().__init__(par, prg, DTYPE, DTYPE_real)
        self._overlap = par["overlap"]
        self.par_slices = par["par_slices"]
        if not trafo:
            # Cartesian sampling: data dimensions match image dimensions.
            self.Nproj = self.dimY
            self.N = self.dimX
        # Two scratch buffers and two NUFFT objects per device (one per
        # alternating sub-queue) for double-buffered streaming.
        for j in range(self.num_dev):
            for i in range(2):
                self._tmp_result.append(
                    clarray.empty(
                        self.queue[4*j+i],
                        (self.par_slices+self._overlap, self.NScan,
                         self.NC, self.dimY, self.dimX),
                        self.DTYPE, "C"))
                self.NUFFT.append(
                    CLnuFFT.create(self.ctx[j],
                                   self.queue[4*j+i], par,
                                   radial=trafo,
                                   streamed=True,
                                   DTYPE=DTYPE,
                                   DTYPE_real=DTYPE_real))
        self.unknown_shape = (self.NSlice, self.unknowns, self.dimY, self.dimX)
        coil_shape = (self.NSlice, self.NC, self.dimY, self.dimX)
        model_grad_shape = (self.NSlice, self.unknowns,
                            self.NScan, self.dimY, self.dimX)
        self.data_shape = (self.NSlice, self.NScan, self.NC, self.Nproj,
                           self.N)
        trans_shape = (self.NSlice, self.NScan,
                       self.NC, self.dimY, self.dimX)
        # Gradient has one extra trailing axis (presumably x/y/z + one more
        # component for TGV — confirm against the kernel definition).
        grad_shape = self.unknown_shape + (4,)
        # Streaming wrappers: [kernel], [output shape], [[input shapes]].
        self.fwdstr = self._defineoperator(
            [self._fwdstreamed],
            [self.data_shape],
            [[self.unknown_shape,
              coil_shape,
              model_grad_shape]])
        self.adjstrKyk1 = self._defineoperator(
            [self._adjstreamedKyk1],
            [self.unknown_shape],
            [[self.data_shape,
              grad_shape,
              coil_shape,
              model_grad_shape,
              self.unknown_shape]])
        self.adjstr = self._defineoperator(
            [self._adjstreamed],
            [self.unknown_shape],
            [[self.data_shape,
              coil_shape,
              model_grad_shape]])
        self.FTstr = self._defineoperator(
            [self._FT],
            [self.data_shape],
            [[trans_shape]])

    def fwd(self, out, inp, **kwargs):
        """Forward operator application in-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
          out : PyOpenCL.Array
            The complex measurement space data which is the result of the
            computation.
          inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Event
            A PyOpenCL event to wait for.
        """
        # Streamed evaluation is synchronous; kwargs (wait_for) are unused.
        self.fwdstr.eval(out, inp)

    def fwdoop(self, inp, **kwargs):
        """Forward operator application out-of-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it
        as the result.

        Parameters
        ----------
          inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Array: A PyOpenCL array containing the result of the
          computation.
        """
        # Host-side (numpy) scratch array; streamed eval fills it in place.
        tmp_result = np.zeros(self.data_shape, dtype=self.DTYPE)
        self.fwdstr.eval([tmp_result], inp)
        return tmp_result

    def adj(self, out, inp, **kwargs):
        """Adjoint operator application in-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
          out : PyOpenCL.Array
            The complex parameter space data which is the result of the
            computation.
          inp : PyOpenCL.Array
            The complex measurement space data which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        self.adjstr.eval(out, inp)

    def adjoop(self, inp, **kwargs):
        """Adjoint operator application out-of-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it
        as the result.

        Parameters
        ----------
          inp : PyOpenCL.Array
            The complex measurement space which is used as input.
          wait_for : list of PyopenCL.Event
            A List of PyOpenCL events to wait for.

        Returns
        -------
          PyOpenCL.Array: A PyOpenCL array containing the result of the
          computation.
        """
        tmp_result = np.zeros(self.unknown_shape, dtype=self.DTYPE)
        self.adjstr.eval([tmp_result], inp)
        return tmp_result

    def adjKyk1(self, out, inp):
        """Apply the linear operator from parameter space to k-space.

        This method fully implements the combined linear operator
        consisting of the data part as well as the TGV regularization part.

        Parameters
        ----------
          out : numpy.Array
            The complex parameter space data which is used as input.
          inp : numpy.Array
            The complex parameter space data which is used as input.
        """
        self.adjstrKyk1.eval(out, inp)

    def _fwdstreamed(self, outp, inp, par=None, idx=0, idxq=0,
                     bound_cond=0, wait_for=None):
        # Per-block forward: kernel into the scratch buffer, then FFT into
        # outp. idx selects the device, idxq the sub-queue (queues appear
        # to be grouped 4 per device, buffers 2 per device — confirm).
        if wait_for is None:
            wait_for = []
        self._tmp_result[2*idx+idxq].add_event(self.prg[idx].operator_fwd(
            self.queue[4*idx+idxq],
            (self.par_slices+self._overlap, self.dimY, self.dimX), None,
            self._tmp_result[2*idx+idxq].data, inp[0].data,
            inp[1].data,
            inp[2].data,
            np.int32(self.NC),
            np.int32(self.NScan), np.int32(self.unknowns),
            wait_for=(self._tmp_result[2*idx+idxq].events +
                      inp[0].events+wait_for)))
        return self.NUFFT[2*idx+idxq].FFT(
            outp, self._tmp_result[2*idx+idxq],
            wait_for=outp.events+wait_for+self._tmp_result[2*idx+idxq].events)

    def _adjstreamedKyk1(self, outp, inp, par=None, idx=0, idxq=0,
                         bound_cond=0, wait_for=None):
        # Per-block adjoint + Kyk1 update: inverse FFT into the scratch
        # buffer, then the update kernel. bound_cond flags the first/last
        # block so the kernel can treat the volume boundary correctly.
        if wait_for is None:
            wait_for = []
        self._tmp_result[2*idx+idxq].add_event(
            self.NUFFT[2*idx+idxq].FFTH(
                self._tmp_result[2*idx+idxq], inp[0],
                wait_for=(wait_for+inp[0].events +
                          self._tmp_result[2*idx+idxq].events)))
        return self.prg[idx].update_Kyk1(
            self.queue[4*idx+idxq],
            (self.par_slices+self._overlap, self.dimY, self.dimX), None,
            outp.data, self._tmp_result[2*idx+idxq].data,
            inp[2].data,
            inp[3].data,
            inp[1].data, np.int32(self.NC), np.int32(self.NScan),
            par[0][idx].data, np.int32(self.unknowns),
            np.int32(bound_cond), self.DTYPE_real(self._dz),
            wait_for=(
                self._tmp_result[2*idx+idxq].events +
                outp.events+inp[1].events +
                inp[2].events + inp[3].events + wait_for))

    def _adjstreamed(self, outp, inp, par=None, idx=0, idxq=0,
                     bound_cond=0, wait_for=None):
        # Per-block plain adjoint: inverse FFT, then adjoint kernel.
        if wait_for is None:
            wait_for = []
        self._tmp_result[2*idx+idxq].add_event(
            self.NUFFT[2*idx+idxq].FFTH(
                self._tmp_result[2*idx+idxq], inp[0],
                wait_for=(wait_for+inp[0].events +
                          self._tmp_result[2*idx+idxq].events)))
        return self.prg[idx].operator_ad(
            self.queue[4*idx+idxq],
            (self.par_slices+self._overlap, self.dimY, self.dimX), None,
            outp.data, self._tmp_result[2*idx+idxq].data,
            inp[1].data,
            inp[2].data,
            np.int32(self.NC),
            np.int32(self.NScan), np.int32(self.unknowns),
            wait_for=(self._tmp_result[2*idx+idxq].events +
                      inp[1].events+inp[2].events+wait_for))

    def _FT(self, outp, inp, par=None, idx=0, idxq=0,
            bound_cond=0, wait_for=None):
        # Streamed forward FFT only.
        # NOTE(review): wait_for is accepted but not forwarded to FFT
        # here — confirm this is intended.
        if wait_for is None:
            wait_for = []
        return self.NUFFT[2*idx+idxq].FFT(outp, inp[0])
class OperatorKspaceSMSStreamed(Operator):
    """The streamed version of the k-space based SMS Operator.

    This class serves as linear operator between parameter and k-space.
    It implements simultaneous-multi-slice (SMS) reconstruction.
    All calculations are performed in a streamed fashion.

    Use this operator if you want to perform complex parameter fitting from
    complex k-space data measured with SMS. Currently only Cartesian FFTs are
    supported.

    Parameters
    ----------
    par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
    prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator.
    DTYPE : numpy.dtype, numpy.complex64
        Complex working precission.
    DTYPE_real : numpy.dtype, numpy.float32
        Real working precission.

    Attributes
    ----------
    overlap : int
        Number of slices that overlap between adjacent blocks.
    par_slices : int
        Number of slices per streamed block
    packs : int
        Number of packs to stream
    fwdstr : PyQMRI.Stream
        The streaming object to perform the forward evaluation
    adjstr : PyQMRI.Stream
        The streaming object to perform the adjoint evaluation
    NUFFT : list of PyQMRI.transforms.PyOpenCLnuFFT
        A list of NUFFT objects. One for each context.
    FTstr : PyQMRI.Stream
        A streamed version of the used (non-uniform) FFT, applied forward.
    FTHstr : PyQMRI.Stream
        A streamed version of the used (non-uniform) FFT, applied adjoint.
    dat_trans_axes : list of int
        Order in which the data needs to be transformed during the SMS
        reconstruction and streaming.
    """

    def __init__(self, par, prg, DTYPE=np.complex64,
                 DTYPE_real=np.float32):
        super().__init__(par, prg, DTYPE, DTYPE_real)
        self._overlap = par["overlap"]
        self.par_slices = par["par_slices"]
        # Total number of SMS packs to stream (packs times pack repetitions).
        self.packs = par["packs"]*par["numofpacks"]
        # Cartesian sampling: projections/readout match image dimensions.
        self.Nproj = self.dimY
        self.N = self.dimX

        for j in range(self.num_dev):
            for i in range(2):
                # Two temporary buffers and NUFFT objects per device
                # (double buffering); queue index is 4*j+i by convention.
                self._tmp_result.append(
                    clarray.empty(
                        self.queue[4*j+i],
                        (self.par_slices+self._overlap, self.NScan,
                         self.NC, self.dimY, self.dimX),
                        self.DTYPE, "C"))
                self.NUFFT.append(
                    CLnuFFT.create(self.ctx[j],
                                   self.queue[4*j+i], par,
                                   radial=False,
                                   SMS=True,
                                   streamed=True,
                                   DTYPE=DTYPE,
                                   DTYPE_real=DTYPE_real))

        # Host-side array shapes used by the streaming objects.
        unknown_shape = (self.NSlice, self.unknowns, self.dimY, self.dimX)
        coil_shape = (self.NSlice, self.NC, self.dimY, self.dimX)
        model_grad_shape = (self.NSlice, self.unknowns,
                            self.NScan, self.dimY, self.dimX)
        data_shape = (self.NSlice, self.NScan, self.NC, self.dimY, self.dimX)
        # Transposed shapes: scan/coil leading so the FFT can stream
        # over scans while the SMS packing collapses slices into packs.
        data_shape_T = (self.NScan, self.NC, self.packs,
                        self.dimY, self.dimX)
        trans_shape_T = (self.NScan,
                         self.NC, self.NSlice, self.dimY, self.dimX)
        grad_shape = unknown_shape + (4,)

        # Axis order that brings (NScan, NC, NSlice/packs, y, x) data back
        # to slice-leading order for streaming.
        self.dat_trans_axes = [2, 0, 1, 3, 4]

        self.fwdstr = self._defineoperator(
            [self._fwdstreamed],
            [data_shape],
            [[unknown_shape,
              coil_shape,
              model_grad_shape]])
        self.adjstr = self._defineoperator(
            [self._adjstreamed],
            [unknown_shape],
            [[data_shape,
              coil_shape,
              model_grad_shape]])

        # SMS FFT streams iterate over NScan instead of slices.
        self.FTstr = self._defineoperatorSMS(
            [self._FT],
            [data_shape_T],
            [[trans_shape_T]])
        self.FTHstr = self._defineoperatorSMS(
            [self._FTH],
            [trans_shape_T],
            [[data_shape_T]])

        # Host-side scratch arrays reused by fwd/adj to avoid reallocation.
        self._tmp_fft1 = np.zeros((self.NSlice, self.NScan, self.NC,
                                   self.dimY, self.dimX),
                                  dtype=self.DTYPE)
        self._tmp_fft2 = np.zeros((self.NScan, self.NC, self.NSlice,
                                   self.dimY, self.dimX),
                                  dtype=self.DTYPE)
        self._tmp_transformed = np.zeros((self.NScan, self.NC, self.packs,
                                          self.dimY, self.dimX),
                                         dtype=self.DTYPE)
        self._tmp_Kyk1 = np.zeros(unknown_shape,
                                  dtype=self.DTYPE)

        self._updateKyk1SMSStreamed = self._defineoperator(
            [self._updateKyk1SMS],
            [unknown_shape],
            [[unknown_shape,
              grad_shape,
              unknown_shape]],
            reverse_dir=True,
            posofnorm=[True])

    def fwd(self, out, inp, **kwargs):
        """Forward operator application in-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
        out : list of numpy.Array
            The complex measurement space data which is the result of the
            computation; out[0] is overwritten.
        inp : list of numpy.Array
            The complex parameter space data which is used as input.

        Returns
        -------
        None
            The result is written into ``out[0]``.
        """
        self.fwdstr.eval([self._tmp_fft1], inp)
        # Reorder to (NScan, NC, NSlice, y, x) for the SMS FFT stream.
        self._tmp_fft2 = np.require(
            np.transpose(
                self._tmp_fft1, (1, 2, 0, 3, 4)),
            requirements='C')
        self.FTstr.eval(
            [self._tmp_transformed],
            [[self._tmp_fft2]])
        # Back to slice-leading order expected by the caller.
        out[0][...] = np.copy(np.require(
            np.transpose(
                self._tmp_transformed,
                self.dat_trans_axes),
            requirements='C'))

    def fwdoop(self, inp, **kwargs):
        """Forward operator application out-of-place.

        Apply the linear operator from parameter space to measurement space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it as
        the result.

        Parameters
        ----------
        inp : list of numpy.Array
            The complex parameter space data which is used as input.

        Returns
        -------
        numpy.Array
            An array containing the result of the computation.
        """
        self.fwdstr.eval([self._tmp_fft1], inp)
        self._tmp_fft2 = np.require(
            np.transpose(
                self._tmp_fft1, (1, 2, 0, 3, 4)),
            requirements='C')
        self.FTstr.eval(
            [self._tmp_transformed],
            [[self._tmp_fft2]])
        return np.require(
            np.transpose(
                self._tmp_transformed,
                self.dat_trans_axes),
            requirements='C')

    def adj(self, out, inp, **kwargs):
        """Adjoint operator application in-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.

        Parameters
        ----------
        out : list of numpy.Array
            The complex parameter space data which is the result of the
            computation.
        inp : nested list of numpy.Array
            inp[0][0] holds the k-space data, inp[0][1:] the coil
            sensitivities and model gradient forwarded to the adjoint
            stream.

        Returns
        -------
        None
            The result is written into ``out``.
        """
        # Bring k-space data into (NScan, NC, packs, y, x) order.
        self._tmp_transformed = np.require(
            np.transpose(
                inp[0][0], (1, 2, 0, 3, 4)),
            requirements='C')
        self.FTHstr.eval(
            [self._tmp_fft2],
            [[self._tmp_transformed]])
        self._tmp_fft1 = np.require(
            np.transpose(
                self._tmp_fft2, self.dat_trans_axes),
            requirements='C')
        self.adjstr.eval(out, [[self._tmp_fft1]+inp[0][1:]])

    def adjoop(self, inp, **kwargs):
        """Adjoint operator application out-of-place.

        Apply the linear operator from measurement space to parameter space.
        If streamed operations are used the PyOpenCL.Arrays are replaced
        by Numpy.Array.
        This method needs to generate a temporary array and will return it as
        the result.

        Parameters
        ----------
        inp : nested list of numpy.Array
            inp[0][0] holds the k-space data, inp[0][1:] the coil
            sensitivities and model gradient.

        Returns
        -------
        numpy.Array
            An array containing the result of the computation.
            NOTE(review): this is a reference to the internal scratch
            buffer ``_tmp_Kyk1`` which is reused on the next call.
        """
        self._tmp_transformed = np.require(
            np.transpose(
                inp[0][0], (1, 2, 0, 3, 4)),
            requirements='C')
        self.FTHstr.eval(
            [self._tmp_fft2],
            [[self._tmp_transformed]])
        self._tmp_fft1 = np.require(
            np.transpose(
                self._tmp_fft2, self.dat_trans_axes),
            requirements='C')
        self.adjstr.eval([self._tmp_Kyk1], [[self._tmp_fft1]+inp[0][1:]])
        return self._tmp_Kyk1

    def adjKyk1(self, out, inp, **kwargs):
        """Apply the adjoint operator combined with the TGV update.

        This method fully implements the combined linear operator
        consisting of the data part as well as the TGV regularization part.

        Parameters
        ----------
        out : list of numpy.Array
            The complex parameter space data which is the result of the
            computation.
        inp : nested list of numpy.Array
            NOTE(review): inp[0] appears to be laid out as
            [k-space data, z1, coils, model gradient, last argument for the
            Kyk1 update] -- confirm against the caller.
        kwargs["par"] :
            Extra kernel parameters forwarded to the streamed Kyk1 update.

        Returns
        -------
        tuple of floats:
            The lhs and rhs for the line search of the primal-dual algorithm.
        """
        self._tmp_transformed = np.require(
            np.transpose(
                inp[0][0], (1, 2, 0, 3, 4)),
            requirements='C')
        self.FTHstr.eval(
            [self._tmp_fft2],
            [[self._tmp_transformed]])
        self._tmp_fft1 = np.require(
            np.transpose(
                self._tmp_fft2, self.dat_trans_axes),
            requirements='C')
        # inp[0][2:-1] skips z1 (index 1) and the trailing update argument.
        self.adjstr.eval([self._tmp_Kyk1], [[self._tmp_fft1]+inp[0][2:-1]])
        return self._updateKyk1SMSStreamed.evalwithnorm(
            out,
            [[self._tmp_Kyk1]+[inp[0][1]]+[inp[0][-1]]], kwargs["par"])

    def _fwdstreamed(self, outp, inp, par=None, idx=0, idxq=0,
                     bound_cond=0, wait_for=None):
        """Enqueue the forward model kernel for one streamed slice block."""
        if wait_for is None:
            wait_for = []
        return self.prg[idx].operator_fwd(
            self.queue[4*idx+idxq],
            (self.par_slices+self._overlap, self.dimY, self.dimX), None,
            outp.data, inp[0].data,
            inp[1].data,
            inp[2].data,
            np.int32(self.NC),
            np.int32(self.NScan), np.int32(self.unknowns),
            wait_for=(outp.events+inp[0].events +
                      inp[1].events+inp[2].events+wait_for))

    def _adjstreamed(self, outp, inp, par=None, idx=0, idxq=0,
                     bound_cond=0, wait_for=None):
        """Enqueue the adjoint model kernel for one streamed slice block."""
        if wait_for is None:
            wait_for = []
        return self.prg[idx].operator_ad(
            self.queue[4*idx+idxq],
            (self.par_slices+self._overlap, self.dimY, self.dimX), None,
            outp.data, inp[0].data,
            inp[1].data,
            inp[2].data,
            np.int32(self.NC), np.int32(self.NScan),
            np.int32(self.unknowns),
            wait_for=(
                inp[0].events +
                outp.events+inp[1].events +
                inp[2].events + wait_for))

    def _FT(self, outp, inp, par=None, idx=0, idxq=0,
            bound_cond=0, wait_for=None):
        """Apply the forward FFT on one streamed block.

        NOTE(review): ``wait_for`` is normalized but not forwarded to
        ``FFT`` -- presumably events are handled inside the NUFFT object.
        """
        if wait_for is None:
            wait_for = []
        return self.NUFFT[2*idx+idxq].FFT(outp, inp[0])

    def _FTH(self, outp, inp, par=None, idx=0, idxq=0,
             bound_cond=0, wait_for=None):
        """Apply the adjoint FFT on one streamed block.

        NOTE(review): ``wait_for`` is normalized but not forwarded to
        ``FFTH`` -- presumably events are handled inside the NUFFT object.
        """
        if wait_for is None:
            wait_for = []
        return self.NUFFT[2*idx+idxq].FFTH(outp, inp[0])

    def _updateKyk1SMS(self, outp, inp, par=None, idx=0, idxq=0,
                       bound_cond=0, wait_for=None):
        """Enqueue the Kyk1 update kernel (data part minus divergence)."""
        if wait_for is None:
            wait_for = []
        return self.prg[idx].update_Kyk1SMS(
            self.queue[4*idx+idxq],
            (self.par_slices+self._overlap, self.dimY, self.dimX), None,
            outp.data, inp[0].data,
            inp[1].data,
            par[0][idx].data, np.int32(self.unknowns),
            np.int32(bound_cond), self.DTYPE_real(self._dz),
            wait_for=(
                inp[0].events +
                outp.events+inp[1].events +
                wait_for))

    def _defineoperatorSMS(self,
                           functions,
                           outp,
                           inp,
                           reverse_dir=False,
                           posofnorm=None):
        """Build a Stream that iterates over NScan (SMS FFT streaming).

        In contrast to the slice-wise ``_defineoperator``, the block size
        is 1 with no overlap and the loop dimension is ``NScan``.
        """
        return streaming.Stream(
            functions,
            outp,
            inp,
            1,
            0,
            self.NScan,
            self.queue,
            self.num_dev,
            reverse_dir,
            posofnorm,
            DTYPE=self.DTYPE)
class OperatorFiniteGradient(Operator):
    """Gradient operator.

    This class implements the finite difference gradient operation and
    the adjoint (negative divergence).

    Parameters
    ----------
    par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
    prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator.
    DTYPE : numpy.dtype, numpy.complex64
        Complex working precission.
    DTYPE_real : numpy.dtype, numpy.float32
        Real working precission.

    Attributes
    ----------
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    ratio : PyOpenCL.Array
        Ratio between the different unknowns
    """

    def __init__(self, par, prg, DTYPE=np.complex64, DTYPE_real=np.float32):
        super().__init__(par, prg, DTYPE, DTYPE_real)
        # Non-streamed operator: collapse the per-device lists to the
        # first context/queue.
        self.queue = self.queue[0]
        self.ctx = self.ctx[0]
        # Per-unknown weighting, uploaded once and reused by the kernels.
        self.ratio = clarray.to_device(
            self.queue,
            (par["weights"]).astype(
                dtype=self.DTYPE_real))
        self._weights = par["weights"]

    def fwd(self, out, inp, **kwargs):
        """Forward operator application in-place.

        Apply the finite difference gradient from parameter space to
        gradient space.

        Parameters
        ----------
        out : PyOpenCL.Array
            The complex gradient data which is the result of the
            computation.
        inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
        wait_for : list of PyOpenCL.Event, optional
            A List of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Event
            A PyOpenCL event to wait for.
        """
        wait_for = kwargs.get("wait_for", [])
        return self.prg.gradient(
            self.queue, inp.shape[1:], None, out.data, inp.data,
            np.int32(self.unknowns),
            self.ratio.data, self.DTYPE_real(self._dz),
            wait_for=out.events + inp.events + wait_for)

    def fwdoop(self, inp, **kwargs):
        """Forward operator application out-of-place.

        Apply the finite difference gradient from parameter space to
        gradient space. This method generates a temporary array and
        returns it as the result.

        Parameters
        ----------
        inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
        wait_for : list of PyOpenCL.Event, optional
            A List of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Array
            A PyOpenCL array containing the result of the computation.
        """
        wait_for = kwargs.get("wait_for", [])
        # Trailing dimension 4: x/y/z finite differences plus padding.
        tmp_result = clarray.empty(
            self.queue, (self.unknowns,
                         self.NSlice, self.dimY, self.dimX, 4),
            self.DTYPE, "C")
        tmp_result.add_event(self.prg.gradient(
            self.queue, inp.shape[1:], None, tmp_result.data, inp.data,
            np.int32(self.unknowns),
            self.ratio.data, self.DTYPE_real(self._dz),
            wait_for=tmp_result.events + inp.events + wait_for))
        return tmp_result

    def adj(self, out, inp, **kwargs):
        """Adjoint operator application in-place.

        Apply the negative divergence from gradient space back to
        parameter space.

        Parameters
        ----------
        out : PyOpenCL.Array
            The complex parameter space data which is the result of the
            computation.
        inp : PyOpenCL.Array
            The complex gradient data which is used as input.
        wait_for : list of PyOpenCL.Event, optional
            A List of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Event
            A PyOpenCL event to wait for.
        """
        wait_for = kwargs.get("wait_for", [])
        return self.prg.divergence(
            self.queue, inp.shape[1:-1], None, out.data, inp.data,
            np.int32(self.unknowns), self.ratio.data,
            self.DTYPE_real(self._dz),
            wait_for=out.events + inp.events + wait_for)

    def adjoop(self, inp, **kwargs):
        """Adjoint operator application out-of-place.

        Apply the negative divergence from gradient space back to
        parameter space. This method generates a temporary array and
        returns it as the result.

        Parameters
        ----------
        inp : PyOpenCL.Array
            The complex gradient data which is used as input.
        wait_for : list of PyOpenCL.Event, optional
            A List of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Array
            A PyOpenCL array containing the result of the computation.
        """
        wait_for = kwargs.get("wait_for", [])
        tmp_result = clarray.empty(
            self.queue, (self.unknowns, self.NSlice, self.dimY, self.dimX),
            self.DTYPE, "C")
        tmp_result.add_event(self.prg.divergence(
            self.queue, inp.shape[1:-1], None, tmp_result.data, inp.data,
            np.int32(self.unknowns), self.ratio.data,
            self.DTYPE_real(self._dz),
            wait_for=tmp_result.events + inp.events + wait_for))
        return tmp_result
class OperatorFiniteSymGradient(Operator):
    """Symmetrized gradient operator.

    This class implements the finite difference symmetrized gradient
    operation and the adjoint (negative symmetrized divergence).

    Parameters
    ----------
    par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
    prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator.
    DTYPE : numpy.dtype, numpy.complex64
        Complex working precission.
    DTYPE_real : numpy.dtype, numpy.float32
        Real working precission.

    Attributes
    ----------
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    ratio : PyOpenCL.Array
        Ratio between the different unknowns
    """

    def __init__(self, par, prg, DTYPE=np.complex64, DTYPE_real=np.float32):
        super().__init__(par, prg, DTYPE, DTYPE_real)
        # Non-streamed operator: collapse the per-device lists to the
        # first context/queue.
        self.queue = self.queue[0]
        self.ctx = self.ctx[0]
        # Per-unknown weighting, uploaded once and reused by the kernels.
        self.ratio = clarray.to_device(
            self.queue,
            (par["weights"]).astype(
                dtype=self.DTYPE_real))
        self._weights = par["weights"]

    def fwd(self, out, inp, **kwargs):
        """Forward operator application in-place.

        Apply the symmetrized finite difference gradient.

        Parameters
        ----------
        out : PyOpenCL.Array
            The complex data which is the result of the
            computation.
        inp : PyOpenCL.Array
            The complex data which is used as input.
        wait_for : list of PyOpenCL.Event, optional
            A List of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Event
            A PyOpenCL event to wait for.
        """
        wait_for = kwargs.get("wait_for", [])
        return self.prg.sym_grad(
            self.queue, inp.shape[1:-1], None, out.data, inp.data,
            np.int32(self.unknowns_TGV),
            self.ratio.data,
            self.DTYPE_real(self._dz),
            wait_for=out.events + inp.events + wait_for)

    def fwdoop(self, inp, **kwargs):
        """Forward operator application out-of-place.

        Apply the symmetrized finite difference gradient. This method
        generates a temporary array and returns it as the result.

        Parameters
        ----------
        inp : PyOpenCL.Array
            The complex parameter space data which is used as input.
        wait_for : list of PyOpenCL.Event, optional
            A List of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Array
            A PyOpenCL array containing the result of the computation.
        """
        wait_for = kwargs.get("wait_for", [])
        # Trailing dimension 8: the symmetrized gradient components.
        # NOTE(review): allocation uses self.unknowns while the kernel is
        # launched with unknowns_TGV -- confirm both agree for all models.
        tmp_result = clarray.empty(
            self.queue, (self.unknowns,
                         self.NSlice, self.dimY, self.dimX, 8),
            self.DTYPE, "C")
        tmp_result.add_event(self.prg.sym_grad(
            self.queue, inp.shape[1:-1], None, tmp_result.data, inp.data,
            np.int32(self.unknowns_TGV),
            self.ratio.data,
            self.DTYPE_real(self._dz),
            wait_for=tmp_result.events + inp.events + wait_for))
        return tmp_result

    def adj(self, out, inp, **kwargs):
        """Adjoint operator application in-place.

        Apply the negative symmetrized divergence.

        Parameters
        ----------
        out : PyOpenCL.Array
            The complex parameter space data which is the result of the
            computation.
        inp : PyOpenCL.Array
            The complex measurement space data which is used as input.
        wait_for : list of PyOpenCL.Event, optional
            A List of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Event
            A PyOpenCL event to wait for.
        """
        wait_for = kwargs.get("wait_for", [])
        return self.prg.sym_divergence(
            self.queue, inp.shape[1:-1], None, out.data, inp.data,
            np.int32(self.unknowns_TGV),
            self.ratio.data,
            self.DTYPE_real(self._dz),
            wait_for=out.events + inp.events + wait_for)

    def adjoop(self, inp, **kwargs):
        """Adjoint operator application out-of-place.

        Apply the negative symmetrized divergence. This method generates
        a temporary array and returns it as the result.

        Parameters
        ----------
        inp : PyOpenCL.Array
            The complex measurement space which is used as input.
        wait_for : list of PyOpenCL.Event, optional
            A List of PyOpenCL events to wait for.

        Returns
        -------
        PyOpenCL.Array
            A PyOpenCL array containing the result of the computation.
        """
        wait_for = kwargs.get("wait_for", [])
        tmp_result = clarray.empty(
            self.queue, (self.unknowns,
                         self.NSlice, self.dimY, self.dimX, 4),
            self.DTYPE, "C")
        tmp_result.add_event(self.prg.sym_divergence(
            self.queue, inp.shape[1:-1], None, tmp_result.data, inp.data,
            np.int32(self.unknowns_TGV),
            self.ratio.data,
            self.DTYPE_real(self._dz),
            wait_for=tmp_result.events + inp.events + wait_for))
        return tmp_result
class OperatorFiniteGradientStreamed(Operator):
    """Streamed gradient operator.

    This class implements the finite difference gradient
    operation and the adjoint (negative divergence).

    Parameters
    ----------
    par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
    prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator.
    DTYPE : numpy.dtype, numpy.complex64
        Complex working precission.
    DTYPE_real : numpy.dtype, numpy.float32
        Real working precission.

    Attributes
    ----------
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    par_slices : int
        Slices to parallel transfer to the compute device.
    ratio : list of PyOpenCL.Array
        Ratio between the different unknowns, one copy per device.
    """

    def __init__(self, par, prg, DTYPE=np.complex64, DTYPE_real=np.float32):
        super().__init__(par, prg, DTYPE, DTYPE_real)
        self._weights = par["weights"]
        # A one-slice overlap is enough for first-order finite differences.
        # NOTE: this mutates the caller's par dict.
        par["overlap"] = 1
        self._overlap = par["overlap"]
        self.par_slices = par["par_slices"]
        self.ratio = []
        for j in range(self.num_dev):
            # One device-resident copy of the weights per device
            # (queue index 4*j is device j's first queue).
            self.ratio.append(
                clarray.to_device(
                    self.queue[4*j],
                    (par["weights"]).astype(
                        dtype=self.DTYPE_real)))
        self.unknown_shape = (self.NSlice, self.unknowns, self.dimY, self.dimX)
        # Trailing dimension 4: gradient components.
        self._grad_shape = self.unknown_shape + (4,)
        self._stream_grad = self._defineoperator(
            [self._grad],
            [self._grad_shape],
            [[self.unknown_shape]])
        # Divergence streams in reverse slice order (adjoint pass).
        self._stream_div = self._defineoperator(
            [self._div],
            [self.unknown_shape],
            [[self._grad_shape]],
            reverse_dir=True)

    def fwd(self, out, inp, **kwargs):
        """Forward operator application in-place.

        Apply the streamed finite difference gradient; PyOpenCL.Arrays
        are replaced by Numpy.Array.

        Parameters
        ----------
        out : list of numpy.Array
            The complex data which is the result of the
            computation.
        inp : list of numpy.Array
            The complex data which is used as input.

        Returns
        -------
        None
            The result is written into ``out``.
        """
        self._stream_grad.eval(out, inp)

    def fwdoop(self, inp, **kwargs):
        """Forward operator application out-of-place.

        Apply the streamed finite difference gradient. This method
        generates a temporary array and returns it as the result.

        Parameters
        ----------
        inp : list of numpy.Array
            The complex parameter space data which is used as input.

        Returns
        -------
        numpy.Array
            An array containing the result of the computation.
        """
        out = np.zeros(self._grad_shape, dtype=self.DTYPE)
        self._stream_grad.eval([out], inp)
        return out

    def adj(self, out, inp, **kwargs):
        """Adjoint operator application in-place.

        Apply the streamed negative divergence; PyOpenCL.Arrays are
        replaced by Numpy.Array.

        Parameters
        ----------
        out : list of numpy.Array
            The complex parameter space data which is the result of the
            computation.
        inp : list of numpy.Array
            The complex gradient data which is used as input.

        Returns
        -------
        None
            The result is written into ``out``.
        """
        self._stream_div.eval(out, inp)

    def adjoop(self, inp, **kwargs):
        """Adjoint operator application out-of-place.

        Apply the streamed negative divergence. This method generates a
        temporary array and returns it as the result.

        Parameters
        ----------
        inp : list of numpy.Array
            The complex gradient data which is used as input.

        Returns
        -------
        numpy.Array
            An array containing the result of the computation.
        """
        out = np.zeros(self.unknown_shape, dtype=self.DTYPE)
        self._stream_div.eval([out], inp)
        return out

    def _grad(self, outp, inp, par=None, idx=0, idxq=0,
              bound_cond=0, wait_for=None):
        """Enqueue the gradient kernel for one streamed slice block."""
        if wait_for is None:
            wait_for = []
        return self.prg[idx].gradient(
            self.queue[4*idx+idxq],
            (self._overlap+self.par_slices, self.dimY, self.dimX),
            None, outp.data, inp[0].data,
            np.int32(self.unknowns),
            self.ratio[idx].data, self.DTYPE_real(self._dz),
            wait_for=outp.events + inp[0].events + wait_for)

    def _div(self, outp, inp, par=None, idx=0, idxq=0,
             bound_cond=0, wait_for=None):
        """Enqueue the divergence kernel for one streamed slice block.

        bound_cond flags the boundary blocks so the kernel can apply the
        correct boundary treatment.
        """
        if wait_for is None:
            wait_for = []
        return self.prg[idx].divergence(
            self.queue[4*idx+idxq],
            (self._overlap+self.par_slices, self.dimY, self.dimX), None,
            outp.data, inp[0].data, np.int32(self.unknowns),
            self.ratio[idx].data, np.int32(bound_cond),
            self.DTYPE_real(self._dz),
            wait_for=outp.events + inp[0].events + wait_for)

    def getStreamedGradientObject(self):
        """Access privat stream gradient object.

        Returns
        -------
        PyQMRI.Streaming.Stream:
            A PyQMRI streaming object for the gradient computation.
        """
        return self._stream_grad
class OperatorFiniteSymGradientStreamed(Operator):
    """Streamed symmetrized gradient operator.

    This class implements the finite difference symmetrized gradient
    operation and the adjoint (negative symmetrized divergence).

    Parameters
    ----------
    par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
    prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator.
    DTYPE : numpy.dtype, numpy.complex64
        Complex working precission.
    DTYPE_real : numpy.dtype, numpy.float32
        Real working precission.

    Attributes
    ----------
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    par_slices : int
        Slices to parallel transfer to the compute device.
    ratio : list of PyOpenCL.Array
        Ratio between the different unknowns, one copy per device.
    """

    def __init__(self, par, prg, DTYPE=np.complex64, DTYPE_real=np.float32):
        super().__init__(par, prg, DTYPE, DTYPE_real)
        # A one-slice overlap is enough for first-order finite differences.
        # NOTE: this mutates the caller's par dict.
        par["overlap"] = 1
        self._overlap = par["overlap"]
        self.par_slices = par["par_slices"]
        unknown_shape = (self.NSlice, self.unknowns, self.dimY, self.dimX)
        # Trailing dimensions: 4 gradient components in, 8 symmetrized
        # gradient components out.
        self._grad_shape = unknown_shape + (4,)
        self._symgrad_shape = unknown_shape + (8,)
        self.ratio = []
        for j in range(self.num_dev):
            # One device-resident copy of the weights per device
            # (queue index 4*j is device j's first queue).
            self.ratio.append(
                clarray.to_device(
                    self.queue[4*j],
                    (par["weights"]).astype(
                        dtype=self.DTYPE_real)))
        # Symmetrized gradient streams in reverse slice order.
        self._stream_symgrad = self._defineoperator(
            [self._symgrad],
            [self._symgrad_shape],
            [[self._grad_shape]],
            reverse_dir=True)
        self._stream_symdiv = self._defineoperator(
            [self._symdiv],
            [self._grad_shape],
            [[self._symgrad_shape]])

    def fwd(self, out, inp, **kwargs):
        """Forward operator application in-place.

        Apply the streamed symmetrized gradient; PyOpenCL.Arrays are
        replaced by Numpy.Array.

        Parameters
        ----------
        out : list of numpy.Array
            The complex data which is the result of the
            computation.
        inp : list of numpy.Array
            The complex data which is used as input.

        Returns
        -------
        None
            The result is written into ``out``.
        """
        self._stream_symgrad.eval(out, inp)

    def fwdoop(self, inp, **kwargs):
        """Forward operator application out-of-place.

        Apply the streamed symmetrized gradient. This method generates a
        temporary array and returns it as the result.

        Parameters
        ----------
        inp : list of numpy.Array
            The complex parameter space data which is used as input.

        Returns
        -------
        numpy.Array
            An array containing the result of the computation.
        """
        out = np.zeros(self._symgrad_shape, dtype=self.DTYPE)
        self._stream_symgrad.eval([out], inp)
        return out

    def adj(self, out, inp, **kwargs):
        """Adjoint operator application in-place.

        Apply the streamed negative symmetrized divergence;
        PyOpenCL.Arrays are replaced by Numpy.Array.

        Parameters
        ----------
        out : list of numpy.Array
            The complex parameter space data which is the result of the
            computation.
        inp : list of numpy.Array
            The complex symmetrized gradient data which is used as input.

        Returns
        -------
        None
            The result is written into ``out``.
        """
        self._stream_symdiv.eval(out, inp)

    def adjoop(self, inp, **kwargs):
        """Adjoint operator application out-of-place.

        Apply the streamed negative symmetrized divergence. This method
        generates a temporary array and returns it as the result.

        Parameters
        ----------
        inp : list of numpy.Array
            The complex symmetrized gradient data which is used as input.

        Returns
        -------
        numpy.Array
            An array containing the result of the computation.
        """
        out = np.zeros(self._grad_shape, dtype=self.DTYPE)
        self._stream_symdiv.eval([out], inp)
        return out

    def _symgrad(self, outp, inp, par=None, idx=0, idxq=0,
                 bound_cond=0, wait_for=None):
        """Enqueue the symmetrized gradient kernel for one slice block."""
        if wait_for is None:
            wait_for = []
        return self.prg[idx].sym_grad(
            self.queue[4*idx+idxq],
            (self._overlap+self.par_slices, self.dimY, self.dimX), None,
            outp.data, inp[0].data, np.int32(self.unknowns),
            self.ratio[idx].data,
            self.DTYPE_real(self._dz),
            wait_for=outp.events + inp[0].events + wait_for)

    def _symdiv(self, outp, inp, par=None, idx=0, idxq=0,
                bound_cond=0, wait_for=None):
        """Enqueue the symmetrized divergence kernel for one slice block.

        bound_cond flags the boundary blocks so the kernel can apply the
        correct boundary treatment.
        """
        if wait_for is None:
            wait_for = []
        return self.prg[idx].sym_divergence(
            self.queue[4*idx+idxq],
            (self._overlap+self.par_slices, self.dimY, self.dimX), None,
            outp.data, inp[0].data,
            np.int32(self.unknowns),
            self.ratio[idx].data,
            np.int32(bound_cond),
            self.DTYPE_real(self._dz),
            wait_for=outp.events + inp[0].events + wait_for)

    def getStreamedSymGradientObject(self):
        """Access privat stream symmetrized gradient object.

        Returns
        -------
        PyQMRI.Streaming.Stream:
            A PyQMRI streaming object for the symmetrized gradient
            computation.
        """
        return self._stream_symgrad
| 36.470463
| 79
| 0.568197
| 12,048
| 102,482
| 4.742696
| 0.032454
| 0.04153
| 0.023031
| 0.028176
| 0.930539
| 0.923171
| 0.912128
| 0.905268
| 0.898915
| 0.888432
| 0
| 0.00941
| 0.345709
| 102,482
| 2,809
| 80
| 36.483446
| 0.842751
| 0.451211
| 0
| 0.816273
| 0
| 0
| 0.013798
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073491
| false
| 0
| 0.004374
| 0.000875
| 0.137358
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
893b872993bb3d94f68033c732f2a88f90414dad
| 345
|
py
|
Python
|
slack/tests/data/__init__.py
|
drewp/slack-sansio
|
3c578bd087073174b1ec31b9a610e889d1fa0449
|
[
"MIT"
] | 39
|
2017-08-19T16:58:15.000Z
|
2022-03-22T01:00:03.000Z
|
slack/tests/data/__init__.py
|
drewp/slack-sansio
|
3c578bd087073174b1ec31b9a610e889d1fa0449
|
[
"MIT"
] | 32
|
2017-08-24T18:14:32.000Z
|
2019-07-25T16:57:55.000Z
|
slack/tests/data/__init__.py
|
drewp/slack-sansio
|
3c578bd087073174b1ec31b9a610e889d1fa0449
|
[
"MIT"
] | 10
|
2017-08-09T15:56:56.000Z
|
2019-10-31T06:24:46.000Z
|
from .events import Events, Messages, RTMEvents # noQa F401
from .actions import BlockAction # noQa F401
from .actions import MessageAction # noQa F401
from .actions import DialogSubmission # noQa F401
from .actions import InteractiveMessage # noQa F401
from .methods import Methods # noQa F401
from .commands import Commands # noQa F401
| 43.125
| 60
| 0.785507
| 44
| 345
| 6.159091
| 0.318182
| 0.206642
| 0.265683
| 0.280443
| 0.369004
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 0.168116
| 345
| 7
| 61
| 49.285714
| 0.87108
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
89797c45d503037a79a4c4bc7d87100bce1ec536
| 319
|
py
|
Python
|
utils/decorators.py
|
AryamanSrii/RKS-BOT
|
4ef8db42c66647cc3387ab6bc006ad8cc9630278
|
[
"MIT"
] | null | null | null |
utils/decorators.py
|
AryamanSrii/RKS-BOT
|
4ef8db42c66647cc3387ab6bc006ad8cc9630278
|
[
"MIT"
] | null | null | null |
utils/decorators.py
|
AryamanSrii/RKS-BOT
|
4ef8db42c66647cc3387ab6bc006ad8cc9630278
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
def command(*args):
    """Build a ``commands.command`` decorator, optionally with aliases.

    Called with no arguments, returns a plain command decorator.
    Called with at least one argument, treats args[0] as a
    comma-separated alias string (e.g. ``"ping,p"``).

    ``*args`` is already a tuple, so the original ``len(list(args))``
    and ``list(args)[0]`` conversions were redundant.
    """
    if not args:
        return commands.command(pass_context=True)
    return commands.command(pass_context=True, aliases=args[0].split(','))
def cooldown(*args):
    """Per-user cooldown decorator: 1 invocation per ``args[0]`` seconds.

    :raises IndexError: if called with no argument.
    :raises ValueError: if ``args[0]`` cannot be parsed as an int.
    """
    # `args` is a tuple already; index it directly instead of copying to a list.
    return commands.cooldown(1, int(args[0]), commands.BucketType.user)
| 35.444444
| 80
| 0.739812
| 46
| 319
| 5.086957
| 0.5
| 0.102564
| 0.115385
| 0.213675
| 0.307692
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0.014035
| 0.106583
| 319
| 9
| 81
| 35.444444
| 0.807018
| 0
| 0
| 0
| 0
| 0
| 0.003125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| true
| 0.285714
| 0.285714
| 0.142857
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
899df9e3be210e533cd97b004c17111ffc39a447
| 123
|
py
|
Python
|
gaiassl/models/necks/__init__.py
|
GAIA-vision/GAIA-ssl
|
3c22806a9337278a48dcbcc1fcc40082b8fe5af5
|
[
"Apache-2.0"
] | 13
|
2022-03-06T07:35:14.000Z
|
2022-03-31T12:24:55.000Z
|
gaiassl/models/necks/__init__.py
|
BraveGroup/gaiassl
|
7ac33fe2b8af0791caa89dfa789f03a3e20c9fa4
|
[
"Apache-2.0"
] | null | null | null |
gaiassl/models/necks/__init__.py
|
BraveGroup/gaiassl
|
7ac33fe2b8af0791caa89dfa789f03a3e20c9fa4
|
[
"Apache-2.0"
] | 1
|
2022-03-31T12:24:58.000Z
|
2022-03-31T12:24:58.000Z
|
from .dynamic_nonlinear_necks import *
from .dynamic_densecl_necks import *
from .dynamic_nonlinear_simclr_necks import *
| 24.6
| 45
| 0.845528
| 16
| 123
| 6.0625
| 0.4375
| 0.340206
| 0.412371
| 0.453608
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105691
| 123
| 4
| 46
| 30.75
| 0.881818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
985c797366f97e83f8e59008ac50cc316412120f
| 60,232
|
py
|
Python
|
sdk/python/pulumi_alicloud/ess/scaling_group.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 42
|
2019-03-18T06:34:37.000Z
|
2022-03-24T07:08:57.000Z
|
sdk/python/pulumi_alicloud/ess/scaling_group.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 152
|
2019-04-15T21:03:44.000Z
|
2022-03-29T18:00:57.000Z
|
sdk/python/pulumi_alicloud/ess/scaling_group.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-08-26T17:30:07.000Z
|
2021-07-05T01:37:45.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ScalingGroupArgs', 'ScalingGroup']
@pulumi.input_type
class ScalingGroupArgs:
    """Input arguments accepted when creating a ``ScalingGroup`` resource.

    Generated by the Pulumi Terraform bridge (tfgen) — do not hand-edit.
    ``max_size`` and ``min_size`` are the only required inputs; per-field
    documentation is repeated on each property below.
    """

    def __init__(__self__, *,
                 max_size: pulumi.Input[int],
                 min_size: pulumi.Input[int],
                 db_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 default_cooldown: Optional[pulumi.Input[int]] = None,
                 desired_capacity: Optional[pulumi.Input[int]] = None,
                 group_deletion_protection: Optional[pulumi.Input[bool]] = None,
                 loadbalancer_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 multi_az_policy: Optional[pulumi.Input[str]] = None,
                 on_demand_base_capacity: Optional[pulumi.Input[int]] = None,
                 on_demand_percentage_above_base_capacity: Optional[pulumi.Input[int]] = None,
                 removal_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 scaling_group_name: Optional[pulumi.Input[str]] = None,
                 spot_instance_pools: Optional[pulumi.Input[int]] = None,
                 spot_instance_remedy: Optional[pulumi.Input[bool]] = None,
                 vswitch_id: Optional[pulumi.Input[str]] = None,
                 vswitch_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a ScalingGroup resource.
        :param pulumi.Input[int] max_size: Maximum number of ECS instances in the scaling group. Value range: [0, 1000].
        :param pulumi.Input[int] min_size: Minimum number of ECS instances in the scaling group. Value range: [0, 1000].
        :param pulumi.Input[Sequence[pulumi.Input[str]]] db_instance_ids: If an RDS instance is specified in the scaling group, the scaling group automatically attaches the Intranet IP addresses of its ECS instances to the RDS access whitelist.
               - The specified RDS instance must be in running status.
               - The specified RDS instance’s whitelist must have room for more IP addresses.
        :param pulumi.Input[int] default_cooldown: Default cool-down time (in seconds) of the scaling group. Value range: [0, 86400]. The default value is 300s.
        :param pulumi.Input[int] desired_capacity: Expected number of ECS instances in the scaling group. Value range: [min_size, max_size].
        :param pulumi.Input[bool] group_deletion_protection: Specifies whether the scaling group deletion protection is enabled. `true` or `false`, Default value: `false`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] loadbalancer_ids: If a Server Load Balancer instance is specified in the scaling group, the scaling group automatically attaches its ECS instances to the Server Load Balancer instance.
               - The Server Load Balancer instance must be enabled.
               - At least one listener must be configured for each Server Load Balancer and it HealthCheck must be on. Otherwise, creation will fail (it may be useful to add a `depends_on` argument
                 targeting your `slb.Listener` in order to make sure the listener with its HealthCheck configuration is ready before creating your scaling group).
               - The Server Load Balancer instance attached with VPC-type ECS instances cannot be attached to the scaling group.
               - The default weight of an ECS instance attached to the Server Load Balancer instance is 50.
        :param pulumi.Input[str] multi_az_policy: Multi-AZ scaling group ECS instance expansion and contraction strategy. PRIORITY, BALANCE or COST_OPTIMIZED(Available in 1.54.0+).
        :param pulumi.Input[int] on_demand_base_capacity: The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.
        :param pulumi.Input[int] on_demand_percentage_above_base_capacity: Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] removal_policies: RemovalPolicy is used to select the ECS instances you want to remove from the scaling group when multiple candidates for removal exist. Optional values:
               - OldestInstance: removes the ECS instance that is added to the scaling group at the earliest point in time.
               - NewestInstance: removes the ECS instance that is added to the scaling group at the latest point in time.
               - OldestScalingConfiguration: removes the ECS instance that is created based on the earliest scaling configuration.
               - Default values: Default value of RemovalPolicy.1: OldestScalingConfiguration. Default value of RemovalPolicy.2: OldestInstance.
        :param pulumi.Input[str] scaling_group_name: Name shown for the scaling group, which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain numbers, underscores `_`, hyphens `-`, and decimal points `.`. If this parameter is not specified, the default value is ScalingGroupId.
        :param pulumi.Input[int] spot_instance_pools: The number of Spot pools to use to allocate your Spot capacity. The Spot pools is composed of instance types of lowest price.
        :param pulumi.Input[bool] spot_instance_remedy: Whether to replace spot instances with newly created spot/onDemand instance when receive a spot recycling message.
        :param pulumi.Input[str] vswitch_id: It has been deprecated from version 1.7.1 and new field 'vswitch_ids' replaces it.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] vswitch_ids: List of virtual switch IDs in which the ecs instances to be launched.
        """
        # Required inputs are stored unconditionally; optional ones only when
        # the caller actually supplied them.
        pulumi.set(__self__, "max_size", max_size)
        pulumi.set(__self__, "min_size", min_size)
        if db_instance_ids is not None:
            pulumi.set(__self__, "db_instance_ids", db_instance_ids)
        if default_cooldown is not None:
            pulumi.set(__self__, "default_cooldown", default_cooldown)
        if desired_capacity is not None:
            pulumi.set(__self__, "desired_capacity", desired_capacity)
        if group_deletion_protection is not None:
            pulumi.set(__self__, "group_deletion_protection", group_deletion_protection)
        if loadbalancer_ids is not None:
            pulumi.set(__self__, "loadbalancer_ids", loadbalancer_ids)
        if multi_az_policy is not None:
            pulumi.set(__self__, "multi_az_policy", multi_az_policy)
        if on_demand_base_capacity is not None:
            pulumi.set(__self__, "on_demand_base_capacity", on_demand_base_capacity)
        if on_demand_percentage_above_base_capacity is not None:
            pulumi.set(__self__, "on_demand_percentage_above_base_capacity", on_demand_percentage_above_base_capacity)
        if removal_policies is not None:
            pulumi.set(__self__, "removal_policies", removal_policies)
        if scaling_group_name is not None:
            pulumi.set(__self__, "scaling_group_name", scaling_group_name)
        if spot_instance_pools is not None:
            pulumi.set(__self__, "spot_instance_pools", spot_instance_pools)
        if spot_instance_remedy is not None:
            pulumi.set(__self__, "spot_instance_remedy", spot_instance_remedy)
        if vswitch_id is not None:
            # Generated pattern: this first check only emits the deprecation
            # warnings; the value itself is stored by the identical check below.
            warnings.warn("""Field 'vswitch_id' has been deprecated from provider version 1.7.1, and new field 'vswitch_ids' can replace it.""", DeprecationWarning)
            pulumi.log.warn("""vswitch_id is deprecated: Field 'vswitch_id' has been deprecated from provider version 1.7.1, and new field 'vswitch_ids' can replace it.""")
        if vswitch_id is not None:
            pulumi.set(__self__, "vswitch_id", vswitch_id)
        if vswitch_ids is not None:
            pulumi.set(__self__, "vswitch_ids", vswitch_ids)

    @property
    @pulumi.getter(name="maxSize")
    def max_size(self) -> pulumi.Input[int]:
        """
        Maximum number of ECS instances in the scaling group. Value range: [0, 1000].
        """
        return pulumi.get(self, "max_size")

    @max_size.setter
    def max_size(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_size", value)

    @property
    @pulumi.getter(name="minSize")
    def min_size(self) -> pulumi.Input[int]:
        """
        Minimum number of ECS instances in the scaling group. Value range: [0, 1000].
        """
        return pulumi.get(self, "min_size")

    @min_size.setter
    def min_size(self, value: pulumi.Input[int]):
        pulumi.set(self, "min_size", value)

    @property
    @pulumi.getter(name="dbInstanceIds")
    def db_instance_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        If an RDS instance is specified in the scaling group, the scaling group automatically attaches the Intranet IP addresses of its ECS instances to the RDS access whitelist.
        - The specified RDS instance must be in running status.
        - The specified RDS instance’s whitelist must have room for more IP addresses.
        """
        return pulumi.get(self, "db_instance_ids")

    @db_instance_ids.setter
    def db_instance_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "db_instance_ids", value)

    @property
    @pulumi.getter(name="defaultCooldown")
    def default_cooldown(self) -> Optional[pulumi.Input[int]]:
        """
        Default cool-down time (in seconds) of the scaling group. Value range: [0, 86400]. The default value is 300s.
        """
        return pulumi.get(self, "default_cooldown")

    @default_cooldown.setter
    def default_cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "default_cooldown", value)

    @property
    @pulumi.getter(name="desiredCapacity")
    def desired_capacity(self) -> Optional[pulumi.Input[int]]:
        """
        Expected number of ECS instances in the scaling group. Value range: [min_size, max_size].
        """
        return pulumi.get(self, "desired_capacity")

    @desired_capacity.setter
    def desired_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "desired_capacity", value)

    @property
    @pulumi.getter(name="groupDeletionProtection")
    def group_deletion_protection(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the scaling group deletion protection is enabled. `true` or `false`, Default value: `false`.
        """
        return pulumi.get(self, "group_deletion_protection")

    @group_deletion_protection.setter
    def group_deletion_protection(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "group_deletion_protection", value)

    @property
    @pulumi.getter(name="loadbalancerIds")
    def loadbalancer_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        If a Server Load Balancer instance is specified in the scaling group, the scaling group automatically attaches its ECS instances to the Server Load Balancer instance.
        - The Server Load Balancer instance must be enabled.
        - At least one listener must be configured for each Server Load Balancer and it HealthCheck must be on. Otherwise, creation will fail (it may be useful to add a `depends_on` argument
          targeting your `slb.Listener` in order to make sure the listener with its HealthCheck configuration is ready before creating your scaling group).
        - The Server Load Balancer instance attached with VPC-type ECS instances cannot be attached to the scaling group.
        - The default weight of an ECS instance attached to the Server Load Balancer instance is 50.
        """
        return pulumi.get(self, "loadbalancer_ids")

    @loadbalancer_ids.setter
    def loadbalancer_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "loadbalancer_ids", value)

    @property
    @pulumi.getter(name="multiAzPolicy")
    def multi_az_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Multi-AZ scaling group ECS instance expansion and contraction strategy. PRIORITY, BALANCE or COST_OPTIMIZED(Available in 1.54.0+).
        """
        return pulumi.get(self, "multi_az_policy")

    @multi_az_policy.setter
    def multi_az_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "multi_az_policy", value)

    @property
    @pulumi.getter(name="onDemandBaseCapacity")
    def on_demand_base_capacity(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.
        """
        return pulumi.get(self, "on_demand_base_capacity")

    @on_demand_base_capacity.setter
    def on_demand_base_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "on_demand_base_capacity", value)

    @property
    @pulumi.getter(name="onDemandPercentageAboveBaseCapacity")
    def on_demand_percentage_above_base_capacity(self) -> Optional[pulumi.Input[int]]:
        """
        Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity.
        """
        return pulumi.get(self, "on_demand_percentage_above_base_capacity")

    @on_demand_percentage_above_base_capacity.setter
    def on_demand_percentage_above_base_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "on_demand_percentage_above_base_capacity", value)

    @property
    @pulumi.getter(name="removalPolicies")
    def removal_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        RemovalPolicy is used to select the ECS instances you want to remove from the scaling group when multiple candidates for removal exist. Optional values:
        - OldestInstance: removes the ECS instance that is added to the scaling group at the earliest point in time.
        - NewestInstance: removes the ECS instance that is added to the scaling group at the latest point in time.
        - OldestScalingConfiguration: removes the ECS instance that is created based on the earliest scaling configuration.
        - Default values: Default value of RemovalPolicy.1: OldestScalingConfiguration. Default value of RemovalPolicy.2: OldestInstance.
        """
        return pulumi.get(self, "removal_policies")

    @removal_policies.setter
    def removal_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "removal_policies", value)

    @property
    @pulumi.getter(name="scalingGroupName")
    def scaling_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name shown for the scaling group, which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain numbers, underscores `_`, hyphens `-`, and decimal points `.`. If this parameter is not specified, the default value is ScalingGroupId.
        """
        return pulumi.get(self, "scaling_group_name")

    @scaling_group_name.setter
    def scaling_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scaling_group_name", value)

    @property
    @pulumi.getter(name="spotInstancePools")
    def spot_instance_pools(self) -> Optional[pulumi.Input[int]]:
        """
        The number of Spot pools to use to allocate your Spot capacity. The Spot pools is composed of instance types of lowest price.
        """
        return pulumi.get(self, "spot_instance_pools")

    @spot_instance_pools.setter
    def spot_instance_pools(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "spot_instance_pools", value)

    @property
    @pulumi.getter(name="spotInstanceRemedy")
    def spot_instance_remedy(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to replace spot instances with newly created spot/onDemand instance when receive a spot recycling message.
        """
        return pulumi.get(self, "spot_instance_remedy")

    @spot_instance_remedy.setter
    def spot_instance_remedy(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "spot_instance_remedy", value)

    @property
    @pulumi.getter(name="vswitchId")
    def vswitch_id(self) -> Optional[pulumi.Input[str]]:
        """
        It has been deprecated from version 1.7.1 and new field 'vswitch_ids' replaces it.
        """
        return pulumi.get(self, "vswitch_id")

    @vswitch_id.setter
    def vswitch_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vswitch_id", value)

    @property
    @pulumi.getter(name="vswitchIds")
    def vswitch_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of virtual switch IDs in which the ecs instances to be launched.
        """
        return pulumi.get(self, "vswitch_ids")

    @vswitch_ids.setter
    def vswitch_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "vswitch_ids", value)
@pulumi.input_type
class _ScalingGroupState:
    """State container for an existing ``ScalingGroup`` resource.

    Generated by the Pulumi Terraform bridge (tfgen) — do not hand-edit.
    Unlike ``ScalingGroupArgs``, every field (including ``max_size`` and
    ``min_size``) is optional, since lookups may carry partial state.
    """

    def __init__(__self__, *,
                 db_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 default_cooldown: Optional[pulumi.Input[int]] = None,
                 desired_capacity: Optional[pulumi.Input[int]] = None,
                 group_deletion_protection: Optional[pulumi.Input[bool]] = None,
                 loadbalancer_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 max_size: Optional[pulumi.Input[int]] = None,
                 min_size: Optional[pulumi.Input[int]] = None,
                 multi_az_policy: Optional[pulumi.Input[str]] = None,
                 on_demand_base_capacity: Optional[pulumi.Input[int]] = None,
                 on_demand_percentage_above_base_capacity: Optional[pulumi.Input[int]] = None,
                 removal_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 scaling_group_name: Optional[pulumi.Input[str]] = None,
                 spot_instance_pools: Optional[pulumi.Input[int]] = None,
                 spot_instance_remedy: Optional[pulumi.Input[bool]] = None,
                 vswitch_id: Optional[pulumi.Input[str]] = None,
                 vswitch_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering ScalingGroup resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] db_instance_ids: If an RDS instance is specified in the scaling group, the scaling group automatically attaches the Intranet IP addresses of its ECS instances to the RDS access whitelist.
               - The specified RDS instance must be in running status.
               - The specified RDS instance’s whitelist must have room for more IP addresses.
        :param pulumi.Input[int] default_cooldown: Default cool-down time (in seconds) of the scaling group. Value range: [0, 86400]. The default value is 300s.
        :param pulumi.Input[int] desired_capacity: Expected number of ECS instances in the scaling group. Value range: [min_size, max_size].
        :param pulumi.Input[bool] group_deletion_protection: Specifies whether the scaling group deletion protection is enabled. `true` or `false`, Default value: `false`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] loadbalancer_ids: If a Server Load Balancer instance is specified in the scaling group, the scaling group automatically attaches its ECS instances to the Server Load Balancer instance.
               - The Server Load Balancer instance must be enabled.
               - At least one listener must be configured for each Server Load Balancer and it HealthCheck must be on. Otherwise, creation will fail (it may be useful to add a `depends_on` argument
                 targeting your `slb.Listener` in order to make sure the listener with its HealthCheck configuration is ready before creating your scaling group).
               - The Server Load Balancer instance attached with VPC-type ECS instances cannot be attached to the scaling group.
               - The default weight of an ECS instance attached to the Server Load Balancer instance is 50.
        :param pulumi.Input[int] max_size: Maximum number of ECS instances in the scaling group. Value range: [0, 1000].
        :param pulumi.Input[int] min_size: Minimum number of ECS instances in the scaling group. Value range: [0, 1000].
        :param pulumi.Input[str] multi_az_policy: Multi-AZ scaling group ECS instance expansion and contraction strategy. PRIORITY, BALANCE or COST_OPTIMIZED(Available in 1.54.0+).
        :param pulumi.Input[int] on_demand_base_capacity: The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.
        :param pulumi.Input[int] on_demand_percentage_above_base_capacity: Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] removal_policies: RemovalPolicy is used to select the ECS instances you want to remove from the scaling group when multiple candidates for removal exist. Optional values:
               - OldestInstance: removes the ECS instance that is added to the scaling group at the earliest point in time.
               - NewestInstance: removes the ECS instance that is added to the scaling group at the latest point in time.
               - OldestScalingConfiguration: removes the ECS instance that is created based on the earliest scaling configuration.
               - Default values: Default value of RemovalPolicy.1: OldestScalingConfiguration. Default value of RemovalPolicy.2: OldestInstance.
        :param pulumi.Input[str] scaling_group_name: Name shown for the scaling group, which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain numbers, underscores `_`, hyphens `-`, and decimal points `.`. If this parameter is not specified, the default value is ScalingGroupId.
        :param pulumi.Input[int] spot_instance_pools: The number of Spot pools to use to allocate your Spot capacity. The Spot pools is composed of instance types of lowest price.
        :param pulumi.Input[bool] spot_instance_remedy: Whether to replace spot instances with newly created spot/onDemand instance when receive a spot recycling message.
        :param pulumi.Input[str] vswitch_id: It has been deprecated from version 1.7.1 and new field 'vswitch_ids' replaces it.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] vswitch_ids: List of virtual switch IDs in which the ecs instances to be launched.
        """
        # Only fields the caller actually supplied are stored.
        if db_instance_ids is not None:
            pulumi.set(__self__, "db_instance_ids", db_instance_ids)
        if default_cooldown is not None:
            pulumi.set(__self__, "default_cooldown", default_cooldown)
        if desired_capacity is not None:
            pulumi.set(__self__, "desired_capacity", desired_capacity)
        if group_deletion_protection is not None:
            pulumi.set(__self__, "group_deletion_protection", group_deletion_protection)
        if loadbalancer_ids is not None:
            pulumi.set(__self__, "loadbalancer_ids", loadbalancer_ids)
        if max_size is not None:
            pulumi.set(__self__, "max_size", max_size)
        if min_size is not None:
            pulumi.set(__self__, "min_size", min_size)
        if multi_az_policy is not None:
            pulumi.set(__self__, "multi_az_policy", multi_az_policy)
        if on_demand_base_capacity is not None:
            pulumi.set(__self__, "on_demand_base_capacity", on_demand_base_capacity)
        if on_demand_percentage_above_base_capacity is not None:
            pulumi.set(__self__, "on_demand_percentage_above_base_capacity", on_demand_percentage_above_base_capacity)
        if removal_policies is not None:
            pulumi.set(__self__, "removal_policies", removal_policies)
        if scaling_group_name is not None:
            pulumi.set(__self__, "scaling_group_name", scaling_group_name)
        if spot_instance_pools is not None:
            pulumi.set(__self__, "spot_instance_pools", spot_instance_pools)
        if spot_instance_remedy is not None:
            pulumi.set(__self__, "spot_instance_remedy", spot_instance_remedy)
        if vswitch_id is not None:
            # Generated pattern: this first check only emits the deprecation
            # warnings; the value itself is stored by the identical check below.
            warnings.warn("""Field 'vswitch_id' has been deprecated from provider version 1.7.1, and new field 'vswitch_ids' can replace it.""", DeprecationWarning)
            pulumi.log.warn("""vswitch_id is deprecated: Field 'vswitch_id' has been deprecated from provider version 1.7.1, and new field 'vswitch_ids' can replace it.""")
        if vswitch_id is not None:
            pulumi.set(__self__, "vswitch_id", vswitch_id)
        if vswitch_ids is not None:
            pulumi.set(__self__, "vswitch_ids", vswitch_ids)

    @property
    @pulumi.getter(name="dbInstanceIds")
    def db_instance_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        If an RDS instance is specified in the scaling group, the scaling group automatically attaches the Intranet IP addresses of its ECS instances to the RDS access whitelist.
        - The specified RDS instance must be in running status.
        - The specified RDS instance’s whitelist must have room for more IP addresses.
        """
        return pulumi.get(self, "db_instance_ids")

    @db_instance_ids.setter
    def db_instance_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "db_instance_ids", value)

    @property
    @pulumi.getter(name="defaultCooldown")
    def default_cooldown(self) -> Optional[pulumi.Input[int]]:
        """
        Default cool-down time (in seconds) of the scaling group. Value range: [0, 86400]. The default value is 300s.
        """
        return pulumi.get(self, "default_cooldown")

    @default_cooldown.setter
    def default_cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "default_cooldown", value)

    @property
    @pulumi.getter(name="desiredCapacity")
    def desired_capacity(self) -> Optional[pulumi.Input[int]]:
        """
        Expected number of ECS instances in the scaling group. Value range: [min_size, max_size].
        """
        return pulumi.get(self, "desired_capacity")

    @desired_capacity.setter
    def desired_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "desired_capacity", value)

    @property
    @pulumi.getter(name="groupDeletionProtection")
    def group_deletion_protection(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the scaling group deletion protection is enabled. `true` or `false`, Default value: `false`.
        """
        return pulumi.get(self, "group_deletion_protection")

    @group_deletion_protection.setter
    def group_deletion_protection(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "group_deletion_protection", value)

    @property
    @pulumi.getter(name="loadbalancerIds")
    def loadbalancer_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        If a Server Load Balancer instance is specified in the scaling group, the scaling group automatically attaches its ECS instances to the Server Load Balancer instance.
        - The Server Load Balancer instance must be enabled.
        - At least one listener must be configured for each Server Load Balancer and it HealthCheck must be on. Otherwise, creation will fail (it may be useful to add a `depends_on` argument
          targeting your `slb.Listener` in order to make sure the listener with its HealthCheck configuration is ready before creating your scaling group).
        - The Server Load Balancer instance attached with VPC-type ECS instances cannot be attached to the scaling group.
        - The default weight of an ECS instance attached to the Server Load Balancer instance is 50.
        """
        return pulumi.get(self, "loadbalancer_ids")

    @loadbalancer_ids.setter
    def loadbalancer_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "loadbalancer_ids", value)

    @property
    @pulumi.getter(name="maxSize")
    def max_size(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum number of ECS instances in the scaling group. Value range: [0, 1000].
        """
        return pulumi.get(self, "max_size")

    @max_size.setter
    def max_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_size", value)

    @property
    @pulumi.getter(name="minSize")
    def min_size(self) -> Optional[pulumi.Input[int]]:
        """
        Minimum number of ECS instances in the scaling group. Value range: [0, 1000].
        """
        return pulumi.get(self, "min_size")

    @min_size.setter
    def min_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_size", value)

    @property
    @pulumi.getter(name="multiAzPolicy")
    def multi_az_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Multi-AZ scaling group ECS instance expansion and contraction strategy. PRIORITY, BALANCE or COST_OPTIMIZED(Available in 1.54.0+).
        """
        return pulumi.get(self, "multi_az_policy")

    @multi_az_policy.setter
    def multi_az_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "multi_az_policy", value)

    @property
    @pulumi.getter(name="onDemandBaseCapacity")
    def on_demand_base_capacity(self) -> Optional[pulumi.Input[int]]:
        """
        The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.
        """
        return pulumi.get(self, "on_demand_base_capacity")

    @on_demand_base_capacity.setter
    def on_demand_base_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "on_demand_base_capacity", value)

    @property
    @pulumi.getter(name="onDemandPercentageAboveBaseCapacity")
    def on_demand_percentage_above_base_capacity(self) -> Optional[pulumi.Input[int]]:
        """
        Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity.
        """
        return pulumi.get(self, "on_demand_percentage_above_base_capacity")

    @on_demand_percentage_above_base_capacity.setter
    def on_demand_percentage_above_base_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "on_demand_percentage_above_base_capacity", value)

    @property
    @pulumi.getter(name="removalPolicies")
    def removal_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        RemovalPolicy is used to select the ECS instances you want to remove from the scaling group when multiple candidates for removal exist. Optional values:
        - OldestInstance: removes the ECS instance that is added to the scaling group at the earliest point in time.
        - NewestInstance: removes the ECS instance that is added to the scaling group at the latest point in time.
        - OldestScalingConfiguration: removes the ECS instance that is created based on the earliest scaling configuration.
        - Default values: Default value of RemovalPolicy.1: OldestScalingConfiguration. Default value of RemovalPolicy.2: OldestInstance.
        """
        return pulumi.get(self, "removal_policies")

    @removal_policies.setter
    def removal_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "removal_policies", value)

    @property
    @pulumi.getter(name="scalingGroupName")
    def scaling_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name shown for the scaling group, which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain numbers, underscores `_`, hyphens `-`, and decimal points `.`. If this parameter is not specified, the default value is ScalingGroupId.
        """
        return pulumi.get(self, "scaling_group_name")

    @scaling_group_name.setter
    def scaling_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scaling_group_name", value)

    @property
    @pulumi.getter(name="spotInstancePools")
    def spot_instance_pools(self) -> Optional[pulumi.Input[int]]:
        """
        The number of Spot pools to use to allocate your Spot capacity. The Spot pools is composed of instance types of lowest price.
        """
        return pulumi.get(self, "spot_instance_pools")

    @spot_instance_pools.setter
    def spot_instance_pools(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "spot_instance_pools", value)

    @property
    @pulumi.getter(name="spotInstanceRemedy")
    def spot_instance_remedy(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to replace spot instances with newly created spot/onDemand instance when receive a spot recycling message.
        """
        return pulumi.get(self, "spot_instance_remedy")

    @spot_instance_remedy.setter
    def spot_instance_remedy(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "spot_instance_remedy", value)

    @property
    @pulumi.getter(name="vswitchId")
    def vswitch_id(self) -> Optional[pulumi.Input[str]]:
        """
        It has been deprecated from version 1.7.1 and new field 'vswitch_ids' replaces it.
        """
        return pulumi.get(self, "vswitch_id")

    @vswitch_id.setter
    def vswitch_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vswitch_id", value)

    @property
    @pulumi.getter(name="vswitchIds")
    def vswitch_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of virtual switch IDs in which the ecs instances to be launched.
        """
        return pulumi.get(self, "vswitch_ids")

    @vswitch_ids.setter
    def vswitch_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "vswitch_ids", value)
class ScalingGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
db_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
default_cooldown: Optional[pulumi.Input[int]] = None,
desired_capacity: Optional[pulumi.Input[int]] = None,
group_deletion_protection: Optional[pulumi.Input[bool]] = None,
loadbalancer_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
max_size: Optional[pulumi.Input[int]] = None,
min_size: Optional[pulumi.Input[int]] = None,
multi_az_policy: Optional[pulumi.Input[str]] = None,
on_demand_base_capacity: Optional[pulumi.Input[int]] = None,
on_demand_percentage_above_base_capacity: Optional[pulumi.Input[int]] = None,
removal_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
scaling_group_name: Optional[pulumi.Input[str]] = None,
spot_instance_pools: Optional[pulumi.Input[int]] = None,
spot_instance_remedy: Optional[pulumi.Input[bool]] = None,
vswitch_id: Optional[pulumi.Input[str]] = None,
vswitch_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
## Import
ESS scaling group can be imported using the id, e.g.
```sh
$ pulumi import alicloud:ess/scalingGroup:ScalingGroup example asg-abc123456
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] db_instance_ids: If an RDS instance is specified in the scaling group, the scaling group automatically attaches the Intranet IP addresses of its ECS instances to the RDS access whitelist.
- The specified RDS instance must be in running status.
- The specified RDS instance’s whitelist must have room for more IP addresses.
:param pulumi.Input[int] default_cooldown: Default cool-down time (in seconds) of the scaling group. Value range: [0, 86400]. The default value is 300s.
:param pulumi.Input[int] desired_capacity: Expected number of ECS instances in the scaling group. Value range: [min_size, max_size].
:param pulumi.Input[bool] group_deletion_protection: Specifies whether the scaling group deletion protection is enabled. `true` or `false`, Default value: `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] loadbalancer_ids: If a Server Load Balancer instance is specified in the scaling group, the scaling group automatically attaches its ECS instances to the Server Load Balancer instance.
- The Server Load Balancer instance must be enabled.
- At least one listener must be configured for each Server Load Balancer and it HealthCheck must be on. Otherwise, creation will fail (it may be useful to add a `depends_on` argument
targeting your `slb.Listener` in order to make sure the listener with its HealthCheck configuration is ready before creating your scaling group).
- The Server Load Balancer instance attached with VPC-type ECS instances cannot be attached to the scaling group.
- The default weight of an ECS instance attached to the Server Load Balancer instance is 50.
:param pulumi.Input[int] max_size: Maximum number of ECS instances in the scaling group. Value range: [0, 1000].
:param pulumi.Input[int] min_size: Minimum number of ECS instances in the scaling group. Value range: [0, 1000].
:param pulumi.Input[str] multi_az_policy: Multi-AZ scaling group ECS instance expansion and contraction strategy. PRIORITY, BALANCE or COST_OPTIMIZED(Available in 1.54.0+).
:param pulumi.Input[int] on_demand_base_capacity: The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.
:param pulumi.Input[int] on_demand_percentage_above_base_capacity: Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity.
:param pulumi.Input[Sequence[pulumi.Input[str]]] removal_policies: RemovalPolicy is used to select the ECS instances you want to remove from the scaling group when multiple candidates for removal exist. Optional values:
- OldestInstance: removes the ECS instance that is added to the scaling group at the earliest point in time.
- NewestInstance: removes the ECS instance that is added to the scaling group at the latest point in time.
- OldestScalingConfiguration: removes the ECS instance that is created based on the earliest scaling configuration.
- Default values: Default value of RemovalPolicy.1: OldestScalingConfiguration. Default value of RemovalPolicy.2: OldestInstance.
:param pulumi.Input[str] scaling_group_name: Name shown for the scaling group, which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain numbers, underscores `_`, hyphens `-`, and decimal points `.`. If this parameter is not specified, the default value is ScalingGroupId.
:param pulumi.Input[int] spot_instance_pools: The number of Spot pools to use to allocate your Spot capacity. The Spot pools is composed of instance types of lowest price.
:param pulumi.Input[bool] spot_instance_remedy: Whether to replace spot instances with newly created spot/onDemand instance when receive a spot recycling message.
:param pulumi.Input[str] vswitch_id: It has been deprecated from version 1.7.1 and new field 'vswitch_ids' replaces it.
:param pulumi.Input[Sequence[pulumi.Input[str]]] vswitch_ids: List of virtual switch IDs in which the ecs instances to be launched.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ScalingGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
ESS scaling group can be imported using the id, e.g.
```sh
$ pulumi import alicloud:ess/scalingGroup:ScalingGroup example asg-abc123456
```
:param str resource_name: The name of the resource.
:param ScalingGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ScalingGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
db_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
default_cooldown: Optional[pulumi.Input[int]] = None,
desired_capacity: Optional[pulumi.Input[int]] = None,
group_deletion_protection: Optional[pulumi.Input[bool]] = None,
loadbalancer_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
max_size: Optional[pulumi.Input[int]] = None,
min_size: Optional[pulumi.Input[int]] = None,
multi_az_policy: Optional[pulumi.Input[str]] = None,
on_demand_base_capacity: Optional[pulumi.Input[int]] = None,
on_demand_percentage_above_base_capacity: Optional[pulumi.Input[int]] = None,
removal_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
scaling_group_name: Optional[pulumi.Input[str]] = None,
spot_instance_pools: Optional[pulumi.Input[int]] = None,
spot_instance_remedy: Optional[pulumi.Input[bool]] = None,
vswitch_id: Optional[pulumi.Input[str]] = None,
vswitch_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ScalingGroupArgs.__new__(ScalingGroupArgs)
__props__.__dict__["db_instance_ids"] = db_instance_ids
__props__.__dict__["default_cooldown"] = default_cooldown
__props__.__dict__["desired_capacity"] = desired_capacity
__props__.__dict__["group_deletion_protection"] = group_deletion_protection
__props__.__dict__["loadbalancer_ids"] = loadbalancer_ids
if max_size is None and not opts.urn:
raise TypeError("Missing required property 'max_size'")
__props__.__dict__["max_size"] = max_size
if min_size is None and not opts.urn:
raise TypeError("Missing required property 'min_size'")
__props__.__dict__["min_size"] = min_size
__props__.__dict__["multi_az_policy"] = multi_az_policy
__props__.__dict__["on_demand_base_capacity"] = on_demand_base_capacity
__props__.__dict__["on_demand_percentage_above_base_capacity"] = on_demand_percentage_above_base_capacity
__props__.__dict__["removal_policies"] = removal_policies
__props__.__dict__["scaling_group_name"] = scaling_group_name
__props__.__dict__["spot_instance_pools"] = spot_instance_pools
__props__.__dict__["spot_instance_remedy"] = spot_instance_remedy
if vswitch_id is not None and not opts.urn:
warnings.warn("""Field 'vswitch_id' has been deprecated from provider version 1.7.1, and new field 'vswitch_ids' can replace it.""", DeprecationWarning)
pulumi.log.warn("""vswitch_id is deprecated: Field 'vswitch_id' has been deprecated from provider version 1.7.1, and new field 'vswitch_ids' can replace it.""")
__props__.__dict__["vswitch_id"] = vswitch_id
__props__.__dict__["vswitch_ids"] = vswitch_ids
super(ScalingGroup, __self__).__init__(
'alicloud:ess/scalingGroup:ScalingGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
db_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
default_cooldown: Optional[pulumi.Input[int]] = None,
desired_capacity: Optional[pulumi.Input[int]] = None,
group_deletion_protection: Optional[pulumi.Input[bool]] = None,
loadbalancer_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
max_size: Optional[pulumi.Input[int]] = None,
min_size: Optional[pulumi.Input[int]] = None,
multi_az_policy: Optional[pulumi.Input[str]] = None,
on_demand_base_capacity: Optional[pulumi.Input[int]] = None,
on_demand_percentage_above_base_capacity: Optional[pulumi.Input[int]] = None,
removal_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
scaling_group_name: Optional[pulumi.Input[str]] = None,
spot_instance_pools: Optional[pulumi.Input[int]] = None,
spot_instance_remedy: Optional[pulumi.Input[bool]] = None,
vswitch_id: Optional[pulumi.Input[str]] = None,
vswitch_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'ScalingGroup':
"""
Get an existing ScalingGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] db_instance_ids: If an RDS instance is specified in the scaling group, the scaling group automatically attaches the Intranet IP addresses of its ECS instances to the RDS access whitelist.
- The specified RDS instance must be in running status.
- The specified RDS instance’s whitelist must have room for more IP addresses.
:param pulumi.Input[int] default_cooldown: Default cool-down time (in seconds) of the scaling group. Value range: [0, 86400]. The default value is 300s.
:param pulumi.Input[int] desired_capacity: Expected number of ECS instances in the scaling group. Value range: [min_size, max_size].
:param pulumi.Input[bool] group_deletion_protection: Specifies whether the scaling group deletion protection is enabled. `true` or `false`, Default value: `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] loadbalancer_ids: If a Server Load Balancer instance is specified in the scaling group, the scaling group automatically attaches its ECS instances to the Server Load Balancer instance.
- The Server Load Balancer instance must be enabled.
- At least one listener must be configured for each Server Load Balancer and it HealthCheck must be on. Otherwise, creation will fail (it may be useful to add a `depends_on` argument
targeting your `slb.Listener` in order to make sure the listener with its HealthCheck configuration is ready before creating your scaling group).
- The Server Load Balancer instance attached with VPC-type ECS instances cannot be attached to the scaling group.
- The default weight of an ECS instance attached to the Server Load Balancer instance is 50.
:param pulumi.Input[int] max_size: Maximum number of ECS instances in the scaling group. Value range: [0, 1000].
:param pulumi.Input[int] min_size: Minimum number of ECS instances in the scaling group. Value range: [0, 1000].
:param pulumi.Input[str] multi_az_policy: Multi-AZ scaling group ECS instance expansion and contraction strategy. PRIORITY, BALANCE or COST_OPTIMIZED(Available in 1.54.0+).
:param pulumi.Input[int] on_demand_base_capacity: The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.
:param pulumi.Input[int] on_demand_percentage_above_base_capacity: Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity.
:param pulumi.Input[Sequence[pulumi.Input[str]]] removal_policies: RemovalPolicy is used to select the ECS instances you want to remove from the scaling group when multiple candidates for removal exist. Optional values:
- OldestInstance: removes the ECS instance that is added to the scaling group at the earliest point in time.
- NewestInstance: removes the ECS instance that is added to the scaling group at the latest point in time.
- OldestScalingConfiguration: removes the ECS instance that is created based on the earliest scaling configuration.
- Default values: Default value of RemovalPolicy.1: OldestScalingConfiguration. Default value of RemovalPolicy.2: OldestInstance.
:param pulumi.Input[str] scaling_group_name: Name shown for the scaling group, which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain numbers, underscores `_`, hyphens `-`, and decimal points `.`. If this parameter is not specified, the default value is ScalingGroupId.
:param pulumi.Input[int] spot_instance_pools: The number of Spot pools to use to allocate your Spot capacity. The Spot pools is composed of instance types of lowest price.
:param pulumi.Input[bool] spot_instance_remedy: Whether to replace spot instances with newly created spot/onDemand instance when receive a spot recycling message.
:param pulumi.Input[str] vswitch_id: It has been deprecated from version 1.7.1 and new field 'vswitch_ids' replaces it.
:param pulumi.Input[Sequence[pulumi.Input[str]]] vswitch_ids: List of virtual switch IDs in which the ecs instances to be launched.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ScalingGroupState.__new__(_ScalingGroupState)
__props__.__dict__["db_instance_ids"] = db_instance_ids
__props__.__dict__["default_cooldown"] = default_cooldown
__props__.__dict__["desired_capacity"] = desired_capacity
__props__.__dict__["group_deletion_protection"] = group_deletion_protection
__props__.__dict__["loadbalancer_ids"] = loadbalancer_ids
__props__.__dict__["max_size"] = max_size
__props__.__dict__["min_size"] = min_size
__props__.__dict__["multi_az_policy"] = multi_az_policy
__props__.__dict__["on_demand_base_capacity"] = on_demand_base_capacity
__props__.__dict__["on_demand_percentage_above_base_capacity"] = on_demand_percentage_above_base_capacity
__props__.__dict__["removal_policies"] = removal_policies
__props__.__dict__["scaling_group_name"] = scaling_group_name
__props__.__dict__["spot_instance_pools"] = spot_instance_pools
__props__.__dict__["spot_instance_remedy"] = spot_instance_remedy
__props__.__dict__["vswitch_id"] = vswitch_id
__props__.__dict__["vswitch_ids"] = vswitch_ids
return ScalingGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dbInstanceIds")
def db_instance_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
If an RDS instance is specified in the scaling group, the scaling group automatically attaches the Intranet IP addresses of its ECS instances to the RDS access whitelist.
- The specified RDS instance must be in running status.
- The specified RDS instance’s whitelist must have room for more IP addresses.
"""
return pulumi.get(self, "db_instance_ids")
@property
@pulumi.getter(name="defaultCooldown")
def default_cooldown(self) -> pulumi.Output[Optional[int]]:
"""
Default cool-down time (in seconds) of the scaling group. Value range: [0, 86400]. The default value is 300s.
"""
return pulumi.get(self, "default_cooldown")
@property
@pulumi.getter(name="desiredCapacity")
def desired_capacity(self) -> pulumi.Output[Optional[int]]:
"""
Expected number of ECS instances in the scaling group. Value range: [min_size, max_size].
"""
return pulumi.get(self, "desired_capacity")
@property
@pulumi.getter(name="groupDeletionProtection")
def group_deletion_protection(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies whether the scaling group deletion protection is enabled. `true` or `false`, Default value: `false`.
"""
return pulumi.get(self, "group_deletion_protection")
@property
@pulumi.getter(name="loadbalancerIds")
def loadbalancer_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
If a Server Load Balancer instance is specified in the scaling group, the scaling group automatically attaches its ECS instances to the Server Load Balancer instance.
- The Server Load Balancer instance must be enabled.
- At least one listener must be configured for each Server Load Balancer and it HealthCheck must be on. Otherwise, creation will fail (it may be useful to add a `depends_on` argument
targeting your `slb.Listener` in order to make sure the listener with its HealthCheck configuration is ready before creating your scaling group).
- The Server Load Balancer instance attached with VPC-type ECS instances cannot be attached to the scaling group.
- The default weight of an ECS instance attached to the Server Load Balancer instance is 50.
"""
return pulumi.get(self, "loadbalancer_ids")
@property
@pulumi.getter(name="maxSize")
def max_size(self) -> pulumi.Output[int]:
"""
Maximum number of ECS instances in the scaling group. Value range: [0, 1000].
"""
return pulumi.get(self, "max_size")
@property
@pulumi.getter(name="minSize")
def min_size(self) -> pulumi.Output[int]:
"""
Minimum number of ECS instances in the scaling group. Value range: [0, 1000].
"""
return pulumi.get(self, "min_size")
@property
@pulumi.getter(name="multiAzPolicy")
def multi_az_policy(self) -> pulumi.Output[Optional[str]]:
"""
Multi-AZ scaling group ECS instance expansion and contraction strategy. PRIORITY, BALANCE or COST_OPTIMIZED(Available in 1.54.0+).
"""
return pulumi.get(self, "multi_az_policy")
@property
@pulumi.getter(name="onDemandBaseCapacity")
def on_demand_base_capacity(self) -> pulumi.Output[int]:
"""
The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.
"""
return pulumi.get(self, "on_demand_base_capacity")
@property
@pulumi.getter(name="onDemandPercentageAboveBaseCapacity")
def on_demand_percentage_above_base_capacity(self) -> pulumi.Output[int]:
"""
Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity.
"""
return pulumi.get(self, "on_demand_percentage_above_base_capacity")
@property
@pulumi.getter(name="removalPolicies")
def removal_policies(self) -> pulumi.Output[Sequence[str]]:
"""
RemovalPolicy is used to select the ECS instances you want to remove from the scaling group when multiple candidates for removal exist. Optional values:
- OldestInstance: removes the ECS instance that is added to the scaling group at the earliest point in time.
- NewestInstance: removes the ECS instance that is added to the scaling group at the latest point in time.
- OldestScalingConfiguration: removes the ECS instance that is created based on the earliest scaling configuration.
- Default values: Default value of RemovalPolicy.1: OldestScalingConfiguration. Default value of RemovalPolicy.2: OldestInstance.
"""
return pulumi.get(self, "removal_policies")
@property
@pulumi.getter(name="scalingGroupName")
def scaling_group_name(self) -> pulumi.Output[Optional[str]]:
"""
Name shown for the scaling group, which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain numbers, underscores `_`, hyphens `-`, and decimal points `.`. If this parameter is not specified, the default value is ScalingGroupId.
"""
return pulumi.get(self, "scaling_group_name")
@property
@pulumi.getter(name="spotInstancePools")
def spot_instance_pools(self) -> pulumi.Output[int]:
"""
The number of Spot pools to use to allocate your Spot capacity. The Spot pools is composed of instance types of lowest price.
"""
return pulumi.get(self, "spot_instance_pools")
@property
@pulumi.getter(name="spotInstanceRemedy")
def spot_instance_remedy(self) -> pulumi.Output[bool]:
"""
Whether to replace spot instances with newly created spot/onDemand instance when receive a spot recycling message.
"""
return pulumi.get(self, "spot_instance_remedy")
@property
@pulumi.getter(name="vswitchId")
def vswitch_id(self) -> pulumi.Output[Optional[str]]:
"""
It has been deprecated from version 1.7.1 and new field 'vswitch_ids' replaces it.
"""
return pulumi.get(self, "vswitch_id")
@property
@pulumi.getter(name="vswitchIds")
def vswitch_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of virtual switch IDs in which the ecs instances to be launched.
"""
return pulumi.get(self, "vswitch_ids")
| 63.202518
| 357
| 0.701056
| 7,764
| 60,232
| 5.250773
| 0.042761
| 0.071234
| 0.064317
| 0.03076
| 0.958324
| 0.951701
| 0.950033
| 0.946084
| 0.940712
| 0.931759
| 0
| 0.00554
| 0.214819
| 60,232
| 952
| 358
| 63.268908
| 0.856469
| 0.459158
| 0
| 0.862782
| 1
| 0.011278
| 0.142211
| 0.033368
| 0
| 0
| 0
| 0
| 0
| 1
| 0.163534
| false
| 0.00188
| 0.009399
| 0
| 0.270677
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
98a47190cc84d4bc1283f8db6a02aaf9ff6f540b
| 24
|
py
|
Python
|
pyutilib/misc/tests/import_data/a/tfile.py
|
PyUtilib/PyUtilib
|
d99406f2af1fb62268c34453a2fbe6bd4a7348f0
|
[
"BSD-3-Clause"
] | 24
|
2016-04-02T10:00:02.000Z
|
2021-03-02T16:40:18.000Z
|
pyutilib/misc/tests/import_data/a/tfile.py
|
PyUtilib/PyUtilib
|
d99406f2af1fb62268c34453a2fbe6bd4a7348f0
|
[
"BSD-3-Clause"
] | 105
|
2015-10-29T03:29:58.000Z
|
2021-12-30T22:00:45.000Z
|
pyutilib/misc/tests/import_data/a/tfile.py
|
PyUtilib/PyUtilib
|
d99406f2af1fb62268c34453a2fbe6bd4a7348f0
|
[
"BSD-3-Clause"
] | 22
|
2016-01-21T15:35:25.000Z
|
2021-05-15T20:17:44.000Z
|
def f():
return 'a'
| 8
| 14
| 0.458333
| 4
| 24
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 24
| 2
| 15
| 12
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
7f64c055c79010c73ed3892d762e1e2188042db8
| 133
|
py
|
Python
|
opener.py
|
jegor377/MyOwnVoiceAssistant
|
fab85bb9d80cb3b4dd0e544e5c1baf6102b47b98
|
[
"MIT"
] | null | null | null |
opener.py
|
jegor377/MyOwnVoiceAssistant
|
fab85bb9d80cb3b4dd0e544e5c1baf6102b47b98
|
[
"MIT"
] | null | null | null |
opener.py
|
jegor377/MyOwnVoiceAssistant
|
fab85bb9d80cb3b4dd0e544e5c1baf6102b47b98
|
[
"MIT"
] | null | null | null |
class Opener():
keywords = []
def has_keyword(self, keyword):
return keyword in self.keywords
def do_job(self, target):
pass
| 16.625
| 33
| 0.706767
| 19
| 133
| 4.842105
| 0.684211
| 0.23913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180451
| 133
| 8
| 34
| 16.625
| 0.844037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
7faf778d464e690cac86a6490d63101e4a832031
| 54,540
|
py
|
Python
|
bot.py
|
wjm/BangumiTelegramBot
|
e919d073891d83840b032684097c5596c6a75081
|
[
"MIT"
] | null | null | null |
bot.py
|
wjm/BangumiTelegramBot
|
e919d073891d83840b032684097c5596c6a75081
|
[
"MIT"
] | null | null | null |
bot.py
|
wjm/BangumiTelegramBot
|
e919d073891d83840b032684097c5596c6a75081
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
'''
https://bangumi.github.io/api/
'''
import json
import telebot
import requests
import datetime
from config import BOT_TOKEN, APP_ID, APP_SECRET, WEBSITE_BASE, BOT_USERNAME
# 请求TG Bot api
bot = telebot.TeleBot(BOT_TOKEN)
# 绑定 Bangumi
@bot.message_handler(commands=['start'])
def send_start(message):
if message.chat.type == "private": # 当私人聊天
test_id = message.from_user.id
if data_seek_get(test_id) == 'yes':
bot.send_message(message.chat.id, "已绑定", timeout=20)
else:
text = {'请绑定您的Bangumi'}
url= f'{WEBSITE_BASE}oauth_index?tg_id={test_id}'
markup = telebot.types.InlineKeyboardMarkup()
markup.add(telebot.types.InlineKeyboardButton(text='绑定Bangumi',url=url))
bot.send_message(message.chat.id, text=text, parse_mode='Markdown', reply_markup=markup ,timeout=20)
else:
if message.text == f'/start@{BOT_USERNAME}':
bot.send_message(message.chat.id, '请私聊我进行Bangumi绑定', parse_mode='Markdown' ,timeout=20)
else:
pass
# 查询 Bangumi 用户收藏统计
@bot.message_handler(commands=['my'])
def send_my(message):
message_data = message.text.split(' ')
test_id = message.from_user.id
if len(message_data) == 1:
if data_seek_get(test_id) == 'no':
bot.send_message(message.chat.id, "未绑定Bangumi,请私聊使用[/start](https://t.me/"+BOT_USERNAME+"?start=none)进行绑定", parse_mode='Markdown', timeout=20)
else:
msg = bot.send_message(message.chat.id, "正在查询请稍后...", reply_to_message_id=message.message_id, parse_mode='Markdown', timeout=20)
access_token = user_data_get(test_id).get('access_token')
params = {'app_id': APP_ID}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
'Authorization': 'Bearer ' + access_token}
url = 'https://api.bgm.tv/user/' + str(user_data_get(test_id).get('user_id')) + '/collections/status'
r = requests.get(url=url, params=params, headers=headers)
startus_data = json.loads(r.text)
if startus_data == None:
bot.delete_message(message.chat.id, message_id=msg.message_id, timeout=20)
bot.send_message(message.chat.id, text='您没有观看记录,快去bgm上点几个格子吧~', parse_mode='Markdown', timeout=20)
else:
book = None
book_do = 0
book_collect = 0
for i in startus_data:
if i.get('name') == 'book':
book = i.get('collects')
for i in book:
if i.get('status').get('type') == 'do':
book_do = i.get('count')
if i.get('status').get('type') == 'collect':
book_collect = i.get('count')
anime = None
anime_do = 0
anime_collect = 0
for i in startus_data:
if i.get('name') == 'anime':
anime = i.get('collects')
for i in anime:
if i.get('status').get('type') == 'do':
anime_do = i.get('count')
if i.get('status').get('type') == 'collect':
anime_collect = i.get('count')
music = None
music_do = 0
music_collect = 0
for i in startus_data:
if i.get('name') == 'music':
music = i.get('collects')
for i in music:
if i.get('status').get('type') == 'do':
music_do = i.get('count')
if i.get('status').get('type') == 'collect':
music_collect = i.get('count')
game = None
game_do = 0
game_collect = 0
for i in startus_data:
if i.get('name') == 'game':
game = i.get('collects')
for i in game:
if i.get('status').get('type') == 'do':
game_do = i.get('count')
if i.get('status').get('type') == 'collect':
game_collect = i.get('count')
text = {'*Bangumi 用户数据统计:\n\n'+
bgmuser_data(test_id)['nickname'] +'*\n'
'➤ 动画:`'+ str(anime_do) +'在看,'+ str(anime_collect) +'看过`\n'
'➤ 图书:`'+ str(book_do) +'在读,'+ str(book_collect) +'读过`\n'
'➤ 音乐:`'+ str(music_do) +'在听,'+ str(music_collect) +'听过`\n'
'➤ 游戏:`'+ str(game_do) +'在玩,'+ str(game_collect) +'玩过`\n\n'
'[🏠 个人主页](https://bgm.tv/user/'+ str(user_data_get(test_id).get('user_id')) +')\n'
}
img_url = 'https://bgm.tv/chart/img/' + str(user_data_get(test_id).get('user_id'))
bot.delete_message(message.chat.id, message_id=msg.message_id, timeout=20)
bot.send_photo(chat_id=message.chat.id, photo=img_url, caption=text, parse_mode='Markdown')
# bot.send_message(message.chat.id, text=text, parse_mode='Markdown', timeout=20)
else:
username = message_data[1]
msg = bot.send_message(message.chat.id, "正在查询请稍后...", reply_to_message_id=message.message_id, parse_mode='Markdown', timeout=20)
params = {'app_id': APP_ID}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'}
url = 'https://api.bgm.tv/user/' + username + '/collections/status'
r = requests.get(url=url, params=params, headers=headers)
startus_data = json.loads(r.text)
try:
if startus_data.get('code') == 404:
bot.delete_message(message.chat.id, message_id=msg.message_id, timeout=20)
bot.send_message(message.chat.id, text='出错了,没有查询到该用户', parse_mode='Markdown', timeout=20)
except AttributeError:
if startus_data == None:
bot.delete_message(message.chat.id, message_id=msg.message_id, timeout=20)
bot.send_message(message.chat.id, text='您没有观看记录,快去bgm上点几个格子吧~', parse_mode='Markdown', timeout=20)
else:
book = None
book_do = 0
book_collect = 0
for i in startus_data:
if i.get('name') == 'book':
book = i.get('collects')
for i in book:
if i.get('status').get('type') == 'do':
book_do = i.get('count')
if i.get('status').get('type') == 'collect':
book_collect = i.get('count')
anime = None
anime_do = 0
anime_collect = 0
for i in startus_data:
if i.get('name') == 'anime':
anime = i.get('collects')
for i in anime:
if i.get('status').get('type') == 'do':
anime_do = i.get('count')
if i.get('status').get('type') == 'collect':
anime_collect = i.get('count')
music = None
music_do = 0
music_collect = 0
for i in startus_data:
if i.get('name') == 'music':
music = i.get('collects')
for i in music:
if i.get('status').get('type') == 'do':
music_do = i.get('count')
if i.get('status').get('type') == 'collect':
music_collect = i.get('count')
game = None
game_do = 0
game_collect = 0
for i in startus_data:
if i.get('name') == 'game':
game = i.get('collects')
for i in game:
if i.get('status').get('type') == 'do':
game_do = i.get('count')
if i.get('status').get('type') == 'collect':
game_collect = i.get('count')
url = 'https://api.bgm.tv/user/' + username
r2 = requests.get(url=url, headers=headers)
user_data = json.loads(r2.text)
nickname = user_data.get('nickname') # 获取用户昵称
uid = user_data.get('id') #获取用户UID
text = {'*Bangumi 用户数据统计:\n\n'+
nickname +'*\n'
'➤ 动画:`'+ str(anime_do) +'在看,'+ str(anime_collect) +'看过`\n'
'➤ 图书:`'+ str(book_do) +'在读,'+ str(book_collect) +'读过`\n'
'➤ 音乐:`'+ str(music_do) +'在听,'+ str(music_collect) +'听过`\n'
'➤ 游戏:`'+ str(game_do) +'在玩,'+ str(game_collect) +'玩过`\n\n'
f'[🏠 个人主页](https://bgm.tv/user/{uid})\n'
}
img_url = f'https://bgm.tv/chart/img/{uid}'
bot.delete_message(message.chat.id, message_id=msg.message_id, timeout=20)
bot.send_photo(chat_id=message.chat.id, photo=img_url, caption=text, parse_mode='Markdown')
# 动画条目搜索/查询 Bangumi 用户在看动画
@bot.message_handler(commands=['anime'])
def send_anime(message):
    """Handle /anime.

    Without an argument: show the bound user's currently-watching anime list
    (5 per page) with per-item detail buttons.  With a keyword argument:
    search Bangumi anime subjects.

    Fixes vs. previous revision:
    - ``text`` was built as a one-element set literal ``{...}``, so Telegram
      received the ``str()`` of a set (``{'...'}``); it is now a plain string.
    - ``search_get`` was called three times per search (three HTTP round
      trips); its result is now fetched once and reused.
    """
    message_data = message.text.split(' ')
    test_id = message.from_user.id
    if len(message_data) == 1:  # no keyword: show the watching list
        if data_seek_get(test_id) == 'no':
            bot.send_message(message.chat.id, "未绑定Bangumi,请私聊使用[/start](https://t.me/"+BOT_USERNAME+"?start=none)进行绑定", parse_mode='Markdown', timeout=20)
        else:
            msg = bot.send_message(message.chat.id, "正在查询请稍后...", reply_to_message_id=message.message_id, parse_mode='Markdown', timeout=20)
            access_token = user_data_get(test_id).get('access_token')
            params = {'subject_type': 2,  # anime
                      'type': 3,          # collection type: watching
                      'limit': 5,         # items per page
                      'offset': 0}        # first page
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
                'Authorization': 'Bearer ' + access_token}
            url = 'https://api.bgm.tv/v0/users/'+bgmuser_data(test_id)['username']+'/collections'
            try:
                r = requests.get(url=url, params=params, headers=headers)
            except requests.ConnectionError:
                # retry once on a transient connection failure
                r = requests.get(url=url, params=params, headers=headers)
            anime_data = json.loads(r.text)
            anime_count = anime_data.get('total')  # total watching count (int)
            subject_id_li = [i['subject_id'] for i in anime_data.get('data')]
            name_li = [subject_info_get(subject_id)['name'] for subject_id in subject_id_li]
            name_cn_li = [subject_info_get(subject_id)['name_cn'] for subject_id in subject_id_li]
            if subject_id_li == []:
                bot.delete_message(message.chat.id, message_id=msg.message_id, timeout=20)
                bot.send_message(message.chat.id, text='出错啦,您貌似没有收藏的再看', parse_mode='Markdown', timeout=20)
            else:
                markup = telebot.types.InlineKeyboardMarkup()
                no_li = list(range(1, len(subject_id_li) + 1))
                markup.add(*[telebot.types.InlineKeyboardButton(text=item[0], callback_data='anime_do'+'|'+str(test_id)+'|'+str(item[1])+'|0'+'|0') for item in zip(no_li, subject_id_li)], row_width=5)
                if anime_count > 5:
                    markup.add(telebot.types.InlineKeyboardButton(text='下一页', callback_data='anime_do_page'+'|'+str(test_id)+'|'+'5'))
                eps_li = [eps_get(test_id, subject_id)['progress'] for subject_id in subject_id_li]
                anime_text_data = ''.join(['*['+str(a)+']* '+b+'\n'+c+' `['+ d +']`\n\n' for a, b, c, d in zip(no_li, name_li, name_cn_li, eps_li)])
                text = ('*'+ bgmuser_data(test_id)['nickname'] +' 在看的动画*\n\n'+
                        anime_text_data +
                        '共'+ str(anime_count) +'部')
                bot.delete_message(message.chat.id, message_id=msg.message_id, timeout=20)
                bot.send_message(message.chat.id, text=text, parse_mode='Markdown', reply_markup=markup, timeout=20)
    else:  # keyword given: subject search
        msg = bot.send_message(message.chat.id, "正在搜索请稍后...", reply_to_message_id=message.message_id, parse_mode='Markdown', timeout=20)
        anime_search_keywords = message_data[1]
        subject_type = 2  # subject types: 1 = book, 2 = anime, 3 = music, 4 = game, 6 = real
        start = 0
        search_data = search_get(anime_search_keywords, subject_type, start)  # one request, reused below
        search_results_n = search_data['search_results_n']  # number of results
        if search_results_n == 0:
            bot.send_message(message.chat.id, text='抱歉,没能搜索到您想要的内容', parse_mode='Markdown', timeout=20)
        else:
            search_subject_id_li = search_data['subject_id_li']  # result subject ids
            search_name_li = search_data['name_li']              # result names
            markup = telebot.types.InlineKeyboardMarkup()
            for item in zip(search_name_li, search_subject_id_li):
                markup.add(telebot.types.InlineKeyboardButton(text=item[0], callback_data='animesearch'+'|'+str(anime_search_keywords)+'|'+str(item[1])+'|'+'0'+'|0'))
            if search_results_n > 5:
                markup.add(telebot.types.InlineKeyboardButton(text='下一页', callback_data='spage'+'|'+str(anime_search_keywords)+'|'+'5'))
            text = ('*关于您的 “*`'+ str(anime_search_keywords) +'`*” 搜索结果*\n\n'+
                    '🔍 共'+ str(search_results_n) +'个结果')
            bot.delete_message(message.chat.id, message_id=msg.message_id, timeout=20)
            bot.send_message(message.chat.id, text=text, parse_mode='Markdown', reply_markup=markup, timeout=20)
# 每日放送查询
@bot.message_handler(commands=['week'])
def send_week(message):
    """Handle /week <1-7>: show the broadcast calendar for one weekday.

    Any malformed input (missing/extra argument, non-numeric day, day outside
    1..7) gets the same usage-error reply.
    """
    parts = message.text.split(' ')
    well_formed = (
        len(parts) == 2
        and parts[0] == "/week"
        and parts[1].isnumeric()
        and 1 <= int(parts[1]) <= 7
    )
    if not well_formed:
        bot.send_message(message.chat.id, "输入错误 请输入:`/week 1~7`", parse_mode='Markdown', timeout=20)
        return
    # Build the calendar text/buttons first, then swap out the placeholder.
    week_data = week_text(parts[1])
    msg = bot.send_message(message.chat.id, "正在搜索请稍后...", reply_to_message_id=message.message_id, parse_mode='Markdown', timeout=20)
    bot.delete_message(message.chat.id, message_id=msg.message_id, timeout=20)
    bot.send_message(message.chat.id, text=week_data['text'], parse_mode='Markdown', reply_markup=week_data['markup'], timeout=20)
# 判断是否绑定Bangumi
def data_seek_get(test_id):
    """Return 'yes' when *test_id* is a bound Telegram user id in
    bgm_data.json, otherwise 'no'.  *test_id* may be an int or numeric str."""
    with open('bgm_data.json') as f:
        bound_ids = [entry['tg_user_id'] for entry in json.loads(f.read())]
    return 'yes' if int(test_id) in bound_ids else 'no'
# 获取用户数据
def user_data_get(test_id):
    """Return the stored OAuth data dict for *test_id* from bgm_data.json.

    When the record's expiry date (a YYYYMMDD string) is today or earlier,
    the token is refreshed via expiry_data_get() and the refreshed data is
    returned.  Returns None for an unknown user id.
    """
    with open('bgm_data.json') as f:
        records = json.loads(f.read())
    user_data = None
    for record in records:
        if record.get('tg_user_id') != test_id:
            continue
        # Lexicographic compare is safe for zero-padded YYYYMMDD strings.
        if datetime.datetime.now().strftime("%Y%m%d") >= record.get('expiry_time'):
            user_data = expiry_data_get(test_id)  # expired: refresh and reload
        else:
            user_data = record.get('data', {})
    return user_data
# 更新过期用户数据
def expiry_data_get(test_id):
    """Refresh an expired Bangumi OAuth token for *test_id*, persist the new
    tokens in bgm_data.json, and return the updated data dict (None when the
    user id is unknown).

    Bug fix: the old guard ``if access_token or refresh_token != None`` parsed
    as ``access_token or (refresh_token != None)`` due to operator precedence;
    it now checks each token against None explicitly.  The token response is
    also parsed once instead of twice.
    """
    with open('bgm_data.json') as f:
        data_seek = json.loads(f.read())
    refresh_token = None
    for i in data_seek:
        if i.get('tg_user_id') == test_id:
            refresh_token = i.get('data', {}).get('refresh_token')
    CALLBACK_URL = f'{WEBSITE_BASE}oauth_callback'
    resp = requests.post(
        'https://bgm.tv/oauth/access_token',
        data={
            'grant_type': 'refresh_token',
            'client_id': APP_ID,
            'client_secret': APP_SECRET,
            'refresh_token': refresh_token,
            'redirect_uri': CALLBACK_URL,
        },
        headers={
            "User-Agent": "",
        }
    )
    token_data = json.loads(resp.text)          # parse the response once
    access_token = token_data.get('access_token')
    refresh_token = token_data.get('refresh_token')
    # New expiry: one week from now, as a zero-padded YYYYMMDD string.
    expiry_time = (datetime.datetime.now() + datetime.timedelta(days=7)).strftime("%Y%m%d")
    # Persist the rotated tokens only when the refresh actually returned one.
    if access_token is not None or refresh_token is not None:
        with open("bgm_data.json", 'r+', encoding='utf-8') as f:
            data = json.load(f)
            for i in data:
                if i['tg_user_id'] == test_id:
                    i['data']['access_token'] = access_token
                    i['data']['refresh_token'] = refresh_token
                    i['expiry_time'] = expiry_time
            f.seek(0)
            json.dump(data, f, ensure_ascii=False, indent=4)
            f.truncate()
    # Re-read and return the (possibly updated) record.
    with open('bgm_data.json') as f:
        data_seek = json.loads(f.read())
    user_data = None
    for i in data_seek:
        if i.get('tg_user_id') == test_id:
            user_data = i.get('data', {})
    return user_data
# 获取BGM用户信息
def bgmuser_data(test_id):
    """Return {'nickname', 'username'} for the BGM account bound to *test_id*.

    Bug fix: ``user_data_get`` was called twice (two file reads and,
    when the token had expired, two refresh requests — the second using an
    already-consumed refresh token).  The stored data is now fetched once.
    """
    stored = user_data_get(test_id)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
        'Authorization': 'Bearer ' + stored.get('access_token')}
    url = 'https://api.bgm.tv/user/' + str(stored.get('user_id'))
    try:
        r = requests.get(url=url, headers=headers)
    except requests.ConnectionError:
        # retry once on a transient connection failure
        r = requests.get(url=url, headers=headers)
    profile = json.loads(r.text)
    return {
        'nickname': profile.get('nickname'),  # display name (str)
        'username': profile.get('username')   # username; API falls back to uid when unset (str)
    }
# 获取用户观看eps
def eps_get(test_id, subject_id):
    """Return the user's episode progress for *subject_id*.

    Returns a dict:
      'progress'     — 'watched/total' (str)
      'watched'      — watched episode count (str)
      'eps_n'        — total episode count (str)
      'unwatched_id' — episode ids not yet watched, in original order (list)

    Bug fix: the old code removed watched ids from the full list inside one
    try/except ValueError, so the first id missing from the list aborted the
    whole loop and left later watched episodes inside ``unwatched_id``.  The
    list is now built by filtering against a set of watched ids.  A null
    ``eps`` payload from the progress API no longer raises TypeError.
    """
    access_token = user_data_get(test_id).get('access_token')
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
        'Authorization': 'Bearer ' + access_token}
    # 1) All episodes of the subject.
    params = {
        'subject_id': subject_id,
        'type': 0}
    url = 'https://api.bgm.tv/v0/episodes'
    try:
        r = requests.get(url=url, params=params, headers=headers)
    except requests.ConnectionError:
        r = requests.get(url=url, params=params, headers=headers)
    data_eps = json.loads(r.text).get('data')
    epsid_li = [i['id'] for i in data_eps]  # every episode id
    # 2) Episodes the user has marked as watched.
    params = {
        'subject_id': subject_id}
    url = 'https://api.bgm.tv/user/' + str(user_data_get(test_id).get('user_id')) + '/progress'
    try:
        r = requests.get(url=url, params=params, headers=headers)
    except requests.ConnectionError:
        r = requests.get(url=url, params=params, headers=headers)
    try:
        data_watched = json.loads(r.text).get('eps')
        watched_id_li = [i['id'] for i in data_watched]
    except (AttributeError, TypeError):
        watched_id_li = []  # no progress recorded for this subject
    eps_n = len(set(epsid_li))                       # total episode count
    watched_set = set(epsid_li) & set(watched_id_li) # watched ids that actually exist
    watched_n = len(watched_set)
    # Preserve broadcast order while dropping watched episodes.
    unwatched_id = [eps_id for eps_id in epsid_li if eps_id not in watched_set]
    return {'progress': str(watched_n) + '/' + str(eps_n),  # 'watched/total' (str)
            'watched': str(watched_n),                      # watched count (str)
            'eps_n': str(eps_n),                            # total count (str)
            'unwatched_id': unwatched_id}                   # unwatched eps ids (list)
# 剧集信息获取 不需Access Token
def subject_info_get(subject_id):
    """Return cached subject metadata, fetching from the BGM API on a miss.

    The result dict holds: subject_id, name, name_cn, eps_count, air_date,
    platform, air_weekday, score.  No access token required.

    Improvements: the cache lookup scans the file once instead of seven
    times; the bare ``except`` around ``json.load`` now only covers decode
    failures of an empty/corrupt cache file; cache hits include the
    ``subject_id`` key so hits and misses return the same shape.
    """
    with open('subject_info_data.json', encoding='utf-8') as f:
        info_data = json.loads(f.read())
    sid = int(subject_id)
    cached = next((i for i in info_data if i['subject_id'] == sid), None)
    if cached is not None:
        # Cache hit: single pass (previously one list scan per field).
        return {'subject_id': sid,
                'name': cached['name'],                # title (str)
                'name_cn': cached['name_cn'],          # Chinese title (str)
                'eps_count': cached['eps_count'],      # episode total (int)
                'air_date': cached['air_date'],        # first broadcast date (str)
                'platform': cached['platform'],        # broadcast type (str)
                'air_weekday': cached['air_weekday'],  # broadcast weekday (str)
                'score': cached['score']}              # BGM rating (int)
    # Cache miss: fetch from the v0 API and append to the on-disk cache.
    params = {'responseGroup': 'large'}
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'}
    url = f'https://api.bgm.tv/v0/subjects/{subject_id}'
    try:
        r = requests.get(url=url, params=params, headers=headers)
    except requests.ConnectionError:
        # retry once on a transient connection failure
        r = requests.get(url=url, params=params, headers=headers)
    info = json.loads(r.text)
    try:
        air_weekday = [i['value'] for i in info.get('infobox') if i['key'] == '放送星期'][0]
    except (IndexError, TypeError):
        air_weekday = 'None'  # no broadcast-weekday infobox entry (or no infobox at all)
    try:
        score = info.get('rating').get('score')
    except AttributeError:
        score = 0  # no rating block in the response
    subject_info_data = {'subject_id': sid,
                         'name': info.get('name'),          # title (str)
                         'name_cn': info.get('name_cn'),    # Chinese title (str)
                         'eps_count': info.get('eps'),      # episode total (int)
                         'air_date': info.get('date'),      # first broadcast date (str)
                         'platform': info.get('platform'),  # broadcast type (str)
                         'air_weekday': air_weekday,        # broadcast weekday (str)
                         'score': score}                    # BGM rating (int)
    with open("subject_info_data.json", 'r+', encoding='utf-8') as f:
        try:
            data = json.load(f)
        except ValueError:  # empty or corrupt cache file
            data = []
        data.append(subject_info_data)
        f.seek(0, 0)  # rewind before rewriting (file only grows, no truncate needed)
        json.dump(data, f, ensure_ascii=False, indent=4)
    return subject_info_data
# 更新收视进度状态
def eps_status_get(test_id, eps_id, status):
    """Set one episode's watch status ('watched', 'remove', ...) on BGM and
    return the HTTP response object."""
    token = user_data_get(test_id).get('access_token')
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
        'Authorization': 'Bearer ' + token}
    return requests.get(url=f'https://api.bgm.tv/ep/{eps_id}/status/{status}', headers=request_headers)
# 更新收藏状态
def collection_post(test_id, subject_id, status, rating):
    """Update a subject's collection status (and optionally its rating) on
    BGM; return the HTTP response object."""
    token = user_data_get(test_id).get('access_token')
    fields = {"status": (None, status)}
    if not (rating is None or rating == 0):
        # Only attach a rating when the caller actually supplied one.
        fields["rating"] = (None, rating)
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
        'Authorization': 'Bearer ' + token}
    return requests.post(url=f'https://api.bgm.tv/collection/{subject_id}/update', files=fields, headers=request_headers)
# 获取用户评分
def user_rating_get(test_id, subject_id):
    """Return the user's collection status and rating for *subject_id*.

    Returns {'user_startus': str, 'user_rating': int}.

    Bug fix: the bare ``except`` around the status lookup is narrowed to
    ``AttributeError`` (raised when the API returns ``"status": null``), so
    unrelated errors are no longer silently mapped to 'collect'.
    """
    access_token = user_data_get(test_id).get('access_token')
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
        'Authorization': 'Bearer ' + access_token}
    url = f'https://api.bgm.tv/collection/{subject_id}'
    r = requests.get(url=url, headers=headers)
    data = json.loads(r.text)
    try:
        user_startus = data.get('status', {}).get('type')
    except AttributeError:
        user_startus = 'collect'  # "status": null in the response → treat as collected
    return {'user_startus': user_startus,       # collection status (str)
            'user_rating': data.get('rating')}  # user rating (int)
# 动画简介图片获取 不需Access Token
def anime_img(subject_id):
    """Return an anili.st banner image URL for the subject, or None.

    Looks the subject's title up on the AniList GraphQL API (one result) and,
    when a match exists, builds an image URL from the first match's AniList
    id.  No Bangumi access token required.
    """
    anime_name = subject_info_get(subject_id)['name']
    # AniList GraphQL search: first page, one result, matched by title.
    query = '''
    query ($id: Int, $page: Int, $perPage: Int, $search: String) {
        Page (page: $page, perPage: $perPage) {
            pageInfo {
                total
                currentPage
                lastPage
                hasNextPage
                perPage
            }
            media (id: $id, search: $search) {
                id
                title {
                    romaji
                }
            }
        }
    }
    '''
    variables = {
        'search': anime_name,
        'page': 1,
        'perPage': 1
    }
    url = 'https://graphql.anilist.co'
    try:
        r = requests.post(url, json={'query': query, 'variables': variables})
    except requests.ConnectionError:
        # retry once on a transient connection failure
        r = requests.post(url, json={'query': query, 'variables': variables})
    anilist_data = json.loads(r.text).get('data').get('Page').get('media')
    try:
        anilist_id = [i['id'] for i in anilist_data][0]
    except IndexError:
        img_url = None  # AniList has no match for this title
    else:
        img_url = f'https://img.anili.st/media/{anilist_id}'
    return img_url
# 条目搜索 不需Access Token
def search_get(keywords, type, start):
    """Search BGM subjects by keyword (no access token required).

    Returns {'search_results_n': int, 'subject_id_li': list, 'name_li': list}.

    Improvements: the bare ``except`` is narrowed to JSON decode failures; a
    response without a ``list`` payload no longer raises ``TypeError`` while
    building the id/name lists; a null ``results`` field is normalized to 0
    so callers can safely compare it numerically.
    """
    max_results = 5  # page size: 5 items per page
    params = {
        'type': type,
        'start': start,
        'max_results': max_results}
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'}
    url = f'https://api.bgm.tv/search/subject/{keywords}'
    try:
        r = requests.get(url=url, params=params, headers=headers)
    except requests.ConnectionError:
        # retry once on a transient connection failure
        r = requests.get(url=url, params=params, headers=headers)
    search_results_n = 0
    subject_id_li = []
    name_li = []
    try:
        data_search = json.loads(r.text)
    except ValueError:  # API answered with non-JSON (e.g. an HTML error page)
        data_search = None
    if isinstance(data_search, dict):
        search_results_n = data_search.get('results') or 0  # null → 0
        subject_li = data_search.get('list') or []          # null → no entries
        subject_id_li = [i['id'] for i in subject_li]
        name_li = [i['name'] for i in subject_li]
    return {'search_results_n': search_results_n,  # number of results (int)
            'subject_id_li': subject_id_li,        # result subject ids (list)
            'name_li': name_li}                    # result names (list)
# 每日放送查询输出文字及其按钮
def week_text(day):
    """Build the broadcast-calendar message for weekday *day* ('1'-'7').

    Returns {'text': ..., 'markup': ...}: one numbered button per show
    (callback 'animesearch|week|<subject_id>|<day>|0') plus the listing text.

    NOTE(review): ``text``/``markup`` are only assigned inside the
    matching-weekday branch — callers must pass a valid 1-7 day or the
    return raises NameError.  ``text`` is a one-element set literal, not a
    string; downstream code passes it through as-is, so it is preserved.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',}
    url = 'https://api.bgm.tv/calendar'
    try:
        r = requests.get(url=url, headers=headers)
    except requests.ConnectionError:
        # retry once on a transient connection failure
        r = requests.get(url=url, headers=headers)
    week_data = json.loads(r.text)
    for i in week_data:
        if i.get('weekday',{}).get('id') == int(day):
            items = i.get('items')
            subject_id_li = [i['id'] for i in items]
            name_li = [i['name'] for i in items]
            name_cn_li = [i['name_cn'] for i in items]
            no_li = list(range(1, len(subject_id_li)+ 1))
            markup = telebot.types.InlineKeyboardMarkup()
            markup.add(*[telebot.types.InlineKeyboardButton(text=item[0],callback_data='animesearch'+'|'+'week'+'|'+str(item[1])+'|'+str(day)+'|0') for item in list(zip(no_li,subject_id_li))])
            air_weekday = str(day).replace('1', '星期一').replace('2', '星期二').replace('3', '星期三').replace('4', '星期四').replace('5', '星期五').replace('6', '星期六').replace('7', '星期日') # digit → weekday name
            text_data = ''.join(['*['+str(a)+']* '+b+'\n'+c+'\n\n' for a,b,c in zip(no_li,name_li,name_cn_li)])
            anime_count = len(subject_id_li)
            text = {'*在 '+ air_weekday +' 放送的节目*\n\n'+
            text_data +
            '共'+ str(anime_count) +'部'}
    week_text_data = {
        'text': text, # message body
        'markup': markup # inline keyboard
    }
    return week_text_data
# 动画再看详情
@bot.callback_query_handler(func=lambda call: call.data.split('|')[0] == 'anime_do')
def anime_do_callback(call):
    """Detail card for one anime on the user's watching list.

    Callback data: 'anime_do|<test_id>|<subject_id>|<back>|<back_page>'.
    back == 1 → edit the current message in place (returning from a submenu);
    back == 0 → delete the list message and send a fresh detail message, as a
    photo when an AniList banner exists.  Only the user who opened the list
    (test_id) may press the buttons.

    NOTE(review): ``text`` is a one-element set literal, not a string, and
    ``subject_info_get``/``eps_get`` are re-invoked per field — kept as-is to
    preserve behavior.
    """
    tg_from_id = call.from_user.id
    test_id = int(call.data.split('|')[1])
    subject_id = call.data.split('|')[2]
    back = int(call.data.split('|')[3])
    back_page = call.data.split('|')[4]
    if tg_from_id == test_id:  # only the requesting user may interact
        img_url = anime_img(subject_id)
        text = {'*'+ subject_info_get(subject_id)['name_cn'] +'*\n'
        ''+ subject_info_get(subject_id)['name'] +'\n\n'
        'BGM ID:`' + str(subject_id) + '`\n'
        '➤ BGM 平均评分:`'+ str(subject_info_get(subject_id)['score']) +'`🌟\n'
        '➤ 您的评分:`'+ str(user_rating_get(test_id, subject_id)['user_rating']) +'`🌟\n'
        '➤ 放送类型:`'+ subject_info_get(subject_id)['platform'] +'`\n'
        '➤ 放送开始:`'+ subject_info_get(subject_id)['air_date'] + '`\n'
        '➤ 放送星期:`'+ subject_info_get(subject_id)['air_weekday'] + '`\n'
        '➤ 观看进度:`'+ eps_get(test_id, subject_id)['progress'] + '`\n\n'
        '💬 [吐槽箱](https://bgm.tv/subject/'+ str(subject_id) +'/comments)\n'}
        markup = telebot.types.InlineKeyboardMarkup()
        unwatched_id = eps_get(test_id, subject_id)['unwatched_id']
        if unwatched_id == []:  # everything watched: no "已看最新" button
            markup.add(telebot.types.InlineKeyboardButton(text='返回',callback_data='anime_do_page'+'|'+str(test_id)+'|'+back_page),telebot.types.InlineKeyboardButton(text='评分',callback_data='rating'+'|'+str(test_id)+'|'+'0'+'|'+str(subject_id)+'|'+back_page))
            markup.add(telebot.types.InlineKeyboardButton(text='收藏管理',callback_data='collection'+'|'+str(tg_from_id)+'|'+str(subject_id)+'|'+'anime_do'+'|'+'0'+'|'+'null'+'|'+back_page))
        else:
            markup.add(telebot.types.InlineKeyboardButton(text='返回',callback_data='anime_do_page'+'|'+str(test_id)+'|'+back_page),telebot.types.InlineKeyboardButton(text='评分',callback_data='rating'+'|'+str(test_id)+'|'+'0'+'|'+str(subject_id)+'|'+back_page),telebot.types.InlineKeyboardButton(text='已看最新',callback_data='anime_eps'+'|'+str(test_id)+'|'+str(unwatched_id[0])+'|'+str(subject_id)+'|'+back_page))
            markup.add(telebot.types.InlineKeyboardButton(text='收藏管理',callback_data='collection'+'|'+str(tg_from_id)+'|'+str(subject_id)+'|'+'anime_do'+'|'+'0'+'|'+'null'+'|'+back_page))
        if back == 1:
            if call.message.content_type == 'photo':
                bot.edit_message_caption(caption=text, chat_id=call.message.chat.id , message_id=call.message.message_id, parse_mode='Markdown', reply_markup=markup)
            else:
                bot.edit_message_text(text=text, parse_mode='Markdown', chat_id=call.message.chat.id , message_id=call.message.message_id, reply_markup=markup)
        else:
            bot.delete_message(chat_id=call.message.chat.id , message_id=call.message.message_id, timeout=20) # remove the watching-list message
            if img_url == None: # is an AniList banner available?
                bot.send_message(chat_id=call.message.chat.id, text=text, parse_mode='Markdown', reply_markup=markup, timeout=20)
            else:
                bot.send_photo(chat_id=call.message.chat.id, photo=img_url, caption=text, parse_mode='Markdown', reply_markup=markup)
            # bot.edit_message_text(text=text, parse_mode='Markdown', chat_id=call.message.chat.id , message_id=call.message.message_id, reply_markup=markup)
    else:
        bot.answer_callback_query(call.id, text='和你没关系,别点了~', show_alert=True)
# 评分
@bot.callback_query_handler(func=lambda call: call.data.split('|')[0] == 'rating')
def rating_callback(call):
    """Rating menu for a subject.

    Callback data: 'rating|<test_id>|<rating>|<subject_id>|<back_page>'.
    rating == 0 just opens the 1-10 score keyboard; a non-zero rating is
    posted to BGM (keeping the current collection status) before the card is
    redrawn.  Only the owning user may interact.

    NOTE(review): ``text`` is a one-element set literal — kept as-is to
    preserve behavior.
    """
    tg_from_id = call.from_user.id
    test_id = int(call.data.split('|')[1])
    if tg_from_id == test_id:
        rating_data = int(call.data.split('|')[2])
        subject_id = call.data.split('|')[3]
        back_page = call.data.split('|')[4]
        subject_info = subject_info_get(subject_id)
        if rating_data != 0:
            # Re-submit the existing collection status together with the new score.
            status = user_rating_get(test_id, subject_id)['user_startus']
            collection_post(test_id, subject_id, status, str(rating_data))
        text = {f'*{subject_info["name_cn"]}*\n'\
        f'{subject_info["name"]}\n\n'\
        f'BGM ID:`{ str(subject_id) }`\n\n'\
        f'➤ BGM 平均评分:`{ str(subject_info["score"]) }`🌟\n'\
        f'➤ 您的评分:`{str(user_rating_get(test_id, subject_id)["user_rating"]) }`🌟\n\n'\
        f'➤ 观看进度:`{eps_get(test_id, subject_id)["progress"] }`\n\n'\
        f'💬 [吐槽箱](https://bgm.tv/subject/{ str(subject_id) }/comments)\n\n'\
        f'请点按下列数字进行评分'}
        markup = telebot.types.InlineKeyboardMarkup()
        markup.add(telebot.types.InlineKeyboardButton(text='返回',callback_data='anime_do'+'|'+str(test_id)+'|'+str(subject_id)+'|1'+'|'+back_page),
        *[telebot.types.InlineKeyboardButton(text=str(i),callback_data='rating|{}|{}|{}|{}'.format(str(test_id),str(i),str(subject_id),back_page)) for i in range(1,11)])
        if call.message.content_type == 'photo':
            bot.edit_message_caption(caption=text, chat_id=call.message.chat.id , message_id=call.message.message_id, parse_mode='Markdown', reply_markup=markup)
        else:
            bot.edit_message_text(text=text, parse_mode='Markdown', chat_id=call.message.chat.id , message_id=call.message.message_id, reply_markup=markup)
    else:
        bot.answer_callback_query(call.id, text='和你没关系,别点了~', show_alert=True)
# 已看最新
@bot.callback_query_handler(func=lambda call: call.data.split('|')[0] == 'anime_eps')
def anime_eps_callback(call):
    """'Watched latest' button handler.

    Callback data: 'anime_eps|<test_id>|<eps_id>|<subject_id>|<back_page>[|remove]'.
    With the optional 6th field 'remove' the latest watched mark is undone;
    otherwise <eps_id> is marked watched.  When the last episode becomes
    watched, the subject's collection status is auto-updated to 'collect'.
    Only the owning user may interact.

    NOTE(review): ``text`` is a one-element set literal — kept as-is to
    preserve behavior.
    """
    tg_from_id = call.from_user.id
    test_id = int(call.data.split('|')[1])
    if tg_from_id == test_id:
        eps_id = int(call.data.split('|')[2])
        try:
            remove = call.data.split('|')[5]
            if remove == 'remove':
                eps_status_get(test_id, eps_id, 'remove') # undo the latest watched mark
                bot.send_message(chat_id=call.message.chat.id, text='已撤销,已看最新集数', parse_mode='Markdown', timeout=20)
        except IndexError:
            eps_status_get(test_id, eps_id, 'watched') # mark the episode as watched
        subject_id = int(call.data.split('|')[3])
        back_page = call.data.split('|')[4]
        rating = str(user_rating_get(test_id, subject_id)['user_rating'])
        text = {'*'+ subject_info_get(subject_id)['name_cn'] +'*\n'
        ''+ subject_info_get(subject_id)['name'] +'\n\n'
        'BGM ID:`' + str(subject_id) + '`\n'
        '➤ BGM 平均评分:`'+ str(subject_info_get(subject_id)['score']) +'`🌟\n'
        '➤ 您的评分:`'+ str(rating) +'`🌟\n'
        '➤ 放送类型:`'+ subject_info_get(subject_id)['platform'] +'`\n'
        '➤ 放送开始:`'+ subject_info_get(subject_id)['air_date'] + '`\n'
        '➤ 放送星期:`'+ subject_info_get(subject_id)['air_weekday'] + '`\n'
        '➤ 观看进度:`'+ eps_get(test_id, subject_id)['progress'] + '`\n\n'
        '💬 [吐槽箱](https://bgm.tv/subject/'+ str(subject_id) +'/comments)\n'
        '📝 [第'+ eps_get(test_id, subject_id)['watched'] +'话评论](https://bgm.tv/ep/'+ str(eps_id) +')\n'}
        markup = telebot.types.InlineKeyboardMarkup()
        unwatched_id = eps_get(test_id, subject_id)['unwatched_id']
        if unwatched_id == []:
            status = 'collect'
            collection_post(test_id, subject_id, status, rating) # last episode watched → auto-set collection status to collected
            markup.add(telebot.types.InlineKeyboardButton(text='返回',callback_data='anime_do_page'+'|'+str(test_id)+'|'+back_page),telebot.types.InlineKeyboardButton(text='评分',callback_data='rating'+'|'+str(test_id)+'|'+'0'+'|'+str(subject_id)+'|'+back_page))
            markup.add(telebot.types.InlineKeyboardButton(text='收藏管理',callback_data='collection'+'|'+str(tg_from_id)+'|'+str(subject_id)+'|'+'anime_do'+'|'+'0'+'|'+'null'+'|'+back_page),telebot.types.InlineKeyboardButton(text='撤销最新观看',callback_data='anime_eps'+'|'+str(test_id)+'|'+str(eps_id)+'|'+str(subject_id)+'|'+back_page+'|remove'))
        else:
            markup.add(telebot.types.InlineKeyboardButton(text='返回',callback_data='anime_do_page'+'|'+str(test_id)+'|'+back_page),telebot.types.InlineKeyboardButton(text='评分',callback_data='rating'+'|'+str(test_id)+'|'+'0'+'|'+str(subject_id)+'|'+back_page),telebot.types.InlineKeyboardButton(text='已看最新',callback_data='anime_eps'+'|'+str(test_id)+'|'+str(unwatched_id[0])+'|'+str(subject_id)+'|'+back_page))
            markup.add(telebot.types.InlineKeyboardButton(text='收藏管理',callback_data='collection'+'|'+str(tg_from_id)+'|'+str(subject_id)+'|'+'anime_do'+'|'+'0'+'|'+'null'+'|'+back_page),telebot.types.InlineKeyboardButton(text='撤销最新观看',callback_data='anime_eps'+'|'+str(test_id)+'|'+str(eps_id)+'|'+str(subject_id)+'|'+back_page+'|remove'))
        if call.message.content_type == 'photo':
            bot.edit_message_caption(caption=text, chat_id=call.message.chat.id , message_id=call.message.message_id, parse_mode='Markdown', reply_markup=markup)
        else:
            bot.edit_message_text(text=text, parse_mode='Markdown', chat_id=call.message.chat.id , message_id=call.message.message_id, reply_markup=markup)
        # bot.edit_message_text(text=text, parse_mode='Markdown', chat_id=call.message.chat.id , message_id=call.message.message_id, reply_markup=markup)
    else:
        bot.answer_callback_query(call.id, text='和你没关系,别点了~', show_alert=True)
# 动画再看详情页返回翻页
@bot.callback_query_handler(func=lambda call: call.data.split('|')[0] == 'anime_do_page')
def anime_do_page_callback(call):
    """Page through the user's watching list.

    Callback data: 'anime_do_page|<test_id>|<offset>'.  Rebuilds the 5-item
    page at <offset> with per-item detail buttons plus prev/next paging
    buttons.  Only the owning user may interact.

    NOTE(review): ``text`` is a one-element set literal — kept as-is to
    preserve behavior.
    """
    test_id = int(call.data.split('|')[1])
    offset = int(call.data.split('|')[2])
    tg_from_id = call.from_user.id
    if tg_from_id == test_id:
        access_token = user_data_get(test_id).get('access_token')
        params = {'subject_type': 2,
        'type': 3,
        'limit': 5, # items per page
        'offset': offset # page start
        }
        headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
        'Authorization': 'Bearer ' + access_token}
        url = 'https://api.bgm.tv/v0/users/'+bgmuser_data(test_id)['username']+'/collections'
        try:
            r = requests.get(url=url, params=params, headers=headers)
        except requests.ConnectionError:
            # retry once on a transient connection failure
            r = requests.get(url=url, params=params, headers=headers)
        anime_data = json.loads(r.text)
        anime_count = anime_data.get('total') # total watching count (int)
        subject_id_li = [i['subject_id'] for i in anime_data.get('data')] # subject ids (int)
        name_li = [subject_info_get(subject_id)['name'] for subject_id in subject_id_li] # titles (str)
        name_cn_li = [subject_info_get(subject_id)['name_cn'] for subject_id in subject_id_li] # Chinese titles (str)
        markup = telebot.types.InlineKeyboardMarkup()
        no_li = list(range(1, len(subject_id_li)+ 1))
        markup.add(*[telebot.types.InlineKeyboardButton(text=item[0],callback_data='anime_do'+'|'+str(test_id)+'|'+str(item[1])+'|0'+'|'+str(offset)) for item in list(zip(no_li,subject_id_li))], row_width=5)
        if anime_count <= 5:
            markup.add() # single page: no paging buttons (no-op add)
        elif offset == 0:
            markup.add(telebot.types.InlineKeyboardButton(text='下一页',callback_data='anime_do_page'+'|'+str(test_id)+'|'+str(offset+5)))
        elif offset+5 >= anime_count:
            markup.add(telebot.types.InlineKeyboardButton(text='上一页',callback_data='anime_do_page'+'|'+str(test_id)+'|'+str(offset-5)))
        else:
            markup.add(telebot.types.InlineKeyboardButton(text='上一页',callback_data='anime_do_page'+'|'+str(test_id)+'|'+str(offset-5)),telebot.types.InlineKeyboardButton(text='下一页',callback_data='anime_do_page'+'|'+str(test_id)+'|'+str(offset+5)))
        eps_li = [eps_get(test_id, subject_id)['progress'] for subject_id in subject_id_li]
        anime_text_data = ''.join(['*['+str(a)+']* '+b+'\n'+c+' `['+ d +']`\n\n' for a,b,c,d in zip(no_li,name_li,name_cn_li,eps_li)])
        text = {'*'+ bgmuser_data(test_id)['nickname'] +' 在看的动画*\n\n'+
        anime_text_data +
        '共'+ str(anime_count) +'部'}
        if call.message.content_type == 'photo':
            # Photo messages cannot be edited into text: replace them instead.
            bot.delete_message(chat_id=call.message.chat.id , message_id=call.message.message_id, timeout=20)
            bot.send_message(chat_id=call.message.chat.id, text=text, parse_mode='Markdown', reply_markup=markup, timeout=20)
        else:
            bot.edit_message_text(text=text, parse_mode='Markdown', chat_id=call.message.chat.id , message_id=call.message.message_id, reply_markup=markup)
    else:
        bot.answer_callback_query(call.id, text='和你没关系,别点了~', show_alert=True)
# 搜索翻页
@bot.callback_query_handler(func=lambda call: call.data.split('|')[0] == 'spage')
def spage_callback(call):
    """Paginate anime search results.

    Callback data: 'spage|<keywords>|<start>'.

    Bug fixes: when the search returned zero results, ``markup`` was
    referenced without ever being assigned (NameError); it now defaults to
    None (message shown with no buttons).  ``search_get`` was also called
    three times per page (three HTTP requests); its result is fetched once.
    ``text`` was a one-element set literal and is now a plain string.
    """
    anime_search_keywords = call.data.split('|')[1]
    start = int(call.data.split('|')[2])
    subject_type = 2  # subject types: 1 = book, 2 = anime, 3 = music, 4 = game, 6 = real
    search_data = search_get(anime_search_keywords, subject_type, start)  # one request, reused below
    search_results_n = search_data['search_results_n']  # number of results
    markup = None  # default: no inline keyboard
    if search_results_n == 0:
        text = '已经没有了'
    else:
        search_subject_id_li = search_data['subject_id_li']  # result subject ids
        search_name_li = search_data['name_li']              # result names
        markup = telebot.types.InlineKeyboardMarkup()
        for item in zip(search_name_li, search_subject_id_li):
            markup.add(telebot.types.InlineKeyboardButton(text=item[0], callback_data='animesearch'+'|'+str(anime_search_keywords)+'|'+str(item[1])+'|'+str(start)+'|0'))
        # Prev/next buttons depending on the position within the result set.
        if search_results_n <= 5:
            pass  # single page: no paging buttons
        elif start == 0:
            markup.add(telebot.types.InlineKeyboardButton(text='下一页', callback_data='spage'+'|'+str(anime_search_keywords)+'|'+str(start+5)))
        elif start + 5 >= search_results_n:
            markup.add(telebot.types.InlineKeyboardButton(text='上一页', callback_data='spage'+'|'+str(anime_search_keywords)+'|'+str(start-5)))
        else:
            markup.add(telebot.types.InlineKeyboardButton(text='上一页', callback_data='spage'+'|'+str(anime_search_keywords)+'|'+str(start-5)), telebot.types.InlineKeyboardButton(text='下一页', callback_data='spage'+'|'+str(anime_search_keywords)+'|'+str(start+5)))
        text = ('*关于您的 “*`'+ str(anime_search_keywords) +'`*” 搜索结果*\n\n'+
                '🔍 共'+ str(search_results_n) +'个结果')
    if call.message.content_type == 'photo':
        # Photo messages cannot be edited into text: replace them instead.
        bot.delete_message(chat_id=call.message.chat.id, message_id=call.message.message_id, timeout=20)
        bot.send_message(chat_id=call.message.chat.id, text=text, parse_mode='Markdown', reply_markup=markup, timeout=20)
    else:
        bot.edit_message_text(text=text, parse_mode='Markdown', chat_id=call.message.chat.id, message_id=call.message.message_id, reply_markup=markup)
# 搜索动画详情页
@bot.callback_query_handler(func=lambda call: call.data.split('|')[0] == 'animesearch')
def animesearch_callback(call):
    """Detail card for a search / calendar result.

    Callback data: 'animesearch|<keywords>|<subject_id>|<start>|<back>'.
    <keywords> == 'week' means we came from the broadcast calendar (the back
    button returns to the weekday view, with <start> carrying the day);
    otherwise the back button returns to the search page at offset <start>.
    back == 1 edits the message in place; back == 0 replaces it, sending a
    photo message when an AniList banner exists.

    NOTE(review): ``text`` is a one-element set literal — kept as-is to
    preserve behavior.
    """
    anime_search_keywords = call.data.split('|')[1]
    subject_id = call.data.split('|')[2]
    start = int(call.data.split('|')[3])
    back = int(call.data.split('|')[4])
    img_url = anime_img(subject_id)
    text = {'*'+ subject_info_get(subject_id)['name_cn'] +'*\n'
    ''+ subject_info_get(subject_id)['name'] +'\n\n'
    'BGM ID:`' + str(subject_id) + '`\n'
    '➤ BGM 平均评分:`'+ str(subject_info_get(subject_id)['score']) +'`🌟\n'
    '➤ 放送类型:`'+ subject_info_get(subject_id)['platform'] +'`\n'
    '➤ 集数:共`'+ str(subject_info_get(subject_id)['eps_count']) +'`集\n'
    '➤ 放送开始:`'+ subject_info_get(subject_id)['air_date'] + '`\n'
    '➤ 放送星期:`'+ subject_info_get(subject_id)['air_weekday'] + '`\n\n'
    '📖 [详情](https://bgm.tv/subject/'+ str(subject_id) +')\n'
    '💬 [吐槽箱](https://bgm.tv/subject/'+ str(subject_id) +'/comments)\n'}
    markup = telebot.types.InlineKeyboardMarkup()
    if anime_search_keywords == 'week':
        tg_from_id = call.from_user.id
        markup.add(telebot.types.InlineKeyboardButton(text='返回',callback_data='back_week'+'|'+str(start)), telebot.types.InlineKeyboardButton(text='收藏',callback_data='collection'+'|'+str(tg_from_id)+'|'+str(subject_id)+'|'+str(anime_search_keywords)+'|'+str(start)+'|'+'null'))
    else:
        tg_from_id = call.from_user.id
        markup.add(telebot.types.InlineKeyboardButton(text='返回',callback_data='spage'+'|'+str(anime_search_keywords)+'|'+str(start)), telebot.types.InlineKeyboardButton(text='收藏',callback_data='collection'+'|'+str(tg_from_id)+'|'+str(subject_id)+'|'+str(anime_search_keywords)+'|'+str(start)+'|'+'null'))
    if back == 1:
        if call.message.content_type == 'photo':
            bot.edit_message_caption(caption=text, chat_id=call.message.chat.id , message_id=call.message.message_id, parse_mode='Markdown', reply_markup=markup)
        else:
            bot.edit_message_text(text=text, parse_mode='Markdown', chat_id=call.message.chat.id , message_id=call.message.message_id, reply_markup=markup)
    else:
        bot.delete_message(chat_id=call.message.chat.id , message_id=call.message.message_id, timeout=20)
        if img_url == None: # is an AniList banner available?
            bot.send_message(chat_id=call.message.chat.id, text=text, parse_mode='Markdown', reply_markup=markup, timeout=20)
        else:
            bot.send_photo(chat_id=call.message.chat.id, photo=img_url, caption=text, parse_mode='Markdown', reply_markup=markup)
# 收藏
# 收藏 (collection) callback
@bot.callback_query_handler(func=lambda call: call.data.split('|')[0] == 'collection')
def collection_callback(call):
    """Handle 'collection' callback buttons.

    callback_data layout:
        collection|test_id|subject_id|keywords|start|status[|back_page]

    status == 'null'  -> show the status-selection keyboard for the subject;
    otherwise         -> post the chosen collection status to Bangumi.
    Only the user the button was issued to (test_id) may change the status.
    """
    parts = call.data.split('|')
    test_id = int(parts[1])          # Telegram user id the button was created for
    subject_id = parts[2]
    anime_search_keywords = parts[3]
    start = parts[4]
    status = parts[5]
    tg_from_id = call.from_user.id   # user who actually pressed the button

    if status == 'null':
        if data_seek_get(tg_from_id) == 'no':
            bot.send_message(chat_id=call.message.chat.id, text='您未绑定Bangumi,请私聊使用[/start](https://t.me/'+BOT_USERNAME+'?start=none)进行绑定', parse_mode='Markdown', timeout=20)
        else:
            # BUG FIX: the original wrapped this text in {...}, creating a
            # one-element set instead of a str; Telegram API calls need a str.
            text = '*您想将 “*`'+ subject_info_get(subject_id)['name'] +'`*” 收藏为*\n\n'
            markup = telebot.types.InlineKeyboardMarkup()
            if anime_search_keywords == 'anime_do':
                back_page = parts[6]
                back_btn = telebot.types.InlineKeyboardButton(
                    text='返回',
                    callback_data='anime_do'+'|'+str(test_id)+'|'+str(subject_id)+'|1'+'|'+back_page)
                owner_id = test_id      # anime_do flow keys buttons to the original owner
            else:
                back_btn = telebot.types.InlineKeyboardButton(
                    text='返回',
                    callback_data='animesearch'+'|'+str(anime_search_keywords)+'|'+str(subject_id)+'|'+str(start)+'|1')
                owner_id = tg_from_id
            # One button per collection status; all share the same payload shape.
            status_buttons = [
                telebot.types.InlineKeyboardButton(
                    text=label,
                    callback_data='collection'+'|'+str(owner_id)+'|'+str(subject_id)+'|'+str(anime_search_keywords)+'|'+str(start)+'|'+code)
                for label, code in (('想看', 'wish'), ('看过', 'collect'), ('在看', 'do'), ('搁置', 'on_hold'), ('抛弃', 'dropped'))
            ]
            markup.add(back_btn, *status_buttons)
            if call.message.content_type == 'photo':
                bot.edit_message_caption(caption=text, chat_id=call.message.chat.id, message_id=call.message.message_id, parse_mode='Markdown', reply_markup=markup)
            else:
                bot.edit_message_text(text=text, parse_mode='Markdown', chat_id=call.message.chat.id, message_id=call.message.message_id, reply_markup=markup)
        return

    # The five original status branches were identical except for the label;
    # map status code -> Chinese label and handle them uniformly.
    status_labels = {'wish': '想看', 'collect': '看过', 'do': '在看', 'on_hold': '搁置', 'dropped': '抛弃'}
    if status in status_labels:
        if tg_from_id == test_id:
            rating = str(user_rating_get(test_id, subject_id)['user_rating'])
            collection_post(test_id, subject_id, status, rating)
            bot.send_message(chat_id=call.message.chat.id, text='已将 “`'+ subject_info_get(subject_id)['name'] +'`” 收藏更改为'+status_labels[status], parse_mode='Markdown', timeout=20)
        else:
            # Pressed by someone other than the button's owner.
            bot.answer_callback_query(call.id, text='和你没关系,别点了~', show_alert=True)
# week 返回
# 'back_week' callback: re-render the weekly broadcast listing.
@bot.callback_query_handler(func=lambda call: call.data.split('|')[0] == 'back_week')
def back_week_callback(call):
    """Delete the current message and send a fresh weekly schedule.

    callback_data layout: back_week|day  (day is the weekday index passed
    to week_text, which builds both the text and the inline keyboard).
    """
    weekday = int(call.data.split('|')[1])
    week_data = week_text(weekday)
    # Delete-and-resend rather than edit the existing message in place.
    bot.delete_message(chat_id=call.message.chat.id, message_id=call.message.message_id, timeout=20)
    bot.send_message(chat_id=call.message.chat.id, text=week_data['text'],
                     parse_mode='Markdown', reply_markup=week_data['markup'], timeout=20)
# 开始启动
# Entry point: start long-polling Telegram for updates (blocks forever).
if __name__ == '__main__':
    bot.polling()
| 53.523062
| 1,034
| 0.586395
| 7,115
| 54,540
| 4.27941
| 0.061841
| 0.049954
| 0.035437
| 0.059117
| 0.834143
| 0.810628
| 0.783007
| 0.770921
| 0.755649
| 0.739523
| 0
| 0.015753
| 0.253942
| 54,540
| 1,018
| 1,035
| 53.575639
| 0.731433
| 0.028915
| 0
| 0.604624
| 0
| 0.017341
| 0.161083
| 0.006908
| 0.030058
| 0
| 0
| 0
| 0
| 1
| 0.027746
| false
| 0.002312
| 0.00578
| 0
| 0.047399
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f6c95c8c6c3ae69ae9f8f80e3b440291725790bf
| 17,788
|
py
|
Python
|
micropython/badger2040_modules_py/launchericons.py
|
nathanmayall/pimoroni-pico
|
ee12d846a125770a76e7ed331d290ce83f41a0b3
|
[
"MIT"
] | 1
|
2022-03-12T13:54:28.000Z
|
2022-03-12T13:54:28.000Z
|
micropython/badger2040_modules_py/launchericons.py
|
nathanmayall/pimoroni-pico
|
ee12d846a125770a76e7ed331d290ce83f41a0b3
|
[
"MIT"
] | null | null | null |
micropython/badger2040_modules_py/launchericons.py
|
nathanmayall/pimoroni-pico
|
ee12d846a125770a76e7ed331d290ce83f41a0b3
|
[
"MIT"
] | null | null | null |
# Code generated by data_to_py.py.
version = '0.1'
_data =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x3f\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\xf0\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x01\xff\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\xfc\x00\x00\x00'\
b'\x00\x00\x00\x0f\xfc\x00\x00\x00\x00\x00\x00\x1f\xf8\x00\x00\x00'\
b'\x00\x00\x0f\xff\xff\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xfe\x00\x00\x00'\
b'\x00\x00\x00\x7f\xff\x80\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00'\
b'\x00\x00\x3f\xff\xff\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\xff\xff\xff\xff\x80\x00'\
b'\x00\x1f\xff\xff\xff\xff\xe0\x00\x00\x00\x00\x7f\xfe\x00\x00\x00'\
b'\x00\x00\x03\xff\xff\xf0\x00\x00\x00\x00\x07\xff\xff\xe0\x00\x00'\
b'\x00\x00\x7f\xff\xff\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\xff\xff\xff\xff\xf0\x00'\
b'\x00\xff\xff\xff\xff\xff\xfc\x00\x00\x00\x00\xff\xff\x00\x00\x00'\
b'\x00\x00\x0f\xff\xff\xfc\x00\x00\x00\x00\x1f\xff\xff\xf8\x00\x00'\
b'\x00\x01\xff\xc0\x0f\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xff\xff\xff\xff\xf8\x00'\
b'\x01\xff\xff\xff\xff\xff\xfe\x00\x00\x00\x00\xfc\x3f\x00\x00\x00'\
b'\x00\x00\x1f\xff\xff\xfe\x00\x00\x00\x00\x3f\xff\xff\xfc\x00\x00'\
b'\x00\x03\xfe\x00\x01\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xff\xff\xff\xff\xf8\x00'\
b'\x03\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\xfc\x3f\x00\x00\x00'\
b'\x00\x00\x7f\xf0\x03\xff\x80\x00\x00\x00\xff\xe0\x07\xff\x00\x00'\
b'\x00\x07\xf8\x00\x00\x7f\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xff\xff\xff\xff\xf8\x00'\
b'\x03\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\xfc\x3f\x00\x00\x00'\
b'\x00\x00\xff\x80\x00\x7f\xc0\x00\x00\x01\xff\x00\x00\xff\x80\x00'\
b'\x00\x0f\xf0\x00\x00\x3f\xc0\x00\x00\x00\x03\xff\xff\xff\xfe\x00'\
b'\x00\x03\xff\x80\x03\xfe\x00\x00\x00\xff\xff\xff\xff\xff\xfc\x00'\
b'\x03\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\xfc\x3f\x00\x00\x00'\
b'\x00\x01\xfe\x00\x00\x1f\xe0\x00\x00\x03\xfc\x00\x00\x3f\xc0\x00'\
b'\x00\x0f\xc0\x00\x00\x0f\xc0\x00\x00\x00\x03\xff\xff\xff\xfe\x00'\
b'\x00\x3f\xff\xf0\x1f\xff\xf0\x00\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x00\x1f\x00\x00\x00\x00\xfc\x3f\x00\x00\x00'\
b'\x00\x03\xfc\x00\x00\x0f\xf0\x00\x00\x07\xf8\x00\x00\x1f\xe0\x00'\
b'\x00\x1f\x80\x00\x00\x07\xe0\x00\x00\x00\x03\xff\xff\xff\xfe\x00'\
b'\x00\xff\xff\xfe\x3f\xff\xfc\x00\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x00\x1f\x00\x00\x00\x00\xfc\x3f\x00\x00\x00'\
b'\x00\x03\xf0\x00\x60\x03\xf0\x00\x00\x07\xe0\x00\x00\x07\xe0\x00'\
b'\x00\x3f\x00\x00\x00\x03\xf0\x00\x00\x00\x03\xff\xff\xff\xfe\x00'\
b'\x03\xff\xff\xff\xff\xff\xff\x00\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x00\x1f\x00\x00\x00\x00\xfc\x3f\x00\x00\x00'\
b'\x00\x07\xe0\x01\xf8\x01\xf8\x00\x00\x0f\xc0\x00\x00\x03\xf0\x00'\
b'\x00\x7e\x00\x07\x00\x01\xf8\x00\x00\x00\x03\xff\xff\xff\xfe\x00'\
b'\x03\xff\xff\xff\xff\xff\xff\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x00\x1f\x00\x00\x0f\xff\xfc\x3f\xff\xf0\x00'\
b'\x00\x0f\xc0\x01\xf8\x00\xfc\x00\x00\x1f\x80\x0f\xe0\x01\xf8\x00'\
b'\x00\x7e\x00\x07\x00\x01\xf8\x00\x00\x00\x03\xff\xff\xff\xfe\x00'\
b'\x07\xfe\x00\xff\xff\x03\xff\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe3\xff\xc0\x00\x00\x1f\x00\x00\x7f\xff\xfc\x3f\xff\xfe\x00'\
b'\x00\x1f\x80\x01\xfc\x00\x7e\x00\x00\x3f\x00\x3f\xf0\x00\xfc\x00'\
b'\x00\xfc\x00\x07\x00\x00\xfc\x00\x00\x00\x03\xff\xff\xff\xfe\x00'\
b'\x07\xe0\x00\x1f\xf0\x00\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe7\xff\xc0\x00\x00\x1f\x00\x00\xff\xff\xfc\x3f\xff\xff\x00'\
b'\x00\x1f\x80\x01\xf8\x00\x7e\x00\x00\x3f\x00\x7f\xf8\x00\xfc\x00'\
b'\x00\xf8\x00\x07\x00\x00\x7c\x00\x00\x00\x00\x00\x3f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\xc0\x00\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe7\xff\xc0\x00\x00\x1f\x00\x01\xff\xff\xfc\x3f\xff\xff\x80'\
b'\x00\x3f\x00\x01\xf8\x00\x3f\x00\x00\x7e\x00\xff\xfc\x00\x7e\x00'\
b'\x00\xf8\x00\x07\x00\x00\x7c\x00\x00\x00\x00\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe7\xff\xc0\x00\x00\x1f\x00\x01\xff\xff\xfc\x3f\xff\xff\x80'\
b'\x00\x3e\x00\x00\xf0\x00\x1f\x00\x00\x7c\x01\xfc\xfe\x00\x3e\x00'\
b'\x01\xf0\x00\x07\x00\x00\x3e\x00\x00\x00\x00\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe7\xff\xc0\x00\x00\x1f\x00\x01\xf0\x00\xff\xff\x00\x0f\x80'\
b'\x00\x3e\x00\x00\x00\x00\x1f\x00\x00\x7c\x03\xf0\x3f\x00\x3e\x00'\
b'\x01\xf0\x00\x07\x00\x00\x3e\x00\x00\x00\x00\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x00\x1f\x00\x01\xf0\x00\x7f\xfe\x00\x0f\x80'\
b'\x00\x7c\x00\x00\x00\x00\x0f\x80\x00\xf8\x03\xe0\x1f\x00\x1f\x00'\
b'\x01\xf0\x00\x07\x00\x00\x3e\x00\x00\x00\x00\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x06\x1f\x00\x01\xf0\x00\x7f\xfe\x00\x0f\x80'\
b'\x00\x7c\x00\x3f\x80\x00\x0f\x80\x00\xf8\x03\xc0\x0f\x00\x1f\x00'\
b'\x01\xe0\x00\x07\x00\x00\x1e\x00\x00\xff\xff\xfc\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x0f\x1f\x00\x01\xf0\x00\x3f\xfc\x00\x0f\x80'\
b'\x00\x7c\x00\x7f\xe0\x00\x0f\x80\x00\xf8\x03\xc0\x0f\x80\x1f\x00'\
b'\x03\xe0\x00\x07\x00\x00\x1f\x00\x01\xff\xff\xfc\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x1f\x9f\x00\x01\xf0\x00\x1f\xf8\x00\x0f\x80'\
b'\x00\x78\x00\xff\xf0\x00\x07\x80\x00\xf0\x03\xc0\x0f\x80\x0f\x00'\
b'\x03\xe0\x00\x07\x00\x00\x1f\x00\x01\xff\xff\xfc\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x3f\x9f\x00\x01\xf0\x00\x07\xe0\x00\x0f\x80'\
b'\x00\xf8\x00\x7f\xf0\x00\x07\xc0\x01\xf0\x03\xc0\x0f\x80\x0f\x80'\
b'\x03\xe0\x00\x07\x00\x00\x1f\x00\x01\xff\xff\xfc\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x20\x7f\x1f\x00\x01\xf0\x00\x00\x00\x00\x0f\x80'\
b'\x00\xf8\x00\x7f\xf0\x00\x07\xc0\x01\xf0\x00\x00\x1f\x00\x0f\x80'\
b'\x03\xe0\x00\x07\x00\x00\x1f\x00\x01\xff\xff\xfc\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe7\xff\xc0\x70\xfe\x1f\x00\x01\xf0\x00\x00\x00\x00\x0f\x80'\
b'\x00\xf8\x00\x07\xf0\x00\x07\xc0\x01\xf0\x00\x00\x7f\x00\x0f\x80'\
b'\x03\xe0\x00\x07\x00\x00\x1f\x00\x01\xff\xff\xfc\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe7\xff\xc0\xf9\xfc\x1f\x00\x01\xf0\x01\xc0\x07\xfe\x0f\x80'\
b'\x00\xf8\x00\x07\xf0\x00\x07\xc0\x01\xf0\x00\x00\xfe\x00\x0f\x80'\
b'\x03\xe0\x00\x07\xc0\x00\x1f\x00\x01\xff\xff\xfc\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x08\x00\xfc\x00'\
b'\x03\xe7\xff\xc1\xff\xf8\x1f\x00\x01\xf0\x03\xe0\x07\xfe\x0f\x80'\
b'\x00\xf8\x00\x07\xf0\x00\x07\xc0\x01\xf0\x00\x03\xfe\x00\x0f\x80'\
b'\x03\xe0\x00\x07\xe0\x00\x1f\x00\x01\xff\xff\xfc\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x1c\x00\xfc\x00'\
b'\x03\xe7\xff\xc0\xff\xf0\x1f\x00\x01\xf0\x07\xf0\x07\xfe\x0f\x80'\
b'\x00\xf8\x00\x0f\xe0\x00\x07\xc0\x01\xf0\x00\x03\xf8\x00\x0f\x80'\
b'\x03\xe0\x00\x03\xf8\x00\x1f\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x00\x00\x3e\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x7f\xe0\x1f\x00\x01\xf0\x07\xf0\x07\xfe\x0f\x80'\
b'\x00\xf8\x00\x0f\xe0\x00\x07\xc0\x01\xf0\x00\x07\xf0\x00\x0f\x80'\
b'\x01\xe0\x00\x00\xfe\x00\x1e\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x01\x00\x7e\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x3f\xc0\x1f\x00\x01\xf0\x07\xf0\x00\x00\x0f\x80'\
b'\x00\xf8\x00\x0f\xe0\x00\x07\xc0\x01\xf0\x00\x07\xc0\x00\x0f\x80'\
b'\x01\xf0\x00\x00\x3f\x00\x3e\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x03\x80\x7f\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x1f\x80\x1f\x00\x01\xf0\x07\xf0\x00\x00\x0f\x80'\
b'\x00\x78\x00\x0f\xe0\x00\x07\x80\x00\xf0\x00\x0f\x80\x00\x0f\x00'\
b'\x01\xf0\x00\x00\x1f\xc0\x3e\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x07\xc0\xff\x80\xfc\x00'\
b'\x03\xe0\x00\x00\x0f\x00\x1f\x00\x01\xf0\x03\xe0\x00\x00\x0f\x80'\
b'\x00\x7c\x00\x0f\xe0\x00\x0f\x80\x00\xf8\x00\x0f\x80\x00\x1f\x00'\
b'\x01\xf0\x00\x00\x07\xe0\x3e\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x07\xe1\xff\xc0\xfc\x00'\
b'\x03\xe0\x00\x00\x06\x00\x1f\x00\x01\xf0\x00\x00\x07\xfe\x0f\x80'\
b'\x00\x7c\x00\x1f\xe0\x00\x0f\x80\x00\xf8\x00\x0f\x80\x00\x1f\x00'\
b'\x00\xf8\x00\x00\x01\xe0\x7c\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x0f\xe3\xff\xc0\xfc\x00'\
b'\x03\xe3\xff\xc0\x00\x00\x1f\x00\x01\xf0\x00\x00\x07\xfe\x0f\x80'\
b'\x00\x7c\x00\x1f\xc0\x00\x0f\x80\x00\xf8\x00\x07\x00\x00\x1f\x00'\
b'\x00\xf8\x00\x00\x00\xc0\x7c\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x1f\xf7\xff\xe0\xfc\x00'\
b'\x03\xe7\xff\xc0\x00\x00\x1f\x00\x01\xf0\x01\xc0\x07\xfe\x0f\x80'\
b'\x00\x3e\x00\x1f\xc0\x00\x1f\x00\x00\x7c\x00\x00\x00\x00\x3e\x00'\
b'\x00\xfc\x00\x00\x00\x00\xfc\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x3f\xff\xff\xf0\xfc\x00'\
b'\x03\xe7\xff\xc0\x00\x00\x1f\x00\x01\xf0\x1f\xfc\x07\xfe\x0f\x80'\
b'\x00\x3e\x00\x1f\xc0\x00\x1f\x00\x00\x7c\x00\x00\x00\x00\x3e\x00'\
b'\x00\x7e\x00\x00\x00\x01\xf8\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x7f\xff\xff\xf8\xfc\x00'\
b'\x03\xe7\xff\xc0\x00\x00\x1f\x00\x01\xf0\x3f\xfe\x00\x00\x0f\x80'\
b'\x00\x3f\x00\x1f\xc0\x00\x3f\x00\x00\x7e\x00\x02\x00\x00\x7e\x00'\
b'\x00\x7e\x00\x00\x00\x01\xf8\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xe0\x00\x0f\x80\x00\x1f\x80\x00\xfc\x7f\xff\xff\xf8\xfc\x00'\
b'\x03\xe7\xff\xc0\x00\x00\x1f\x00\x01\xf0\x7f\xff\x00\x00\x0f\x80'\
b'\x00\x1f\x80\x1f\xc7\x00\x7e\x00\x00\x3f\x00\x0f\x80\x00\xfc\x00'\
b'\x00\x3f\x00\x00\x00\x03\xf0\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xe3\xff\x0f\x80\x00\x1f\x80\x00\xfc\xff\xff\xff\xfc\xfc\x00'\
b'\x03\xe3\xff\xc0\x00\x00\x1f\x00\x01\xf0\x7f\xff\x00\x00\x0f\x80'\
b'\x00\x1f\x80\x1f\xcf\x00\x7e\x00\x00\x3f\x00\x0f\x80\x00\xfc\x00'\
b'\x00\x1f\x80\x00\x00\x07\xe0\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xff\xff\xff\x83\xff\x1f\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x00\x1f\x00\x01\xf0\x7f\xff\x00\x00\x0f\x80'\
b'\x00\x0f\xc0\x0f\xff\x00\xfc\x00\x00\x1f\x80\x0f\x80\x01\xf8\x00'\
b'\x00\x0f\xc0\x00\x00\x0f\xc0\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xff\xff\xff\x8f\xff\xff\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x00\x1f\x00\x01\xf0\x00\x00\x00\x00\x0f\x80'\
b'\x00\x07\xe0\x07\xff\x01\xf8\x00\x00\x0f\xc0\x0f\x80\x03\xf0\x00'\
b'\x00\x0f\xf0\x00\x00\x3f\xc0\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xff\xff\xff\xbf\xff\xff\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x00\x1f\x00\x01\xf0\x00\x00\x00\x00\x0f\x80'\
b'\x00\x03\xf0\x03\xfc\x03\xf0\x00\x00\x07\xe0\x07\x00\x07\xe0\x00'\
b'\x00\x07\xf8\x00\x00\x7f\x80\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xff\xff\xff\xff\xff\xff\x80\x00\xfc\x00\x00\x00\x00\xfc\x00'\
b'\x03\xe0\x00\x00\x00\x00\x1f\x00\x01\xf0\x00\x00\x00\x00\x0f\x80'\
b'\x00\x03\xfc\x00\x70\x0f\xf0\x00\x00\x07\xf8\x00\x00\x1f\xe0\x00'\
b'\x00\x03\xfe\x00\x01\xff\x00\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x07\xfe\x01\xff\xff\xff\xff\x80\x00\xff\xff\xff\xff\xff\xfc\x00'\
b'\x03\xff\xff\xff\xff\xff\xff\x00\x01\xf0\x00\x00\x00\x00\x0f\x80'\
b'\x00\x01\xfe\x00\x00\x1f\xe0\x00\x00\x03\xfc\x00\x00\x3f\xc0\x00'\
b'\x00\x01\xff\xc0\x0f\xfe\x00\x00\x00\x01\xfe\x00\x1f\xe0\x00\x00'\
b'\x03\xf0\x00\x1f\xfe\x01\xff\x80\x00\x7f\xff\xff\xff\xff\xf8\x00'\
b'\x03\xff\xff\xff\xff\xff\xff\x00\x01\xf0\x00\x00\x00\x00\x0f\x80'\
b'\x00\x00\xff\x80\x00\x7f\xc0\x00\x00\x01\xff\x00\x00\xff\x80\x00'\
b'\x00\x00\x7f\xff\xff\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x01\x80\x00\x07\xf8\x00\x3f\x00\x00\x7f\xff\xff\xff\xff\xf8\x00'\
b'\x03\xff\xff\xff\xff\xff\xff\x00\x01\xff\xff\xff\xff\xff\xff\x80'\
b'\x00\x00\x7f\xf0\x03\xff\x80\x00\x00\x00\xff\xe0\x07\xff\x00\x00'\
b'\x00\x00\x3f\xff\xff\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x01\xe0\x00\x06\x00\x00\x7f\xff\xff\xff\xff\xf8\x00'\
b'\x01\xff\xff\xff\xff\xff\xfe\x00\x01\xff\xff\xff\xff\xff\xff\x80'\
b'\x00\x00\x1f\xff\xff\xfe\x00\x00\x00\x00\x3f\xff\xff\xfc\x00\x00'\
b'\x00\x00\x0f\xff\xff\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x3f\xff\xff\xff\xff\xf0\x00'\
b'\x00\xff\xff\xff\xff\xff\xfc\x00\x00\xff\xff\xff\xff\xff\xff\x00'\
b'\x00\x00\x0f\xff\xff\xfc\x00\x00\x00\x00\x1f\xff\xff\xf8\x00\x00'\
b'\x00\x00\x01\xff\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xff\xff\xff\xff\xfe\x00'\
b'\x00\x00\x03\xff\xff\xf0\x00\x00\x00\x00\x07\xff\xff\xe0\x00\x00'\
b'\x00\x00\x00\x3f\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\xff\xff\xff\xff\xfc\x00'\
b'\x00\x00\x00\x7f\xff\x80\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x0f\xfc\x00\x00\x00\x00\x00\x00\x1f\xf8\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
# Single zero-copy view over the icon bytes; every data() call returns this
# same memoryview instead of copying the (large) byte string.
_mvdata = memoryview(_data)

def data():
    """Return a memoryview over the packed launcher-icon bitmap."""
    return _mvdata
| 66.621723
| 68
| 0.709861
| 4,370
| 17,788
| 2.888101
| 0.012357
| 0.759686
| 0.875683
| 0.981222
| 0.945646
| 0.917598
| 0.881071
| 0.858411
| 0.841296
| 0.837097
| 0
| 0.367246
| 0.015853
| 17,788
| 266
| 69
| 66.87218
| 0.353707
| 0.001799
| 0
| 0.494253
| 1
| 0.980843
| 0.923003
| 0.922834
| 0
| 1
| 0
| 0
| 0
| 1
| 0.003831
| false
| 0
| 0
| 0.003831
| 0.007663
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 16
|
f6e9a52380dc2b8e031cfef8f40c37608b5d5b58
| 3,378
|
py
|
Python
|
src/logos.py
|
cto-ai/static-site
|
62d9b258eb22770d367628d83252b6ad899a8049
|
[
"MIT"
] | 1
|
2020-05-25T18:47:53.000Z
|
2020-05-25T18:47:53.000Z
|
src/logos.py
|
cto-ai/static-site
|
62d9b258eb22770d367628d83252b6ad899a8049
|
[
"MIT"
] | null | null | null |
src/logos.py
|
cto-ai/static-site
|
62d9b258eb22770d367628d83252b6ad899a8049
|
[
"MIT"
] | 1
|
2020-03-21T05:11:07.000Z
|
2020-03-21T05:11:07.000Z
|
from cto_ai import sdk, ux
cto_terminal = """
[94m██████[39m[33m╗[39m [94m████████[39m[33m╗[39m [94m██████[39m[33m╗ [39m [94m█████[39m[33m╗[39m [94m██[39m[33m╗[39m
[94m██[39m[33m╔════╝[39m [33m╚══[39m[94m██[39m[33m╔══╝[39m [94m██[39m[33m╔═══[39m[94m██[39m[33m╗[39m [94m██[39m[33m╔══[39m[94m██[39m[33m╗[39m [94m██[39m[33m║[39m
[94m██[39m[33m║ [39m [94m ██[39m[33m║ [39m [94m██[39m[33m║[39m[94m ██[39m[33m║[39m [94m███████[39m[33m║[39m [94m██[39m[33m║[39m
[94m██[39m[33m║ [39m [94m ██[39m[33m║ [39m [94m██[39m[33m║[39m[94m ██[39m[33m║[39m [94m██[39m[33m╔══[39m[94m██[39m[33m║[39m [94m██[39m[33m║[39m
[33m╚[39m[94m██████[39m[33m╗[39m [94m ██[39m[33m║ [39m [33m╚[39m[94m██████[39m[33m╔╝[39m [94m██[39m[33m╗[39m [94m██[39m[33m║[39m[94m ██[39m[33m║[39m [94m██[39m[33m║[39m
[33m ╚═════╝[39m [33m ╚═╝ [39m [33m ╚═════╝ [39m [33m╚═╝[39m [33m╚═╝ ╚═╝[39m [33m╚═╝[39m
We’re building the world’s best developer experiences.
"""
cto_slack = """:white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square:
:white_square::white_square::black_square::black_square::white_square::white_square::black_square::black_square::black_square::white_square::white_square::white_square::black_square::black_square::black_square::white_square:
:white_square::black_square::white_square::white_square::black_square::white_square::black_square::white_square::white_square::black_square::white_square::black_square::white_square::white_square::white_square::white_square:
:white_square::black_square::white_square::white_square::black_square::white_square::black_square::black_square::black_square::white_square::white_square::white_square::black_square::black_square::white_square::white_square:
:white_square::black_square::white_square::white_square::black_square::white_square::black_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::black_square::white_square:
:white_square::white_square::black_square::black_square::white_square::white_square::black_square::white_square::white_square::white_square::white_square::black_square::black_square::black_square::white_square::white_square:
:white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square:"""
intro = """👋 Welcome to the CTO.ai Static-Site Op!
This Op will allow you to deploy a static site to a public S3 bucket. \n
❓ How does it work?
You will be prompted for your AWS access keys and a name for your bucket to store your static site. \n
ℹ️ Prerequisites
🔑 This Op will require AWS Access Key Id and AWS Access Key Secret.
Follow the link to create an AWS Access Key -> https://aws.amazon.com/premiumsupport/knowledge-center/create-access-key/
For more information, see the README. \n"""
def logo_print():
    """Print the CTO.ai logo suited to the current interface.

    Uses the ANSI-art banner on a terminal and the emoji-square banner
    everywhere else (e.g. Slack).
    """
    interface = sdk.get_interface_type()
    banner = cto_terminal if interface == 'terminal' else cto_slack
    ux.print(banner)
| 96.514286
| 239
| 0.688869
| 657
| 3,378
| 3.855403
| 0.161339
| 0.356099
| 0.543624
| 0.538492
| 0.743782
| 0.732728
| 0.732728
| 0.707462
| 0.707462
| 0.707462
| 0
| 0.094761
| 0.084665
| 3,378
| 35
| 240
| 96.514286
| 0.619017
| 0
| 0
| 0
| 0
| 0.5
| 0.936076
| 0.654336
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.033333
| 0
| 0.066667
| 0.1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
63f8b7bd8891aca6ae4b8312618fdd0ed555b55c
| 192
|
py
|
Python
|
tests/wrappers/django_test/polls/views.py
|
Dryja/epsagon-python
|
505b09268820593903afdce26e1bab7f64adc23b
|
[
"MIT"
] | 55
|
2018-09-30T11:46:01.000Z
|
2022-03-15T13:37:26.000Z
|
tests/wrappers/django_test/polls/views.py
|
Dryja/epsagon-python
|
505b09268820593903afdce26e1bab7f64adc23b
|
[
"MIT"
] | 323
|
2018-10-04T15:42:08.000Z
|
2022-02-20T11:26:40.000Z
|
tests/wrappers/django_test/polls/views.py
|
Dryja/epsagon-python
|
505b09268820593903afdce26e1bab7f64adc23b
|
[
"MIT"
] | 20
|
2018-10-11T14:47:16.000Z
|
2022-01-20T11:07:29.000Z
|
from django.shortcuts import render
from django.http import HttpResponse


def indexA(request):
    """Return a plain-text HTTP response identifying endpoint A."""
    body = "This is A"
    return HttpResponse(body)
def indexB(request):
    """Return a plain-text HTTP response identifying endpoint B."""
    body = "This is B"
    return HttpResponse(body)
| 19.2
| 36
| 0.755208
| 26
| 192
| 5.576923
| 0.615385
| 0.137931
| 0.344828
| 0.4
| 0.427586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161458
| 192
| 9
| 37
| 21.333333
| 0.900621
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 7
|
89ccccd9414f0a50b04a5c8eb67b92f391f1496f
| 100
|
py
|
Python
|
cloudtropy/__init__.py
|
pedroramaciotti/Cloudtropy
|
bce1cc1cd6c5217ac20cf5a98491d10c6a8905b2
|
[
"MIT"
] | null | null | null |
cloudtropy/__init__.py
|
pedroramaciotti/Cloudtropy
|
bce1cc1cd6c5217ac20cf5a98491d10c6a8905b2
|
[
"MIT"
] | null | null | null |
cloudtropy/__init__.py
|
pedroramaciotti/Cloudtropy
|
bce1cc1cd6c5217ac20cf5a98491d10c6a8905b2
|
[
"MIT"
] | 1
|
2021-03-10T14:04:04.000Z
|
2021-03-10T14:04:04.000Z
|
from .pmfs import pmf
from .entropy_functions import testfunc
from .entropy_functions import entropy
| 33.333333
| 39
| 0.86
| 14
| 100
| 6
| 0.5
| 0.261905
| 0.47619
| 0.619048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11
| 100
| 3
| 40
| 33.333333
| 0.94382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
c3c9b0bca1bb3de0aa889db5a5e05a0b9a3aba70
| 430
|
py
|
Python
|
sightseeingtech_passhub_api/api/__init__.py
|
BYMdevelopment/passhub-api-client-python
|
13537fe9b03d91aa451eb81a86047d8f715df681
|
[
"MIT"
] | null | null | null |
sightseeingtech_passhub_api/api/__init__.py
|
BYMdevelopment/passhub-api-client-python
|
13537fe9b03d91aa451eb81a86047d8f715df681
|
[
"MIT"
] | null | null | null |
sightseeingtech_passhub_api/api/__init__.py
|
BYMdevelopment/passhub-api-client-python
|
13537fe9b03d91aa451eb81a86047d8f715df681
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from sightseeingtech_passhub_api.api.order_record_resource_api import OrderRecordResourceApi
from sightseeingtech_passhub_api.api.product_resource_api import ProductResourceApi
from sightseeingtech_passhub_api.api.vendor_resource_api import VendorResourceApi
from sightseeingtech_passhub_api.api.voucher_resource_api import VoucherResourceApi
| 43
| 92
| 0.902326
| 53
| 430
| 6.90566
| 0.415094
| 0.20765
| 0.284153
| 0.31694
| 0.349727
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002506
| 0.072093
| 430
| 9
| 93
| 47.777778
| 0.914787
| 0.095349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.8
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
c3caa7d07eafc4f4d5c75023fcdd77ac7af84795
| 61,803
|
py
|
Python
|
sandbox/lib/jumpscale/JumpScale9Lib/clients/gitea/client/repos_service.py
|
Jumpscale/sandbox_linux
|
2aacd36b467ef30ac83718abfa82c6883b67a02f
|
[
"Apache-2.0"
] | 2
|
2017-06-07T08:11:47.000Z
|
2017-11-10T02:19:48.000Z
|
JumpScale9Lib/clients/gitea/client/repos_service.py
|
Jumpscale/lib9
|
82224784ef2a7071faeb48349007211c367bc673
|
[
"Apache-2.0"
] | 188
|
2017-06-21T06:16:13.000Z
|
2020-06-17T14:20:24.000Z
|
sandbox/lib/jumpscale/JumpScale9Lib/clients/gitea/client/repos_service.py
|
Jumpscale/sandbox_linux
|
2aacd36b467ef30ac83718abfa82c6883b67a02f
|
[
"Apache-2.0"
] | 3
|
2018-06-12T05:18:28.000Z
|
2019-09-24T06:49:17.000Z
|
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
from .Branch import Branch
from .Comment import Comment
from .DeployKey import DeployKey
from .Issue import Issue
from .Label import Label
from .Milestone import Milestone
from .PullRequest import PullRequest
from .Release import Release
from .Repository import Repository
from .SearchResults import SearchResults
from .Status import Status
from .TrackedTime import TrackedTime
from .User import User
from .WatchInfo import WatchInfo
from .unhandled_api_error import UnhandledAPIError
from .unmarshall_error import UnmarshallError
class ReposService():
def __init__(self, client):
    """Create a ReposService bound to *client*.

    client: HTTP client object exposing ``base_url`` and the
    ``get``/``post``/``put``/``patch``/``delete`` methods used below.
    """
    # The generated code contained a dead `pass` statement before this
    # assignment; removed.
    self.client = client
def repoMigrate(self, data, headers=None, query_params=None, content_type="application/json"):
    """
    Migrate a remote git repository
    It is method for POST /repos/migrate
    Returns (Repository, response) on HTTP 201; raises UnhandledAPIError
    for any other status and UnmarshallError when decoding fails.
    """
    uri = self.client.base_url + "/repos/migrate"
    resp = self.client.post(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 201:
            return Repository(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: Exception objects have no `.message` attribute on
        # Python 3; pass the exception itself, as the ValueError branch
        # above already does.
        raise UnmarshallError(resp, e)
def repoSearch(self, headers=None, query_params=None, content_type="application/json"):
    """
    Search for repositories
    It is method for GET /repos/search
    Returns (SearchResults, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/search"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return SearchResults(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def repoGetArchive(self, filepath, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Get an archive of a repository
    It is method for GET /repos/{owner}/{repo}/archive/{filepath}
    """
    endpoint = "%s/repos/%s/%s/archive/%s" % (
        self.client.base_url, owner, repo, filepath)
    return self.client.get(endpoint, None, headers, query_params, content_type)
def repoGetBranch(self, branch, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List a repository's branches
    It is method for GET /repos/{owner}/{repo}/branches/{branch}
    Returns (Branch, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/branches/" + branch
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return Branch(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: Exception objects have no `.message` attribute on
        # Python 3; pass the exception itself.
        raise UnmarshallError(resp, e)
def repoListBranches(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List a repository's branches
    It is method for GET /repos/{owner}/{repo}/branches
    Returns (list of Branch, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/branches"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(Branch(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def repoDeleteCollaborator(
        self,
        collaborator,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Delete a collaborator from a repository
    It is method for DELETE /repos/{owner}/{repo}/collaborators/{collaborator}
    """
    endpoint = "%s/repos/%s/%s/collaborators/%s" % (
        self.client.base_url, owner, repo, collaborator)
    return self.client.delete(endpoint, None, headers, query_params, content_type)
def repoCheckCollaborator(
        self,
        collaborator,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Check if a user is a collaborator of a repository
    It is method for GET /repos/{owner}/{repo}/collaborators/{collaborator}
    """
    endpoint = "%s/repos/%s/%s/collaborators/%s" % (
        self.client.base_url, owner, repo, collaborator)
    return self.client.get(endpoint, None, headers, query_params, content_type)
def repoAddCollaborator(
        self,
        data,
        collaborator,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Add a collaborator to a repository
    It is method for PUT /repos/{owner}/{repo}/collaborators/{collaborator}
    """
    endpoint = "%s/repos/%s/%s/collaborators/%s" % (
        self.client.base_url, owner, repo, collaborator)
    return self.client.put(endpoint, data, headers, query_params, content_type)
def repoListCollaborators(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List a repository's collaborators
    It is method for GET /repos/{owner}/{repo}/collaborators
    Returns (list of User, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/collaborators"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(User(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: Exception objects have no `.message` attribute on
        # Python 3; pass the exception itself.
        raise UnmarshallError(resp, e)
def repoGetCombinedStatusByRef(
        self,
        ref,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Get a commit's combined status, by branch/tag/commit reference
    It is method for GET /repos/{owner}/{repo}/commits/{ref}/statuses
    Returns (Status, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/commits/" + ref + "/statuses"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return Status(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def repoGetEditorConfig(
        self,
        filepath,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Get the EditorConfig definitions of a file in a repository
    It is method for GET /repos/{owner}/{repo}/editorconfig/{filepath}
    """
    endpoint = "%s/repos/%s/%s/editorconfig/%s" % (
        self.client.base_url, owner, repo, filepath)
    return self.client.get(endpoint, None, headers, query_params, content_type)
def listForks(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List a repository's forks
    It is method for GET /repos/{owner}/{repo}/forks
    Returns (list of Repository, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/forks"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(Repository(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: Exception objects have no `.message` attribute on
        # Python 3; pass the exception itself.
        raise UnmarshallError(resp, e)
def createFork(self, data, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Fork a repository
    It is method for POST /repos/{owner}/{repo}/forks
    Returns (Repository, response) on HTTP 202 (fork is asynchronous).
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/forks"
    resp = self.client.post(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 202:
            return Repository(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def repoGetHook(self, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Get a hook
    It is method for GET /repos/{owner}/{repo}/hooks/{id}
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/hooks/" + id
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        # NOTE(review): the generated 200-handler parses each element as
        # Branch, which looks wrong for a hooks endpoint — presumably a
        # go-raml generation artifact. Kept as-is to preserve behavior;
        # TODO confirm against the Gitea API spec.
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(Branch(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: Exception objects have no `.message` attribute on
        # Python 3; pass the exception itself.
        raise UnmarshallError(resp, e)
def repoEditHook(self, data, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Edit a hook in a repository
    It is method for PATCH /repos/{owner}/{repo}/hooks/{id}
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/hooks/" + id
    resp = self.client.patch(uri, data, headers, query_params, content_type)
    try:
        # NOTE(review): Branch parsing kept as generated — see repoGetHook.
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(Branch(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def repoListHooks(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List the hooks in a repository
    It is method for GET /repos/{owner}/{repo}/hooks
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/hooks"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        # NOTE(review): Branch parsing kept as generated — see repoGetHook.
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(Branch(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def repoCreateHook(self, data, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Create a hook
    It is method for POST /repos/{owner}/{repo}/hooks
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/hooks"
    resp = self.client.post(uri, data, headers, query_params, content_type)
    try:
        # NOTE(review): Branch parsing kept as generated — see repoGetHook.
        if resp.status_code == 201:
            resps = []
            for elem in resp.json():
                resps.append(Branch(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueDeleteComment(self, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Delete a comment
    It is method for DELETE /repos/{owner}/{repo}/issues/comments/{id}
    """
    endpoint = "%s/repos/%s/%s/issues/comments/%s" % (
        self.client.base_url, owner, repo, id)
    return self.client.delete(endpoint, None, headers, query_params, content_type)
def issueEditComment(self, data, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Edit a comment
    It is method for PATCH /repos/{owner}/{repo}/issues/comments/{id}
    Returns (Comment, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues/comments/" + id
    resp = self.client.patch(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return Comment(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: Exception objects have no `.message` attribute on
        # Python 3; pass the exception itself.
        raise UnmarshallError(resp, e)
def issueGetRepoComments(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List all comments in a repository
    It is method for GET /repos/{owner}/{repo}/issues/comments
    Returns (list of Comment, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues/comments"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(Comment(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueGetIssue(self, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Get an issue by id
    It is method for GET /repos/{owner}/{repo}/issues/{id}
    Returns (Issue, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues/" + id
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return Issue(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueEditIssue(self, data, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Edit an issue
    It is method for PATCH /repos/{owner}/{repo}/issues/{id}
    Returns (Issue, response) on HTTP 201 (as generated).
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues/" + id
    resp = self.client.patch(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 201:
            return Issue(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueDeleteCommentDeprecated(
        self,
        id,
        index,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Delete a comment
    It is method for DELETE /repos/{owner}/{repo}/issues/{index}/comments/{id}
    """
    endpoint = "%s/repos/%s/%s/issues/%s/comments/%s" % (
        self.client.base_url, owner, repo, index, id)
    return self.client.delete(endpoint, None, headers, query_params, content_type)
def issueEditCommentDeprecated(
        self,
        data,
        id,
        index,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Edit a comment
    It is method for PATCH /repos/{owner}/{repo}/issues/{index}/comments/{id}
    Returns (Comment, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues/" + index + "/comments/" + id
    resp = self.client.patch(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return Comment(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: Exception objects have no `.message` attribute on
        # Python 3; pass the exception itself.
        raise UnmarshallError(resp, e)
def issueGetComments(self, index, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List all comments on an issue
    It is method for GET /repos/{owner}/{repo}/issues/{index}/comments
    Returns (list of Comment, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues/" + index + "/comments"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(Comment(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueCreateComment(
        self,
        data,
        index,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Add a comment to an issue
    It is method for POST /repos/{owner}/{repo}/issues/{index}/comments
    Returns (Comment, response) on HTTP 201.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues/" + index + "/comments"
    resp = self.client.post(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 201:
            return Comment(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueRemoveLabel(
        self,
        id,
        index,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Remove a label from an issue
    It is method for DELETE /repos/{owner}/{repo}/issues/{index}/labels/{id}
    """
    endpoint = "%s/repos/%s/%s/issues/%s/labels/%s" % (
        self.client.base_url, owner, repo, index, id)
    return self.client.delete(endpoint, None, headers, query_params, content_type)
def issueClearLabels(self, index, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Remove all labels from an issue
    It is method for DELETE /repos/{owner}/{repo}/issues/{index}/labels
    """
    endpoint = "%s/repos/%s/%s/issues/%s/labels" % (
        self.client.base_url, owner, repo, index)
    return self.client.delete(endpoint, None, headers, query_params, content_type)
def issueGetLabels(self, index, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Get an issue's labels
    It is method for GET /repos/{owner}/{repo}/issues/{index}/labels
    Returns (list of Label, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues/" + index + "/labels"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(Label(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: Exception objects have no `.message` attribute on
        # Python 3; pass the exception itself.
        raise UnmarshallError(resp, e)
def issueAddLabel(self, data, index, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Add a label to an issue
    It is method for POST /repos/{owner}/{repo}/issues/{index}/labels
    Returns (list of Label, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues/" + index + "/labels"
    resp = self.client.post(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(Label(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueReplaceLabels(
        self,
        data,
        index,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Replace an issue's labels
    It is method for PUT /repos/{owner}/{repo}/issues/{index}/labels
    Returns (list of Label, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues/" + index + "/labels"
    resp = self.client.put(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(Label(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueTrackedTimes(self, index, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List an issue's tracked times
    It is method for GET /repos/{owner}/{repo}/issues/{index}/times
    Returns (list of TrackedTime, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues/" + index + "/times"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(TrackedTime(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: Exception objects have no `.message` attribute on
        # Python 3; pass the exception itself.
        raise UnmarshallError(resp, e)
def issueAddTime(self, data, index, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Add a tracked time to a issue
    It is method for POST /repos/{owner}/{repo}/issues/{index}/times
    Returns (TrackedTime, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues/" + index + "/times"
    resp = self.client.post(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return TrackedTime(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueListIssues(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List a repository's issues
    It is method for GET /repos/{owner}/{repo}/issues
    Returns (list of Issue, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(Issue(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueCreateIssue(self, data, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Create an issue
    It is method for POST /repos/{owner}/{repo}/issues
    Returns (Issue, response) on HTTP 201.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/issues"
    resp = self.client.post(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 201:
            return Issue(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def repoDeleteKey(self, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Delete a key from a repository
    It is method for DELETE /repos/{owner}/{repo}/keys/{id}
    """
    endpoint = "%s/repos/%s/%s/keys/%s" % (
        self.client.base_url, owner, repo, id)
    return self.client.delete(endpoint, None, headers, query_params, content_type)
def repoGetKey(self, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Get a repository's key by id
    It is method for GET /repos/{owner}/{repo}/keys/{id}
    Returns (DeployKey, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/keys/" + id
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return DeployKey(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: Exception objects have no `.message` attribute on
        # Python 3; pass the exception itself.
        raise UnmarshallError(resp, e)
def repoListKeys(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List a repository's keys
    It is method for GET /repos/{owner}/{repo}/keys
    Returns (list of DeployKey, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/keys"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(DeployKey(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def repoCreateKey(self, data, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Add a key to a repository
    It is method for POST /repos/{owner}/{repo}/keys
    Returns (DeployKey, response) on HTTP 201.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/keys"
    resp = self.client.post(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 201:
            return DeployKey(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueDeleteLabel(self, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Delete a label
    It is method for DELETE /repos/{owner}/{repo}/labels/{id}
    """
    endpoint = "%s/repos/%s/%s/labels/%s" % (
        self.client.base_url, owner, repo, id)
    return self.client.delete(endpoint, None, headers, query_params, content_type)
def issueGetLabel(self, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Get a single label
    It is method for GET /repos/{owner}/{repo}/labels/{id}
    Returns (Label, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/labels/" + id
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return Label(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: Exception objects have no `.message` attribute on
        # Python 3; pass the exception itself.
        raise UnmarshallError(resp, e)
def issueEditLabel(self, data, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Update a label
    It is method for PATCH /repos/{owner}/{repo}/labels/{id}
    Returns (Label, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/labels/" + id
    resp = self.client.patch(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return Label(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueListLabels(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Get all of a repository's labels
    It is method for GET /repos/{owner}/{repo}/labels
    Returns (list of Label, response) on HTTP 200.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/labels"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            resps = []
            for elem in resp.json():
                resps.append(Label(elem))
            return resps, resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueCreateLabel(self, data, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Create a label
    It is method for POST /repos/{owner}/{repo}/labels
    Returns (Label, response) on HTTP 201.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/labels"
    resp = self.client.post(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 201:
            return Label(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # BUGFIX: no Exception.message on Python 3.
        raise UnmarshallError(resp, e)
def issueDeleteMilestone(self, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Delete a milestone
    It is method for DELETE /repos/{owner}/{repo}/milestones/{id}
    """
    endpoint = "%s/repos/%s/%s/milestones/%s" % (
        self.client.base_url, owner, repo, id)
    return self.client.delete(endpoint, None, headers, query_params, content_type)
def issueGetMilestone(self, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
"""
Get a milestone
It is method for GET /repos/{owner}/{repo}/milestones/{id}
"""
uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/milestones/" + id
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return Milestone(resp.json()), resp
message = 'unknown status code={}'.format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code,
message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def issueEditMilestone(
        self,
        data,
        id,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Update a milestone.
    It is method for PATCH /repos/{owner}/{repo}/milestones/{id}

    Returns (Milestone, response) on HTTP 200. Raises UnhandledAPIError for
    any other status code and UnmarshallError if the body cannot be parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/milestones/" + id
    resp = self.client.patch(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return Milestone(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def issueGetMilestones(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Get all of a repository's milestones.
    It is method for GET /repos/{owner}/{repo}/milestones

    Returns (list of Milestone, response) on HTTP 200. Raises
    UnhandledAPIError for any other status code and UnmarshallError if the
    body cannot be parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/milestones"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return [Milestone(elem) for elem in resp.json()], resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def issueCreateMilestone(self, data, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Create a milestone.
    It is method for POST /repos/{owner}/{repo}/milestones

    Returns (Milestone, response) on HTTP 201. Raises UnhandledAPIError for
    any other status code and UnmarshallError if the body cannot be parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/milestones"
    resp = self.client.post(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 201:
            return Milestone(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoMirrorSync(self, data, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Trigger a sync of a mirrored repository via
    POST /repos/{owner}/{repo}/mirror-sync and return the raw client
    response.
    """
    return self.client.post(
        self.client.base_url + "/repos/" + owner + "/" + repo + "/mirror-sync",
        data, headers, query_params, content_type)
def repoPullRequestIsMerged(
        self,
        index,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Check whether pull request *index* has been merged via
    GET /repos/{owner}/{repo}/pulls/{index}/merge and return the raw
    client response.
    """
    return self.client.get(
        self.client.base_url + "/repos/" + owner + "/" + repo + "/pulls/" + index + "/merge",
        None, headers, query_params, content_type)
def repoMergePullRequest(
        self,
        data,
        index,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Merge pull request *index* via
    POST /repos/{owner}/{repo}/pulls/{index}/merge and return the raw
    client response.
    """
    return self.client.post(
        self.client.base_url + "/repos/" + owner + "/" + repo + "/pulls/" + index + "/merge",
        data, headers, query_params, content_type)
def repoGetPullRequest(self, index, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Get a pull request.
    It is method for GET /repos/{owner}/{repo}/pulls/{index}

    Returns (PullRequest, response) on HTTP 200. Raises UnhandledAPIError
    for any other status code and UnmarshallError if the body cannot be
    parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/pulls/" + index
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return PullRequest(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoEditPullRequest(
        self,
        data,
        index,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Update a pull request.
    It is method for PATCH /repos/{owner}/{repo}/pulls/{index}

    Returns (PullRequest, response) on HTTP 201. Raises UnhandledAPIError
    for any other status code and UnmarshallError if the body cannot be
    parsed.

    NOTE(review): the success status here is 201 while sibling PATCH
    methods expect 200 — this matches the generated spec; confirm against
    the server's API documentation before changing it.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/pulls/" + index
    resp = self.client.patch(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 201:
            return PullRequest(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoListPullRequests(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List a repo's pull requests.
    It is method for GET /repos/{owner}/{repo}/pulls

    Returns (list of PullRequest, response) on HTTP 200. Raises
    UnhandledAPIError for any other status code and UnmarshallError if the
    body cannot be parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/pulls"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return [PullRequest(elem) for elem in resp.json()], resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoCreatePullRequest(
        self,
        data,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Create a pull request.
    It is method for POST /repos/{owner}/{repo}/pulls

    Returns (PullRequest, response) on HTTP 201. Raises UnhandledAPIError
    for any other status code and UnmarshallError if the body cannot be
    parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/pulls"
    resp = self.client.post(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 201:
            return PullRequest(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoGetRawFile(self, filepath, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Fetch the raw contents of *filepath* from a repository via
    GET /repos/{owner}/{repo}/raw/{filepath} and return the raw client
    response.
    """
    return self.client.get(
        self.client.base_url + "/repos/" + owner + "/" + repo + "/raw/" + filepath,
        None, headers, query_params, content_type)
def repoDeleteRelease(self, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Delete the release identified by *id* via
    DELETE /repos/{owner}/{repo}/releases/{id} and return the raw client
    response.
    """
    return self.client.delete(
        self.client.base_url + "/repos/" + owner + "/" + repo + "/releases/" + id,
        None, headers, query_params, content_type)
def repoEditRelease(self, data, id, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Update a release.
    It is method for PATCH /repos/{owner}/{repo}/releases/{id}

    Returns (Release, response) on HTTP 200. Raises UnhandledAPIError for
    any other status code and UnmarshallError if the body cannot be parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/releases/" + id
    resp = self.client.patch(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return Release(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoCreateRelease(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Create a release.
    It is method for GET /repos/{owner}/{repo}/releases

    Returns (Release, response) on HTTP 201. Raises UnhandledAPIError for
    any other status code and UnmarshallError if the body cannot be parsed.

    NOTE(review): despite the name, this generated method issues a GET and
    takes no request body, yet expects a 201 Created — almost certainly a
    spec/generator defect (a create endpoint would POST with a payload).
    The behavior is preserved here because fixing it would change the
    method's signature; confirm against the server's API documentation.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/releases"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 201:
            return Release(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoListStargazers(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List a repo's stargazers.
    It is method for GET /repos/{owner}/{repo}/stargazers

    Returns (list of User, response) on HTTP 200. Raises UnhandledAPIError
    for any other status code and UnmarshallError if the body cannot be
    parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/stargazers"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return [User(elem) for elem in resp.json()], resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoListStatuses(self, sha, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Get a commit's statuses.
    It is method for GET /repos/{owner}/{repo}/statuses/{sha}

    Returns (list of Status, response) on HTTP 200. Raises
    UnhandledAPIError for any other status code and UnmarshallError if the
    body cannot be parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/statuses/" + sha
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return [Status(elem) for elem in resp.json()], resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoCreateStatus(
        self,
        data,
        sha,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Create a commit status.
    It is method for POST /repos/{owner}/{repo}/statuses/{sha}

    Returns (list of Status, response) on HTTP 200. Raises
    UnhandledAPIError for any other status code and UnmarshallError if the
    body cannot be parsed.

    NOTE(review): this create endpoint unmarshals the response as a LIST of
    Status and expects 200 rather than 201 — that matches the generated
    spec but is unusual for a create; confirm against the server's API
    documentation before relying on the return shape.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/statuses/" + sha
    resp = self.client.post(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return [Status(elem) for elem in resp.json()], resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoListSubscribers(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List a repo's watchers.
    It is method for GET /repos/{owner}/{repo}/subscribers

    Returns (list of User, response) on HTTP 200. Raises UnhandledAPIError
    for any other status code and UnmarshallError if the body cannot be
    parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/subscribers"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return [User(elem) for elem in resp.json()], resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def userCurrentDeleteSubscription(
        self,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Stop watching a repository on behalf of the current user via
    DELETE /repos/{owner}/{repo}/subscription and return the raw client
    response.
    """
    return self.client.delete(
        self.client.base_url + "/repos/" + owner + "/" + repo + "/subscription",
        None, headers, query_params, content_type)
def userCurrentCheckSubscription(
        self,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Check if the current user is watching a repo.
    It is method for GET /repos/{owner}/{repo}/subscription

    Returns (WatchInfo, response) on HTTP 200. Raises UnhandledAPIError for
    any other status code and UnmarshallError if the body cannot be parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/subscription"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return WatchInfo(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def userCurrentPutSubscription(
        self,
        data,
        repo,
        owner,
        headers=None,
        query_params=None,
        content_type="application/json"):
    """
    Watch a repo.
    It is method for PUT /repos/{owner}/{repo}/subscription

    Returns (WatchInfo, response) on HTTP 200. Raises UnhandledAPIError for
    any other status code and UnmarshallError if the body cannot be parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/subscription"
    resp = self.client.put(uri, data, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return WatchInfo(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def userTrackedTimes(self, tracker, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List a user's tracked times in a repo.
    It is method for GET /repos/{owner}/{repo}/times/{tracker}

    Returns (list of TrackedTime, response) on HTTP 200. Raises
    UnhandledAPIError for any other status code and UnmarshallError if the
    body cannot be parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/times/" + tracker
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return [TrackedTime(elem) for elem in resp.json()], resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoTrackedTimes(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    List a repo's tracked times.
    It is method for GET /repos/{owner}/{repo}/times

    Returns (list of TrackedTime, response) on HTTP 200. Raises
    UnhandledAPIError for any other status code and UnmarshallError if the
    body cannot be parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo + "/times"
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return [TrackedTime(elem) for elem in resp.json()], resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoDelete(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Delete a repository via DELETE /repos/{owner}/{repo} and return the
    raw client response.
    """
    return self.client.delete(
        self.client.base_url + "/repos/" + owner + "/" + repo,
        None, headers, query_params, content_type)
def repoGet(self, repo, owner, headers=None, query_params=None, content_type="application/json"):
    """
    Get a repository.
    It is method for GET /repos/{owner}/{repo}

    Returns (Repository, response) on HTTP 200. Raises UnhandledAPIError
    for any other status code and UnmarshallError if the body cannot be
    parsed.
    """
    uri = self.client.base_url + "/repos/" + owner + "/" + repo
    resp = self.client.get(uri, None, headers, query_params, content_type)
    try:
        if resp.status_code == 200:
            return Repository(resp.json()), resp
        message = 'unknown status code={}'.format(resp.status_code)
        raise UnhandledAPIError(response=resp, code=resp.status_code,
                                message=message)
    except ValueError as msg:
        raise UnmarshallError(resp, msg)
    except UnhandledAPIError as uae:
        raise uae
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3; use str(e).
        raise UnmarshallError(resp, str(e))
def repoDeleteHook(self, user, repo, id, headers=None, query_params=None, content_type="application/json"):
    """
    Delete hook *id* from a repository via
    DELETE /repos/{user}/{repo}/hooks/{id} and return the raw client
    response. (This endpoint keys the owner segment as *user*.)
    """
    return self.client.delete(
        self.client.base_url + "/repos/" + user + "/" + repo + "/hooks/" + id,
        None, headers, query_params, content_type)
| 41.395177
| 120
| 0.567917
| 6,625
| 61,803
| 5.219019
| 0.035623
| 0.060157
| 0.063165
| 0.045812
| 0.929518
| 0.926278
| 0.920176
| 0.918065
| 0.901145
| 0.87795
| 0
| 0.003748
| 0.32657
| 61,803
| 1,492
| 121
| 41.422922
| 0.827006
| 0.098652
| 0
| 0.884544
| 1
| 0
| 0.067548
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06797
| false
| 0.000931
| 0.014898
| 0
| 0.150838
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c3e2360ebb7da0f33cd3cab2f443c2fcea1bc7bc
| 159
|
py
|
Python
|
src/cms/views/media/__init__.py
|
S10MC2015/cms-django
|
b08f2be60a9db6c8079ee923de2cd8912f550b12
|
[
"Apache-2.0"
] | 4
|
2019-12-05T16:45:17.000Z
|
2020-05-09T07:26:34.000Z
|
src/cms/views/media/__init__.py
|
S10MC2015/cms-django
|
b08f2be60a9db6c8079ee923de2cd8912f550b12
|
[
"Apache-2.0"
] | 56
|
2019-12-05T12:31:37.000Z
|
2021-01-07T15:47:45.000Z
|
src/cms/views/media/__init__.py
|
S10MC2015/cms-django
|
b08f2be60a9db6c8079ee923de2cd8912f550b12
|
[
"Apache-2.0"
] | 2
|
2019-12-11T09:52:26.000Z
|
2020-05-09T07:26:38.000Z
|
"""
Standard Python package initializer for the media views module.
"""
from .media_actions import delete_file
from .media_edit_view import MediaEditView
from .media_list_view import MediaListView
| 22.714286
| 42
| 0.830189
| 22
| 159
| 5.727273
| 0.636364
| 0.214286
| 0.206349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106918
| 159
| 6
| 43
| 26.5
| 0.887324
| 0.157233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7f05c05b0bc6fa312e70eccd556d1d0fac24c2cc
| 1,797
|
py
|
Python
|
webapp/scanner/migrations/0021_auto_20210518_1146.py
|
fragmuffin/QR-Code-Reader
|
c024596b2a8844f759bc0c96a07c6325b824d66e
|
[
"MIT"
] | 2
|
2019-05-22T04:20:57.000Z
|
2020-02-11T12:33:44.000Z
|
webapp/scanner/migrations/0021_auto_20210518_1146.py
|
fragmuffin/QR-Code-Reader
|
c024596b2a8844f759bc0c96a07c6325b824d66e
|
[
"MIT"
] | 7
|
2019-05-24T04:23:37.000Z
|
2021-11-14T09:57:49.000Z
|
webapp/scanner/migrations/0021_auto_20210518_1146.py
|
fragmuffin/QR-Code-Reader
|
c024596b2a8844f759bc0c96a07c6325b824d66e
|
[
"MIT"
] | 2
|
2019-03-28T11:40:18.000Z
|
2020-01-09T02:03:00.000Z
|
# Generated by Django 3.2.3 on 2021-05-18 11:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-declare every model's implicit ``id`` as a plain AutoField."""

    dependencies = [
        ('scanner', '0020_event_is_template'),
    ]

    # All nine models receive the identical AlterField, so generate the
    # operation list from the model names instead of spelling each one out.
    operations = [
        migrations.AlterField(
            model_name=model,
            name='id',
            field=models.AutoField(primary_key=True, serialize=False),
        )
        for model in (
            'address',
            'attendance',
            'contact',
            'event',
            'locblock',
            'membership',
            'membershipstatus',
            'membershiptype',
            'participantstatustype',
        )
    ]
| 30.457627
| 70
| 0.56483
| 164
| 1,797
| 6.060976
| 0.268293
| 0.181087
| 0.226358
| 0.262575
| 0.722334
| 0.722334
| 0.722334
| 0.722334
| 0.722334
| 0.722334
| 0
| 0.015561
| 0.320534
| 1,797
| 58
| 71
| 30.982759
| 0.798526
| 0.025042
| 0
| 0.692308
| 1
| 0
| 0.082857
| 0.024571
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.019231
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
61303c7eda46dbb7fc4fd7dffdfe8d71125e9a15
| 21,364
|
py
|
Python
|
BattleshipEngine.py
|
tclax/PyBattleship
|
51e554fee7edb45ff0907672cac2abd371b992cb
|
[
"MIT"
] | null | null | null |
BattleshipEngine.py
|
tclax/PyBattleship
|
51e554fee7edb45ff0907672cac2abd371b992cb
|
[
"MIT"
] | null | null | null |
BattleshipEngine.py
|
tclax/PyBattleship
|
51e554fee7edb45ff0907672cac2abd371b992cb
|
[
"MIT"
] | null | null | null |
from Board import Board
from Ship import Ship
from SimulationResult import SimulationResult
from SimulationStatistics import SimulationStatistics
import functionalComponents
import random, copy
#Represents a game of battleship. Using a board of set ships, the engine will attempt to compute positions of the ships in an attempt to sink all the ships. The engine will determine how many turns have passed after all ships are sunk. The fewer the turns, the better the engine is.
class BattleshipEngine:
def __init__(self):
    """Set up an 8x8 board and an empty simulation-results dictionary."""
    # NOTE: the attribute name 'simulationResuts' (sic) is referenced
    # throughout the class, so the typo is preserved for compatibility.
    self.simulationResuts = {}
    self.board = Board(8)
def PrintBoard(self):
    """Print the current board state by delegating to the Board instance."""
    self.board.PrintBoard()
def SetNewBoard(self):
    """
    Replace the current board with a fresh 8x8 board, randomly place the
    five ships (sizes 5/4/3/3/2 with codes A/B/S/S/C), and snapshot the
    initial tile state for later reporting.
    """
    self.board = Board(8)
    # place 5 ships in random coordinates (same sizes/codes/order as before)
    for size, code in ((5, 'A'), (4, 'B'), (3, 'S'), (3, 'S'), (2, 'C')):
        self.board.PlaceShipAtRandomCoordinate(Ship(size, code))
    self.board.initalTileListState = copy.deepcopy(self.board.tileList)
#runs the Battleship simulations against a set number of attack strategies.
def StartBattleshipSimulation(self, iterations):
    """
    Run *iterations* simulations, each on a freshly randomized board,
    exercising the horizontal and vertical linear attack strategies in
    turn and recording every result in self.simulationResuts keyed by
    '<strategy name>#<iteration>'.
    """
    for run in range(0, iterations):
        # start a new random board for this iteration
        self.SetNewBoard()
        for strategy in (self.HorizontalLinearAttackStrategy,
                         self.VerticalLinearAttackStrategy):
            result = strategy()
            # record the result, then reset the board for the next strategy
            self.simulationResuts[result.attackStrategy + '#' + str(run)] = result
            self.board.PrintBoard()
            self.board.ResetBoard()
            self.board.PrintBoard()
def DEVStartBattleshipSimulation(self, iterations):
    """
    Development driver: on a single randomized board, run every attack
    strategy *iterations* times, resetting the board after each run, then
    print aggregate statistics over all recorded results.
    """
    self.SetNewBoard()
    for run in range(0, iterations):
        for strategy in (self.DiagonalHitScanAttackStratgy,
                         self.DiagonalLinearAttackStrategy,
                         self.RandomHitScanAttackStrategy,
                         self.VerticalLinearAttackStrategy,
                         self.HorizontalLinearAttackStrategy):
            result = strategy()
            self.simulationResuts[result.attackStrategy + '#' + str(run)] = result
            self.board.ResetBoard()
    stats = SimulationStatistics(self.simulationResuts.values())
    stats.PrintSimulationStatistics()
#allows the user to attack by entering coordinates
def AttackStrategyUserInput(self):
    """
    Interactive attack loop: prompt the user for coordinates until every
    ship on the board is sunk. Re-prompts until the entered coordinate
    exists on the board and has not been attacked before.
    """
    moves = 0
    while(not self.board.CheckIfAllShipsSunk()):
        print('Not sunk')
        moves += 1
        self.board.PrintBoard()
        while True:
            print('Enter a starting coordinate for the ship:')
            x = input('Enter x coordinate: ')
            y = input('Enter y coordinate: ')
            coordinate = functionalComponents.CoordinateString(x, y)
            # BUG FIX: the original condition used
            # "code != miss OR code != hit", which is always true, so
            # already-attacked tiles were accepted. A tile is valid only
            # when it is neither a recorded miss NOR a recorded hit.
            if(coordinate in self.board.tileList
                    and self.board.tileList[coordinate].code != self.board.missTileCode
                    and self.board.tileList[coordinate].code != self.board.hitTileCode):
                break
        y = int(y)
        self.board.AttackBoard(functionalComponents.CoordinateString(x, y))
#Attacks from an inital starting point left to right
def HorizontalLinearAttackStrategy(self):
    """
    Sweep the board row by row from a random starting tile, attacking
    every tile in order (wrapping to the top-left corner when the
    bottom-right is reached) until all ships are sunk.

    Returns a SimulationResult with the initial tile state, the ordered
    list of attacked coordinates, the move count, and the strategy name.
    """
    coordinateList = []
    moves = 1
    # calc starting point and make first attack
    startingChar = 'A'
    startingX = chr(ord(startingChar) + random.randint(0, self.board.size - 1))
    startingY = random.randint(0, self.board.size - 1)
    coordinateList.append(functionalComponents.CoordinateString(startingX, startingY))
    self.board.AttackBoard(functionalComponents.CoordinateString(startingX, startingY))
    x = str(startingX)
    y = startingY
    # NOTE(review): originalTile is assigned but never used afterwards.
    originalTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
    # loop until all the ships are sunk
    # calculate the next position to attack
    while(not self.board.CheckIfAllShipsSunk()):
        #self.board.PrintBoard()
        currentTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
        if(not currentTile.hasEastTile and not currentTile.hasSouthTile):
            # last tile of the board reached: wrap around to the first tile
            x = startingChar
            y = 0
        elif(not currentTile.hasEastTile):
            # end of the current row: advance to the next row, first column
            x = chr(ord(x) + 1)
            y = 0
        else:
            y += 1
        coordinateList.append(functionalComponents.CoordinateString(x, y))
        self.board.AttackBoard(functionalComponents.CoordinateString(x,y))
        moves += 1
    return SimulationResult(self.board.initalTileListState, coordinateList, moves, "Horizontal Linear")
#attacks top to bottom, starting at a random point and moving down each row, then to the next column
def VerticalLinearAttackStrategy(self):
    """
    Sweep the board column by column from a random starting tile,
    attacking every tile in order (wrapping to the first tile when the
    last is reached) until all ships are sunk.

    Returns a SimulationResult with the initial tile state, the ordered
    list of attacked coordinates, the move count, and the strategy name.
    """
    coordinateList = []
    moves = 1
    # calc starting point and make first attack
    startingChar = 'A'
    startingX = chr(ord(startingChar) + random.randint(0, self.board.size - 1))
    startingY = random.randint(0, self.board.size - 1)
    coordinateList.append(functionalComponents.CoordinateString(startingX, startingY))
    self.board.AttackBoard(functionalComponents.CoordinateString(startingX, startingY))
    x = str(startingX)
    y = startingY
    # NOTE(review): originalTile is assigned but never used afterwards.
    originalTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
    # loop until all the ships are sunk
    # calculate the next position to attack
    while(not self.board.CheckIfAllShipsSunk()):
        #self.board.PrintBoard()
        currentTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
        if(not currentTile.hasEastTile and not currentTile.hasSouthTile):
            # last tile of the board reached: wrap around to the first tile
            x = startingChar
            y = 0
        elif(not currentTile.hasSouthTile):
            # bottom of the current column: move to the top of the next one
            x = startingChar
            y += 1
        else:
            x = chr(ord(x) + 1)
        coordinateList.append(functionalComponents.CoordinateString(x, y))
        self.board.AttackBoard(functionalComponents.CoordinateString(x, y))
        moves += 1
    return SimulationResult(self.board.initalTileListState, coordinateList, moves, "Vertical Linear")
#randomly attacks coordinates until a hit is registers. then attack each adjacent tile until each direction registers a miss or is off the board
def RandomHitScanAttackStrategy(self):
    """
    Attack random tiles; after each pick, scan outward in each compass
    direction from the chosen tile while the neighbouring tiles remain
    available and the scan has not registered a miss, then return to the
    origin tile and attack it if still available. Repeats until all ships
    are sunk.

    Returns a SimulationResult with the initial tile state, the ordered
    list of attacked coordinates, the move count, and the strategy name.

    NOTE(review): the source's indentation was lost in extraction; the
    nesting below is a reconstruction of the apparent control flow —
    confirm against the original file. validCoordinateList is never used.
    """
    coordinateList = []
    validCoordinateList = []
    moves = 0
    # set all adjacent flags to false until a hit is registered
    checkNorth = False
    checkSouth = False
    checkWest = False
    checkEast = False
    currentCoordinate = ''
    # build a list of all coordinates
    availableCoordinates = self.board.GetAvailableCoordinateList()
    # loop until all ships are sunk
    while(not self.board.CheckIfAllShipsSunk()):
        # if all check flags are set to false, calc a new random coordinate that is available
        if(not checkNorth and not checkSouth and not checkWest and not checkEast):
            currentCoordinate = random.choice(availableCoordinates)
            initialCoordinate = currentCoordinate
        elif(checkNorth):
            # walk north while the next tile exists, is unattacked, and no miss yet
            while(checkNorth):
                currentCoordinate = functionalComponents.MoveCoordinateNorth(currentCoordinate)
                # attack with the generated coordinate
                coordinateList.append(currentCoordinate)
                availableCoordinates.remove(currentCoordinate)
                self.board.AttackBoard(currentCoordinate)
                checkNorth = self.board.tileList[currentCoordinate].hasNorthTile and functionalComponents.MoveCoordinateNorth(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
                moves += 1
        elif(checkSouth):
            while(checkSouth):
                currentCoordinate = functionalComponents.MoveCoordinateSouth(currentCoordinate)
                # attack with the generated coordinate
                coordinateList.append(currentCoordinate)
                availableCoordinates.remove(currentCoordinate)
                self.board.AttackBoard(currentCoordinate)
                checkSouth = self.board.tileList[currentCoordinate].hasSouthTile and functionalComponents.MoveCoordinateSouth(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
                moves += 1
        elif(checkWest):
            while(checkWest):
                currentCoordinate = functionalComponents.MoveCoordinateWest(currentCoordinate)
                # attack with the generated coordinate
                coordinateList.append(currentCoordinate)
                availableCoordinates.remove(currentCoordinate)
                self.board.AttackBoard(currentCoordinate)
                checkWest = self.board.tileList[currentCoordinate].hasWestTile and functionalComponents.MoveCoordinateWest(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
                moves += 1
        elif(checkEast):
            while(checkEast):
                currentCoordinate = functionalComponents.MoveCoordinateEast(currentCoordinate)
                # attack with the generated coordinate
                coordinateList.append(currentCoordinate)
                availableCoordinates.remove(currentCoordinate)
                self.board.AttackBoard(currentCoordinate)
                checkEast = self.board.tileList[currentCoordinate].hasEastTile and functionalComponents.MoveCoordinateEast(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
                moves += 1
        # set back to the original coordinate
        currentCoordinate = initialCoordinate
        # adjust check flags to the new coordinate
        checkNorth = self.board.tileList[currentCoordinate].hasNorthTile and functionalComponents.MoveCoordinateNorth(currentCoordinate) in availableCoordinates
        checkSouth = self.board.tileList[currentCoordinate].hasSouthTile and functionalComponents.MoveCoordinateSouth(currentCoordinate) in availableCoordinates
        checkWest = self.board.tileList[currentCoordinate].hasWestTile and functionalComponents.MoveCoordinateWest(currentCoordinate) in availableCoordinates
        checkEast = self.board.tileList[currentCoordinate].hasEastTile and functionalComponents.MoveCoordinateEast(currentCoordinate) in availableCoordinates
        # attack with the generated coordinate
        if(currentCoordinate in availableCoordinates):
            coordinateList.append(currentCoordinate)
            availableCoordinates.remove(currentCoordinate)
            self.board.AttackBoard(currentCoordinate)
            moves += 1
    return SimulationResult(self.board.initalTileListState, coordinateList, moves, "Random Hitscan")
#starts with a random tile on the board. moves diagonally, down and to the left after each attack.
def DiagonalLinearAttackStrategy(self):
    """
    Sweep the board along diagonals from a random starting tile, moving
    down-and-left after each attack and jumping to the next diagonal at
    the board edges, until all ships are sunk.

    Returns a SimulationResult with the initial tile state, the ordered
    list of attacked coordinates, the move count, and the strategy name.

    NOTE(review): the source's indentation was lost in extraction; the
    nesting below is a reconstruction of the apparent control flow —
    confirm against the original file.
    """
    coordinateList = []
    moves = 1
    # calc starting point and make first attack
    startingChar = 'A'
    startingX = chr(ord(startingChar) + random.randint(0, self.board.size - 1))
    startingY = random.randint(0, self.board.size - 1)
    coordinateList.append(functionalComponents.CoordinateString(startingX, startingY))
    self.board.AttackBoard(functionalComponents.CoordinateString(startingX, startingY))
    x = str(startingX)
    y = startingY
    # NOTE(review): originalTile is assigned but never used afterwards.
    originalTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
    # loop until all the ships are sunk
    # calculate the next position to attack
    while(not self.board.CheckIfAllShipsSunk()):
        currentTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
        if(not currentTile.hasWestTile and not currentTile.hasNorthTile):
            # top-left corner: start the next diagonal one row down
            x = startingChar
            y += 1
        elif(not currentTile.hasEastTile and not currentTile.hasSouthTile):
            # bottom-right corner: wrap around to the first tile
            x = startingChar
            y = 0
        elif(not currentTile.hasWestTile and not currentTile.hasSouthTile):
            # bottom-left corner: continue from the second row, last column
            x = chr(ord(startingChar) + 1)
            y = self.board.size - 1
        elif(not currentTile.hasSouthTile and functionalComponents.MoveCoordinateWest(currentTile.GetCoordiante()) != self.board.emptyTileCode):
            # bottom edge: jump to the start of the next diagonal
            x = chr(ord(startingChar) + y + 1)
            y = self.board.size - 1
        elif(not currentTile.hasWestTile):
            # left edge: restart the diagonal from the top row
            y = ord(x) - ord(startingChar) + 1
            x = startingChar
        else:
            # interior tile: step down-and-left along the diagonal
            x = chr(ord(x) + 1)
            y -= 1
        coordinateList.append(functionalComponents.CoordinateString(x, y))
        self.board.AttackBoard(functionalComponents.CoordinateString(x,y))
        moves += 1
    return SimulationResult(self.board.initalTileListState, coordinateList, moves, "Diagonal Linear")
def DiagonalHitScanAttackStratgy(self):
coordinateList = []
moves = 1
#calc starting point and make first attack
startingChar = 'A'
startingX = chr(ord(startingChar) + random.randint(0, self.board.size - 1))
startingY = random.randint(0, self.board.size - 1)
coordinateList.append(functionalComponents.CoordinateString(startingX, startingY))
self.board.AttackBoard(functionalComponents.CoordinateString(startingX, startingY))
x = str(startingX)
y = startingY
originalTile = self.board.tileList[functionalComponents.CoordinateString(x, y)]
validCoordinateList = []
#set all adjacent flags to false until a hit is registered
checkNorth = False
checkSouth = False
checkWest = False
checkEast = False
#build a list of all coordinates
availableCoordinates = self.board.GetAvailableCoordinateList()
#loop until all ships are sunk
while(not self.board.CheckIfAllShipsSunk()):
#if all check flags are set to false, calc a new random coordinate that is available
if(not checkNorth and not checkSouth and not checkWest and not checkEast):
currentTile = self.board.tileList[functionalComponents.CoordinateString(x,y)]
if(not currentTile.hasWestTile and not currentTile.hasNorthTile):
x = startingChar
y += 1
elif(not currentTile.hasEastTile and not currentTile.hasSouthTile):
x = startingChar
y = 0
elif(not currentTile.hasWestTile and not currentTile.hasSouthTile):
x = chr(ord(startingChar) + 1)
y = self.board.size - 1
elif(not currentTile.hasSouthTile and functionalComponents.MoveCoordinateWest(currentTile.GetCoordiante()) != self.board.emptyTileCode):
x = chr(ord(startingChar) + y + 1)
y = self.board.size - 1
elif(not currentTile.hasWestTile):
y = ord(x) - ord(startingChar) + 1
x = startingChar
else:
x = chr(ord(x) + 1)
y -= 1
currentCoordinate = functionalComponents.CoordinateString(x, y)
initialCoordinate = currentCoordinate
elif(checkNorth):
while(checkNorth):
currentCoordinate = functionalComponents.MoveCoordinateNorth(currentCoordinate)
#attack with the generated coordinate
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
checkNorth = self.board.tileList[currentCoordinate].hasNorthTile and functionalComponents.MoveCoordinateNorth(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
moves += 1
elif(checkSouth):
while(checkSouth):
currentCoordinate = functionalComponents.MoveCoordinateSouth(currentCoordinate)
#attack with the generated coordinate
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
checkSouth = self.board.tileList[currentCoordinate].hasSouthTile and functionalComponents.MoveCoordinateSouth(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
moves += 1
elif(checkWest):
while(checkWest):
currentCoordinate = functionalComponents.MoveCoordinateWest(currentCoordinate)
#attack with the generated coordinate
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
checkWest = self.board.tileList[currentCoordinate].hasWestTile and functionalComponents.MoveCoordinateWest(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
moves += 1
elif(checkEast):
while(checkEast):
currentCoordinate = functionalComponents.MoveCoordinateEast(currentCoordinate)
#attack with the generated coordinate
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
checkEast = self.board.tileList[currentCoordinate].hasEastTile and functionalComponents.MoveCoordinateEast(currentCoordinate) in availableCoordinates and self.board.tileList[currentCoordinate].code != self.board.missTileCode
moves += 1
#set back to the original coordinate
currentCoordinate = initialCoordinate
#adjust check flags to the new coordinate
checkNorth = self.board.tileList[currentCoordinate].hasNorthTile and functionalComponents.MoveCoordinateNorth(currentCoordinate) in availableCoordinates
checkSouth = self.board.tileList[currentCoordinate].hasSouthTile and functionalComponents.MoveCoordinateSouth(currentCoordinate) in availableCoordinates
checkWest = self.board.tileList[currentCoordinate].hasWestTile and functionalComponents.MoveCoordinateWest(currentCoordinate) in availableCoordinates
checkEast = self.board.tileList[currentCoordinate].hasEastTile and functionalComponents.MoveCoordinateEast(currentCoordinate) in availableCoordinates
#attack with the generated coordinate
if(currentCoordinate in availableCoordinates):
coordinateList.append(currentCoordinate)
availableCoordinates.remove(currentCoordinate)
self.board.AttackBoard(currentCoordinate)
moves += 1
return SimulationResult(self.board.initalTileListState, coordinateList, moves, "Diagonal Hitscan")
| 51.980535
| 286
| 0.647444
| 1,824
| 21,364
| 7.58114
| 0.113487
| 0.074197
| 0.044258
| 0.059011
| 0.832152
| 0.826078
| 0.819424
| 0.819424
| 0.815158
| 0.815158
| 0
| 0.004631
| 0.282344
| 21,364
| 410
| 287
| 52.107317
| 0.897274
| 0.113087
| 0
| 0.842466
| 0
| 0
| 0.009635
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037671
| false
| 0
| 0.020548
| 0
| 0.078767
| 0.006849
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4eff14ec5d75bc26443352e4db2b554b80a76d33
| 106
|
py
|
Python
|
eg/appengine/main.py
|
NathanW2/hy
|
c5e2fd955f707dedc2743acea232e8f7b0cd0868
|
[
"MIT"
] | 12
|
2015-01-01T21:21:31.000Z
|
2021-06-14T19:51:59.000Z
|
eg/appengine/main.py
|
NathanW2/hy
|
c5e2fd955f707dedc2743acea232e8f7b0cd0868
|
[
"MIT"
] | null | null | null |
eg/appengine/main.py
|
NathanW2/hy
|
c5e2fd955f707dedc2743acea232e8f7b0cd0868
|
[
"MIT"
] | 2
|
2016-01-17T21:59:29.000Z
|
2016-09-06T20:56:41.000Z
|
from hy.importer import import_file_to_module
__hymain__ = import_file_to_module('__hymain__', 'main.hy')
| 35.333333
| 59
| 0.830189
| 16
| 106
| 4.625
| 0.5625
| 0.27027
| 0.324324
| 0.486486
| 0.648649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075472
| 106
| 2
| 60
| 53
| 0.755102
| 0
| 0
| 0
| 0
| 0
| 0.160377
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f67155471432150b74f690fbedbdefd7ec910148
| 15,183
|
py
|
Python
|
tests/test_views/test_board.py
|
sirghiny/Real-Estate-Manager
|
10272feec22c40da7f927219225b8d2e27a20e38
|
[
"MIT"
] | null | null | null |
tests/test_views/test_board.py
|
sirghiny/Real-Estate-Manager
|
10272feec22c40da7f927219225b8d2e27a20e38
|
[
"MIT"
] | 1
|
2018-05-09T13:17:41.000Z
|
2018-05-09T13:17:41.000Z
|
tests/test_views/test_board.py
|
sirghiny/Real-Estate-Manager
|
10272feec22c40da7f927219225b8d2e27a20e38
|
[
"MIT"
] | 2
|
2018-05-01T15:03:13.000Z
|
2019-10-28T13:59:29.000Z
|
# pylint:disable=missing-docstring, invalid-name
from json import dumps, loads
from api.models import Board, Unit, User
from tests.base import BaseCase
class TestBoard(BaseCase):
"""
Convesation resource tests.
"""
def test_create_board_correctly(self):
self.user1.save()
self.user2.save()
response = self.client.post(
'/api/v1/boards/',
content_type='application/json',
data=dumps(self.board3_dict),
headers=self.headers)
expected = sorted(['estates_owned', 'id', 'members', 'units_owned'])
actual = sorted([i for i in loads(response.data)['data']['board']])
self.assertEqual(201, response.status_code)
self.assertEqual(expected, actual)
def test_create_board_no_members(self):
response = self.client.post(
'/api/v1/boards/',
content_type='application/json',
data=dumps({}),
headers=self.headers)
expected = {
'status': 'fail',
'message': 'Members list required.',
'help': 'It can be empty if only oneself is a member.'
}
actual = loads(response.data)
self.assertEqual(400, response.status_code)
self.assertEqual(expected, actual)
def test_create_board_nonexistent_members(self):
response = self.client.post(
'/api/v1/boards/',
content_type='application/json',
data=dumps(self.board3_dict),
headers=self.headers)
expected = {
'status': 'fail',
'message': 'The user does not exist.',
'missing_user': 1
}
actual = loads(response.data)
self.assertEqual(404, response.status_code)
self.assertEqual(expected, actual)
def test_view_board(self):
self.board1.save()
response = self.client.get(
'/api/v1/boards/1',
headers=self.headers)
expected = sorted(['estates_owned', 'id', 'members', 'units_owned'])
actual = sorted([i for i in loads(response.data)['data']['board']])
self.assertEqual(200, response.status_code)
self.assertEqual(expected, actual)
def test_view_many_boards(self):
self.board1.save()
self.board2.save()
response = self.client.get(
'/api/v1/boards/',
headers=self.headers)
expected = sorted(['estates_owned', 'id', 'members', 'units_owned'])
actual1 = sorted(
[i for i in loads(response.data)['data']['boards'][0]])
actual2 = sorted(
[i for i in loads(response.data)['data']['boards'][1]])
self.assertEqual(200, response.status_code)
self.assertEqual(2, len(loads(response.data)['data']['boards']))
self.assertEqual(expected, actual1)
self.assertEqual(expected, actual2)
def test_view_many_boards_if_none_exist(self):
response = self.client.get(
'/api/v1/boards/',
headers=self.headers)
expected = {
'status': 'fail',
'message': 'No boards exist.',
'help': 'Add boards to the database.'}
actual = loads(response.data)
self.assertEqual(404, response.status_code)
self.assertEqual(expected, actual)
def test_view_nonexistent_board(self):
response = self.client.get(
'/api/v1/boards/1',
headers=self.headers)
expected = {
'status': 'fail',
'message': 'The board does not exist.',
'help': 'Ensure board_id is of an existent board.'
}
actual = loads(response.data)
self.assertEqual(404, response.status_code)
self.assertEqual(expected, actual)
def test_view_members_of_nonexistent_board(self):
response = self.client.get(
'/api/v1/boards/1/members/',
headers=self.headers)
expected = {
'status': 'fail',
'message': 'The board does not exist.',
'help': 'Ensure board_id is of an existent board.'
}
actual = loads(response.data)
self.assertEqual(404, response.status_code)
self.assertEqual(expected, actual)
def test_view_members_of_board_with_none(self):
self.board1.save()
response = self.client.get(
'/api/v1/boards/1/members/',
headers=self.headers)
expected = {
'status': 'fail',
'message': 'The board has no members.',
'help': 'Add a user to the board if necessary.'}
actual = loads(response.data)
self.assertEqual(404, response.status_code)
self.assertEqual(expected, actual)
def test_view_members_of_board(self):
self.board1.save()
self.user1.save()
self.user2.save()
board1 = Board.get(id=1)
board1.insert('members', *User.get_all())
response = self.client.get(
'/api/v1/boards/1/members/',
headers=self.headers)
expected = {
'status': 'success',
'data': {
'members': [
{'id': 1, 'email': 'first1.last1@email.com',
'name': 'First1 Middle1 Last1',
'phone_number': '000 12 3456781'},
{'id': 2, 'email': 'first2.last2@email.com',
'name': 'First2 Middle2 Last2',
'phone_number': '000 12 3456782'}]}}
actual = loads(response.data)
self.assertEqual(200, response.status_code)
self.assertEqual(expected, actual)
def test_delete_board_nonexistent(self):
response = self.client.delete(
'/api/v1/boards/1',
headers=self.headers)
expected = {
'status': 'fail',
'message': 'The board does not exist.',
'help': 'Ensure board_id is of an existent board.'
}
actual = loads(response.data)
self.assertEqual(404, response.status_code)
self.assertEqual(expected, actual)
def test_delete_board(self):
self.board1.save()
response = self.client.delete(
'/api/v1/boards/1',
headers=self.headers)
expected = {
'status': 'success',
'message': 'Board with id 1 deleted.'
}
actual = loads(response.data)
self.assertEqual(200, response.status_code)
self.assertEqual(expected, actual)
def test_get_board_conversation(self):
self.user1.save()
self.user2.save()
self.client.post(
'/api/v1/boards/',
content_type='application/json',
data=dumps(self.board3_dict),
headers=self.headers)
response = self.client.get('/api/v1/boards/1/conversation/')
expected = sorted(['id', 'timestamp', 'title',
'board_id', 'participants', 'messages'])
actual = sorted(
list(loads(response.data)['data']['conversation'].keys()))
self.assertEqual(expected, actual)
def test_get_board_conversation_nonexistent(self):
response = self.client.get('/api/v1/boards/1/conversation/')
expected = {
'status': 'fail',
'message': 'The board does not exist.',
'help': 'Ensure board_id is of an existent board.'}
actual = loads(response.data)
self.assertEqual(expected, actual)
def test_get_board_estate_nonexistent(self):
response = self.client.get('/api/v1/boards/1/estates/')
expected = {
'status': 'fail',
'message': 'The board does not exist.',
'help': 'Ensure board_id is of an existent board.'}
actual = loads(response.data)
self.assertEqual(expected, actual)
def test_get_board_estates(self):
self.board1.save()
Board.get(id=1).estates_owned.append(self.estate1)
response = self.client.get('/api/v1/boards/1/estates/')
expected = {
'status': 'success',
'data': {
'estates': [
{'id': 1, 'address': 'Random Address 1', 'board_id': 1,
'board': {'id': 1, 'members': []},
'payment': 'None', 'units': []}]}}
actual = loads(response.data)
self.assertEqual(expected, actual)
def test_get_board_units_nonexistent(self):
response = self.client.get('/api/v1/boards/1/units/')
expected = {
'status': 'fail',
'message': 'The board does not exist.',
'help': 'Ensure board_id is of an existent board.'}
actual = loads(response.data)
self.assertEqual(expected, actual)
def test_get_board_units(self):
self.unit1.save()
self.board1.save()
unit1 = Unit.get(id=1)
unit1.insert('estate', self.estate1)
unit1.insert('board', self.board1)
unit1.insert('payment', self.payment1)
unit1.insert('resident', self.user1)
Board.get(id=1).insert('units_owned', Unit.get(id=1))
response = self.client.get('/api/v1/boards/1/units/')
expected = {
'status': 'success',
'data': {
'units': [
{'id': 1, 'name': 'Random Unit 1',
'board_id': 1, 'estate_id': 1, 'user_id': 1,
'board': {'id': 1, 'members': []},
'estate': {'id': 1, 'address': 'Random Address 1'},
'payment': {'id': 1, 'required': 0.0, 'balance': 0.0},
'resident': {
'id': 1, 'email': 'first1.last1@email.com',
'name': 'First1 Middle1 Last1',
'phone_number': '000 12 3456781'}}]}}
actual = loads(response.data)
self.assertEqual(expected, actual)
def test_add_board_members(self):
self.board1.save()
self.user1.save()
self.user2.save()
board1 = Board.get(id=1)
board1.insert('conversation', self.conversation1)
board1.insert('members', User.get(id=1))
response = self.client.patch(
'/api/v1/boards/1/members/',
headers=self.headers,
content_type='application/json',
data=dumps({'new_data': {'add': [2], 'remove': []}}))
expected = {
'status': 'success',
'data': {
'updated_members': [
{'id': 1, 'email': 'first1.last1@email.com',
'name': 'First1 Middle1 Last1',
'phone_number': '000 12 3456781'},
{'id': 2, 'email': 'first2.last2@email.com',
'name': 'First2 Middle2 Last2',
'phone_number': '000 12 3456782'}]}}
actual = loads(response.data)
self.assertDictEqual(expected, actual)
def test_remove_board_members(self):
self.board1.save()
self.user1.save()
self.user2.save()
board1 = Board.get(id=1)
board1.insert('conversation', self.conversation1)
board1.insert('members', User.get(id=1), User.get(id=2))
response = self.client.patch(
'/api/v1/boards/1/members/',
headers=self.headers,
content_type='application/json',
data=dumps({'new_data': {'add': [], 'remove': [2]}}))
expected = {
'status': 'success',
'data': {
'updated_members': [
{'id': 1, 'email': 'first1.last1@email.com',
'name': 'First1 Middle1 Last1',
'phone_number': '000 12 3456781'}]}}
actual = loads(response.data)
self.assertDictEqual(expected, actual)
def test_add_and_remove_board_members(self):
self.board1.save()
self.user1.save()
self.user2.save()
board1 = Board.get(id=1)
board1.insert('conversation', self.conversation1)
board1.insert('members', User.get(id=1))
response = self.client.patch(
'/api/v1/boards/1/members/',
headers=self.headers,
content_type='application/json',
data=dumps({'new_data': {'add': [2], 'remove': [1]}}))
expected = {
'status': 'success',
'data': {
'updated_members': [
{'id': 2, 'email': 'first2.last2@email.com',
'name': 'First2 Middle2 Last2',
'phone_number': '000 12 3456782'}]}}
actual = loads(response.data)
self.assertDictEqual(expected, actual)
def test_add_board_members_nonexistent(self):
self.board1.save()
self.user1.save()
board1 = Board.get(id=1)
board1.insert('conversation', self.conversation1)
board1.insert('members', User.get(id=1))
response = self.client.patch(
'/api/v1/boards/1/members/',
headers=self.headers,
content_type='application/json',
data=dumps({'new_data': {'add': [2], 'remove': []}}))
expected = {
'status': 'fail',
'message': 'The user does not exist.',
'help': 'Ensure ids are of existent users.'}
actual = loads(response.data)
self.assertDictEqual(expected, actual)
def test_remove_board_members_nonexistent(self):
self.board1.save()
self.user1.save()
board1 = Board.get(id=1)
board1.insert('conversation', self.conversation1)
board1.insert('members', User.get(id=1))
response = self.client.patch(
'/api/v1/boards/1/members/',
headers=self.headers,
content_type='application/json',
data=dumps({'new_data': {'add': [], 'remove': [2]}}))
expected = {
'status': 'fail',
'message': 'The user is not in the board.',
'help': 'Ensure ids are of board members.'}
actual = loads(response.data)
self.assertDictEqual(expected, actual)
def test_add_board_members_no_data(self):
response = self.client.patch(
'/api/v1/boards/1/members/',
headers=self.headers,
content_type='application/json',
data=dumps({}))
expected = {
'status': 'fail',
'message': 'Members list to add or remove required.',
'help': 'Provide an id list to add or remove.'}
actual = loads(response.data)
self.assertDictEqual(expected, actual)
def test_add_board_members_nonexistent_board(self):
response = self.client.patch(
'/api/v1/boards/1/members/',
headers=self.headers,
content_type='application/json',
data=dumps({'new_data': {'add': [2], 'remove': []}}))
expected = {
'status': 'fail',
'message': 'The board does not exist.',
'help': 'Ensure board_id is of an existent board.'}
actual = loads(response.data)
self.assertDictEqual(expected, actual)
| 38.437975
| 76
| 0.546598
| 1,615
| 15,183
| 5.040248
| 0.093498
| 0.058968
| 0.056388
| 0.059337
| 0.86769
| 0.846192
| 0.820147
| 0.800246
| 0.786609
| 0.770885
| 0
| 0.030645
| 0.312257
| 15,183
| 394
| 77
| 38.535533
| 0.748899
| 0.00494
| 0
| 0.753463
| 0
| 0
| 0.217002
| 0.037106
| 0
| 0
| 0
| 0
| 0.108033
| 1
| 0.069252
| false
| 0
| 0.00831
| 0
| 0.080332
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9ccb601b716c77fd1cbf1b3445042d57db62a967
| 96
|
py
|
Python
|
get-pyhelper.py
|
NotAHamSandwich/PyHelper
|
f976a3b387e6de9b2c2d81be6070bb445e15df51
|
[
"MIT"
] | 1
|
2021-07-24T17:27:48.000Z
|
2021-07-24T17:27:48.000Z
|
get-pyhelper.py
|
NotAHamSandwich/PyHelper
|
f976a3b387e6de9b2c2d81be6070bb445e15df51
|
[
"MIT"
] | null | null | null |
get-pyhelper.py
|
NotAHamSandwich/PyHelper
|
f976a3b387e6de9b2c2d81be6070bb445e15df51
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python3
import os
os.system('chmod +x pyhelper')
os.system('chmod +x pymath')
| 16
| 30
| 0.708333
| 16
| 96
| 4.25
| 0.6875
| 0.235294
| 0.382353
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 0.104167
| 96
| 5
| 31
| 19.2
| 0.77907
| 0.239583
| 0
| 0
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
9ccb63da0a866ba4184ee55fe9ee99652e12741a
| 1,484
|
py
|
Python
|
tests/test_1870.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_1870.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_1870.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import pytest
"""
Test 1870. Minimum Speed to Arrive on Time
"""
@pytest.fixture(scope="session")
def init_variables_1870():
from src.leetcode_1870_minimum_speed_to_arrive_on_time import Solution
solution = Solution()
def _init_variables_1870():
return solution
yield _init_variables_1870
class TestClass1870:
def test_solution_0(self, init_variables_1870):
assert init_variables_1870().minSpeedOnTime([1, 3, 2], 6) == 1
def test_solution_1(self, init_variables_1870):
assert init_variables_1870().minSpeedOnTime([1, 3, 2], 2.7) == 3
def test_solution_2(self, init_variables_1870):
assert init_variables_1870().minSpeedOnTime([1, 3, 2], 1.9) == -1
#!/usr/bin/env python
import pytest
"""
Test 1870. Minimum Speed to Arrive on Time
"""
@pytest.fixture(scope="session")
def init_variables_1870():
from src.leetcode_1870_minimum_speed_to_arrive_on_time import Solution
solution = Solution()
def _init_variables_1870():
return solution
yield _init_variables_1870
class TestClass1870:
def test_solution_0(self, init_variables_1870):
assert init_variables_1870().minSpeedOnTime([1, 3, 2], 6) == 1
def test_solution_1(self, init_variables_1870):
assert init_variables_1870().minSpeedOnTime([1, 3, 2], 2.7) == 3
def test_solution_2(self, init_variables_1870):
assert init_variables_1870().minSpeedOnTime([1, 3, 2], 1.9) == -1
| 23.555556
| 74
| 0.710243
| 208
| 1,484
| 4.75
| 0.182692
| 0.236842
| 0.309717
| 0.12753
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0.111842
| 0.180593
| 1,484
| 62
| 75
| 23.935484
| 0.700658
| 0.026954
| 0
| 1
| 0
| 0
| 0.010432
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.333333
| false
| 0
| 0.133333
| 0.066667
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 11
|
9ce1104f840556ee901470e1bd729c3c3c28491b
| 93
|
py
|
Python
|
src/sqlIntuitive/sqlGeneration/__init__.py
|
einfachIrgendwer0815/SqlIntuitive
|
11a0548ac2d6cfce295952bbf0f09a4faa4c42af
|
[
"MIT"
] | 6
|
2021-09-10T10:34:47.000Z
|
2022-03-09T13:50:39.000Z
|
src/sqlIntuitive/sqlGeneration/__init__.py
|
einfachIrgendwer0815/SqlIntuitive
|
11a0548ac2d6cfce295952bbf0f09a4faa4c42af
|
[
"MIT"
] | 1
|
2021-11-25T07:10:16.000Z
|
2021-11-26T12:18:14.000Z
|
src/sqlIntuitive/sqlGeneration/__init__.py
|
einfachIrgendwer0815/SqlIntuitive
|
11a0548ac2d6cfce295952bbf0f09a4faa4c42af
|
[
"MIT"
] | null | null | null |
from sqlIntuitive.sqlGeneration import standard
from sqlIntuitive.sqlGeneration import mysql
| 31
| 47
| 0.892473
| 10
| 93
| 8.3
| 0.6
| 0.385542
| 0.698795
| 0.843373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086022
| 93
| 2
| 48
| 46.5
| 0.976471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
9cff41176c6214f02c599d449a6f4fe057f205ba
| 3,215
|
py
|
Python
|
obstacle_avoiding_robot/robot.py
|
flyingbirdinsky/raspberrypi
|
dd8873c4f373bf77b22ec52515a9f4dcafa001c2
|
[
"MIT"
] | 3
|
2018-09-04T13:50:19.000Z
|
2020-11-09T18:04:43.000Z
|
obstacle_avoiding_robot/robot.py
|
flyingbirdinsky/raspberrypi
|
dd8873c4f373bf77b22ec52515a9f4dcafa001c2
|
[
"MIT"
] | null | null | null |
obstacle_avoiding_robot/robot.py
|
flyingbirdinsky/raspberrypi
|
dd8873c4f373bf77b22ec52515a9f4dcafa001c2
|
[
"MIT"
] | 2
|
2018-01-14T16:03:04.000Z
|
2019-12-13T06:18:47.000Z
|
#!/usr/bin/python -tt
import RPi.GPIO as GPIO
import time
import gpio_pins
class Robot:
def __init__(self):
self.pin = gpio_pins.GpioPins()
GPIO.setmode(GPIO.BOARD)
print "setup GPIO pins for robot"
GPIO.setup(self.pin.LEFT_MOTOR_FORWARD, GPIO.OUT)
GPIO.setup(self.pin.LEFT_MOTOR_BACKWARD, GPIO.OUT)
GPIO.setup(self.pin.RIGHT_MOTOR_FORWARD, GPIO.OUT)
GPIO.setup(self.pin.RIGHT_MOTOR_BACKWARD, GPIO.OUT)
self.stop()
def forward(self):
GPIO.output(self.pin.LEFT_MOTOR_FORWARD, 1)
GPIO.output(self.pin.LEFT_MOTOR_BACKWARD, 0)
GPIO.output(self.pin.RIGHT_MOTOR_FORWARD, 1)
GPIO.output(self.pin.RIGHT_MOTOR_BACKWARD, 0)
print "move forward"
time.sleep(0.1)
def backward(self):
GPIO.output(self.pin.LEFT_MOTOR_FORWARD, 0)
GPIO.output(self.pin.LEFT_MOTOR_BACKWARD, 1)
GPIO.output(self.pin.RIGHT_MOTOR_FORWARD, 0)
GPIO.output(self.pin.RIGHT_MOTOR_BACKWARD, 1)
print "move backward"
time.sleep(0.1)
def left(self):
GPIO.output(self.pin.LEFT_MOTOR_FORWARD, 0)
GPIO.output(self.pin.LEFT_MOTOR_BACKWARD, 0)
GPIO.output(self.pin.RIGHT_MOTOR_FORWARD, 1)
GPIO.output(self.pin.RIGHT_MOTOR_BACKWARD, 0)
print "turn left"
time.sleep(0.1)
def right(self):
GPIO.output(self.pin.LEFT_MOTOR_FORWARD, 1)
GPIO.output(self.pin.LEFT_MOTOR_BACKWARD, 0)
GPIO.output(self.pin.RIGHT_MOTOR_FORWARD, 0)
GPIO.output(self.pin.RIGHT_MOTOR_BACKWARD, 0)
print "turn right"
time.sleep(0.1)
def stop(self):
GPIO.output(self.pin.LEFT_MOTOR_FORWARD, 0)
GPIO.output(self.pin.LEFT_MOTOR_BACKWARD, 0)
GPIO.output(self.pin.RIGHT_MOTOR_FORWARD, 0)
GPIO.output(self.pin.RIGHT_MOTOR_BACKWARD, 0)
print "stop"
time.sleep(0.1)
def cleanup(self):
print "cleanup GPIO pins for robot"
GPIO.cleanup()
def test_forward_in_loop(self):
try:
while True:
self.forward()
time.sleep(1)
except KeyboardInterrupt as e:
print e
finally:
self.cleanup()
def test_backward_in_loop(self):
try:
while True:
self.backward()
time.sleep(1)
except KeyboardInterrupt as e:
print e
finally:
self.cleanup()
def test_left_in_loop(self):
try:
while True:
self.left()
time.sleep(1)
except KeyboardInterrupt as e:
print e
finally:
self.cleanup()
def test_right_in_loop(self):
try:
while True:
self.right()
time.sleep(1)
except KeyboardInterrupt as e:
print e
finally:
self.cleanup()
def test_forward_right_in_loop(self):
try:
while True:
self.forward()
time.sleep(2)
self.right()
except KeyboardInterrupt as e:
print e
finally:
self.cleanup()
| 29.768519
| 59
| 0.580404
| 410
| 3,215
| 4.380488
| 0.114634
| 0.097439
| 0.155902
| 0.18931
| 0.827951
| 0.760022
| 0.743875
| 0.714365
| 0.634187
| 0.606347
| 0
| 0.016092
| 0.323484
| 3,215
| 107
| 60
| 30.046729
| 0.809655
| 0.006221
| 0
| 0.628866
| 0
| 0
| 0.031309
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.030928
| null | null | 0.123711
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1405b764f6bdf4ae1813fe27d4a421f8aa2c04d3
| 152
|
py
|
Python
|
openrec/modules/fusions/__init__.py
|
BoData-Bot/openrec
|
3d655d21b762b40d50e53cea96d7802fd49c74ad
|
[
"Apache-2.0"
] | null | null | null |
openrec/modules/fusions/__init__.py
|
BoData-Bot/openrec
|
3d655d21b762b40d50e53cea96d7802fd49c74ad
|
[
"Apache-2.0"
] | null | null | null |
openrec/modules/fusions/__init__.py
|
BoData-Bot/openrec
|
3d655d21b762b40d50e53cea96d7802fd49c74ad
|
[
"Apache-2.0"
] | null | null | null |
from openrec.modules.fusions.fusion import Fusion
from openrec.modules.fusions.concat import Concat
from openrec.modules.fusions.average import Average
| 38
| 51
| 0.861842
| 21
| 152
| 6.238095
| 0.380952
| 0.251908
| 0.412214
| 0.572519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 152
| 3
| 52
| 50.666667
| 0.935714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
141146aa3a5843197c7628a51a2505f0584cbfba
| 4,420
|
py
|
Python
|
api/migrations/0004_auto_20160904_1214.py
|
kushsharma/GotAPI
|
d9712550c1498354e75ce1e2d018b9b71a5989ec
|
[
"Apache-2.0"
] | null | null | null |
api/migrations/0004_auto_20160904_1214.py
|
kushsharma/GotAPI
|
d9712550c1498354e75ce1e2d018b9b71a5989ec
|
[
"Apache-2.0"
] | null | null | null |
api/migrations/0004_auto_20160904_1214.py
|
kushsharma/GotAPI
|
d9712550c1498354e75ce1e2d018b9b71a5989ec
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-04 06:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the battle detail columns and rework the `id`/`name` fields.

    Adds 22 CharField columns to the `battle` model, each defaulting to
    the literal string 'null', then alters `id` to a plain integer
    primary key and widens `name`.
    """

    dependencies = [
        ('api', '0003_auto_20160904_1201'),
    ]

    # One AddField per (column, max_length) pair, in the original
    # migration order, followed by the two AlterField operations.
    operations = [
        migrations.AddField(
            model_name='battle',
            name=column,
            field=models.CharField(default='null', max_length=width),
        )
        for column, width in (
            ('attacker_1', 50),
            ('attacker_2', 50),
            ('attacker_3', 50),
            ('attacker_4', 50),
            ('attacker_commander', 150),
            ('attacker_king', 50),
            ('attacker_outcome', 50),
            ('attacker_size', 50),
            ('battle_type', 50),
            ('defender_1', 50),
            ('defender_2', 50),
            ('defender_3', 50),
            ('defender_4', 50),
            ('defender_commander', 150),
            ('defender_king', 50),
            ('defender_size', 50),
            ('location', 50),
            ('major_capture', 50),
            ('major_death', 50),
            ('note', 250),
            ('region', 50),
            ('summer', 10),
        )
    ] + [
        migrations.AlterField(
            model_name='battle',
            name='id',
            field=models.IntegerField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='battle',
            name='name',
            field=models.CharField(default='null', max_length=100),
        ),
    ]
| 32.5
| 73
| 0.544118
| 414
| 4,420
| 5.630435
| 0.169082
| 0.10725
| 0.15444
| 0.195624
| 0.869155
| 0.869155
| 0.81982
| 0.742171
| 0.742171
| 0.742171
| 0
| 0.030354
| 0.329186
| 4,420
| 135
| 74
| 32.740741
| 0.755818
| 0.014932
| 0
| 0.71875
| 1
| 0
| 0.117444
| 0.005286
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015625
| 0
| 0.039063
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
1425dd81d88b0fe0ca873b37752d722de0682bfa
| 31,372
|
py
|
Python
|
embyapi/api/tv_shows_service_api.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
embyapi/api/tv_shows_service_api.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
embyapi/api/tv_shows_service_api.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
Emby Server API
Explore the Emby Server API # noqa: E501
OpenAPI spec version: 4.1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from embyapi.api_client import ApiClient
class TvShowsServiceApi(object):
    """Client wrapper for the Emby Server ``/Shows`` TV endpoints.

    NOTE: This class wraps endpoints originally produced by the swagger
    code generator (https://github.com/swagger-api/swagger-codegen).
    Each endpoint is exposed as ``<name>`` (payload only) and
    ``<name>_with_http_info`` (payload plus HTTP metadata).  All calls
    are synchronous by default; pass ``async_req=True`` to receive a
    request thread whose ``get()`` yields the result.
    """

    # Keyword arguments accepted by every endpoint in addition to its
    # own query options; they control transport behaviour, not the query.
    _CONTROL_KWARGS = ('async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout')

    # Ordered (python_name, wire_name) pairs for each endpoint's query
    # string; order matches the generated client's append order.
    _EPISODES_QUERY = (
        ('user_id', 'UserId'), ('fields', 'Fields'), ('season', 'Season'),
        ('season_id', 'SeasonId'), ('is_missing', 'IsMissing'),
        ('adjacent_to', 'AdjacentTo'), ('start_item_id', 'StartItemId'),
        ('start_index', 'StartIndex'), ('limit', 'Limit'),
        ('enable_images', 'EnableImages'),
        ('image_type_limit', 'ImageTypeLimit'),
        ('enable_image_types', 'EnableImageTypes'),
        ('enable_user_data', 'EnableUserData'),
        ('sort_by', 'SortBy'), ('sort_order', 'SortOrder'))

    _SEASONS_QUERY = (
        ('user_id', 'UserId'), ('fields', 'Fields'),
        ('is_special_season', 'IsSpecialSeason'),
        ('is_missing', 'IsMissing'), ('adjacent_to', 'AdjacentTo'),
        ('enable_images', 'EnableImages'),
        ('image_type_limit', 'ImageTypeLimit'),
        ('enable_image_types', 'EnableImageTypes'),
        ('enable_user_data', 'EnableUserData'))

    _NEXTUP_QUERY = (
        ('user_id', 'UserId'), ('start_index', 'StartIndex'),
        ('limit', 'Limit'), ('fields', 'Fields'),
        ('series_id', 'SeriesId'), ('parent_id', 'ParentId'),
        ('enable_images', 'EnableImages'),
        ('image_type_limit', 'ImageTypeLimit'),
        ('enable_image_types', 'EnableImageTypes'),
        ('enable_user_data', 'EnableUserData'))

    _UPCOMING_QUERY = (
        ('user_id', 'UserId'), ('start_index', 'StartIndex'),
        ('limit', 'Limit'), ('fields', 'Fields'),
        ('parent_id', 'ParentId'), ('enable_images', 'EnableImages'),
        ('image_type_limit', 'ImageTypeLimit'),
        ('enable_image_types', 'EnableImageTypes'),
        ('enable_user_data', 'EnableUserData'))

    def __init__(self, api_client=None):
        """Create the wrapper; builds a default ApiClient when none is given."""
        self.api_client = ApiClient() if api_client is None else api_client

    def _shows_call(self, method_name, resource_path, params, required,
                    query_fields, kwargs):
        """Validate arguments, assemble the request pieces and dispatch.

        :param str method_name: public method name used in error messages
        :param str resource_path: endpoint path template, e.g. ``/Shows/NextUp``
        :param dict params: the caller's positional arguments by name
        :param tuple required: names in *params* that must not be None
        :param tuple query_fields: ordered (python_name, wire_name) pairs
        :param dict kwargs: the caller's keyword arguments
        :return: whatever ``api_client.call_api`` returns for this request
        :raises TypeError: on an unrecognised keyword argument
        :raises ValueError: when a required parameter is missing
        """
        accepted = set(params)
        accepted.update(name for name, _ in query_fields)
        accepted.update(self._CONTROL_KWARGS)
        for key, val in six.iteritems(kwargs):
            if key not in accepted:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
            params[key] = val
        for name in required:
            if params.get(name) is None:
                raise ValueError(
                    "Missing the required parameter `%s` when calling "
                    "`%s`" % (name, method_name))

        # 'id' is the only path parameter any of these endpoints uses.
        path_params = {}
        if 'id' in params:
            path_params['Id'] = params['id']
        query_params = [(wire, params[name])
                        for name, wire in query_fields if name in params]
        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/json', 'application/xml']),
        }
        return self.api_client.call_api(
            resource_path, 'GET',
            path_params,
            query_params,
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='QueryResultBaseItemDto',
            auth_settings=['apikeyauth', 'embyauth'],
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats={})

    def get_shows_by_id_episodes(self, user_id, id, **kwargs):  # noqa: E501
        """Gets episodes for a tv season.

        Requires authentication as user.  Synchronous by default; pass
        ``async_req=True`` for a request thread:

        >>> thread = api.get_shows_by_id_episodes(user_id, id, async_req=True)
        >>> result = thread.get()

        :param str user_id: User Id (required)
        :param str id: The series id (required)
        :param str fields: Optional. Additional fields to return in the output.
        :param int season: Optional filter by season number.
        :param str season_id: Optional. Filter by season id.
        :param bool is_missing: Optional filter by items missing episodes.
        :param str adjacent_to: Optional. Return siblings of a supplied item.
        :param str start_item_id: Optional. Skip until a given item is found.
        :param int start_index: Optional. The record index to start at.
        :param int limit: Optional. The maximum number of records to return.
        :param bool enable_images: Optional, include image information.
        :param int image_type_limit: Optional, max images per image type.
        :param str enable_image_types: Optional. Image types to include.
        :param bool enable_user_data: Optional, include user data.
        :param str sort_by: Optional. Sort orders, comma delimited.
        :param str sort_order: Sort Order - Ascending,Descending.
        :return: QueryResultBaseItemDto
        """
        kwargs['_return_http_data_only'] = True
        return self.get_shows_by_id_episodes_with_http_info(
            user_id, id, **kwargs)

    def get_shows_by_id_episodes_with_http_info(self, user_id, id, **kwargs):  # noqa: E501
        """Gets episodes for a tv season, with HTTP info.

        Same parameters as :meth:`get_shows_by_id_episodes`.

        :return: QueryResultBaseItemDto
        """
        return self._shows_call(
            'get_shows_by_id_episodes', '/Shows/{Id}/Episodes',
            {'user_id': user_id, 'id': id}, ('user_id', 'id'),
            self._EPISODES_QUERY, kwargs)

    def get_shows_by_id_seasons(self, user_id, id, **kwargs):  # noqa: E501
        """Gets seasons for a tv series.

        Requires authentication as user.  Synchronous by default; pass
        ``async_req=True`` for a request thread:

        >>> thread = api.get_shows_by_id_seasons(user_id, id, async_req=True)
        >>> result = thread.get()

        :param str user_id: User Id (required)
        :param str id: The series id (required)
        :param str fields: Optional. Additional fields to return in the output.
        :param bool is_special_season: Optional. Filter by special season.
        :param bool is_missing: Optional filter by items missing episodes.
        :param str adjacent_to: Optional. Return siblings of a supplied item.
        :param bool enable_images: Optional, include image information.
        :param int image_type_limit: Optional, max images per image type.
        :param str enable_image_types: Optional. Image types to include.
        :param bool enable_user_data: Optional, include user data.
        :return: QueryResultBaseItemDto
        """
        kwargs['_return_http_data_only'] = True
        return self.get_shows_by_id_seasons_with_http_info(
            user_id, id, **kwargs)

    def get_shows_by_id_seasons_with_http_info(self, user_id, id, **kwargs):  # noqa: E501
        """Gets seasons for a tv series, with HTTP info.

        Same parameters as :meth:`get_shows_by_id_seasons`.

        :return: QueryResultBaseItemDto
        """
        return self._shows_call(
            'get_shows_by_id_seasons', '/Shows/{Id}/Seasons',
            {'user_id': user_id, 'id': id}, ('user_id', 'id'),
            self._SEASONS_QUERY, kwargs)

    def get_shows_nextup(self, user_id, **kwargs):  # noqa: E501
        """Gets a list of next up episodes.

        Requires authentication as user.  Synchronous by default; pass
        ``async_req=True`` for a request thread:

        >>> thread = api.get_shows_nextup(user_id, async_req=True)
        >>> result = thread.get()

        :param str user_id: User Id (required)
        :param int start_index: Optional. The record index to start at.
        :param int limit: Optional. The maximum number of records to return.
        :param str fields: Optional. Additional fields to return in the output.
        :param str series_id: Optional. Filter by series id.
        :param str parent_id: Localize the search to a specific item or folder.
        :param bool enable_images: Optional, include image information.
        :param int image_type_limit: Optional, max images per image type.
        :param str enable_image_types: Optional. Image types to include.
        :param bool enable_user_data: Optional, include user data.
        :return: QueryResultBaseItemDto
        """
        kwargs['_return_http_data_only'] = True
        return self.get_shows_nextup_with_http_info(user_id, **kwargs)

    def get_shows_nextup_with_http_info(self, user_id, **kwargs):  # noqa: E501
        """Gets a list of next up episodes, with HTTP info.

        Same parameters as :meth:`get_shows_nextup`.

        :return: QueryResultBaseItemDto
        """
        return self._shows_call(
            'get_shows_nextup', '/Shows/NextUp',
            {'user_id': user_id}, ('user_id',),
            self._NEXTUP_QUERY, kwargs)

    def get_shows_upcoming(self, user_id, **kwargs):  # noqa: E501
        """Gets a list of upcoming episodes.

        Requires authentication as user.  Synchronous by default; pass
        ``async_req=True`` for a request thread:

        >>> thread = api.get_shows_upcoming(user_id, async_req=True)
        >>> result = thread.get()

        :param str user_id: User Id (required)
        :param int start_index: Optional. The record index to start at.
        :param int limit: Optional. The maximum number of records to return.
        :param str fields: Optional. Additional fields to return in the output.
        :param str parent_id: Localize the search to a specific item or folder.
        :param bool enable_images: Optional, include image information.
        :param int image_type_limit: Optional, max images per image type.
        :param str enable_image_types: Optional. Image types to include.
        :param bool enable_user_data: Optional, include user data.
        :return: QueryResultBaseItemDto
        """
        kwargs['_return_http_data_only'] = True
        return self.get_shows_upcoming_with_http_info(user_id, **kwargs)

    def get_shows_upcoming_with_http_info(self, user_id, **kwargs):  # noqa: E501
        """Gets a list of upcoming episodes, with HTTP info.

        Same parameters as :meth:`get_shows_upcoming`.

        :return: QueryResultBaseItemDto
        """
        return self._shows_call(
            'get_shows_upcoming', '/Shows/Upcoming',
            {'user_id': user_id}, ('user_id',),
            self._UPCOMING_QUERY, kwargs)
| 53.535836
| 352
| 0.6567
| 3,900
| 31,372
| 5.081282
| 0.068462
| 0.040369
| 0.042035
| 0.041227
| 0.957915
| 0.954635
| 0.944997
| 0.941162
| 0.940405
| 0.940405
| 0
| 0.01327
| 0.255355
| 31,372
| 585
| 353
| 53.62735
| 0.835024
| 0.464331
| 0
| 0.8
| 1
| 0
| 0.246591
| 0.032848
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029508
| false
| 0
| 0.013115
| 0
| 0.085246
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
142e446a9e5745fbdb5725181d94e359b6a7df52
| 12,722
|
py
|
Python
|
alembic/versions/5ca019edf61f_cascade_on_delete.py
|
JanSkalny/RootTheBox
|
64f0397dd60cd739270eada16d0db666071d8de6
|
[
"Apache-2.0"
] | 635
|
2015-01-01T20:04:14.000Z
|
2022-03-31T16:43:01.000Z
|
alembic/versions/5ca019edf61f_cascade_on_delete.py
|
JanSkalny/RootTheBox
|
64f0397dd60cd739270eada16d0db666071d8de6
|
[
"Apache-2.0"
] | 376
|
2015-01-03T20:19:27.000Z
|
2022-03-28T16:24:44.000Z
|
alembic/versions/5ca019edf61f_cascade_on_delete.py
|
JanSkalny/RootTheBox
|
64f0397dd60cd739270eada16d0db666071d8de6
|
[
"Apache-2.0"
] | 271
|
2015-01-01T23:57:17.000Z
|
2022-03-04T13:25:10.000Z
|
"""Cascade on Delete
Revision ID: 5ca019edf61f
Revises: 469f428604aa
Create Date: 2019-06-23 05:49:26.061932
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "5ca019edf61f"
down_revision = "469f428604aa"
branch_labels = None
depends_on = None
def upgrade():
    """Recreate junction-table foreign keys with ``ON DELETE CASCADE``.

    For each table listed below, the existing ``*_ibfk_*`` foreign-key
    constraints are dropped inside a batch operation (batch mode is what
    the original used; it also keeps the migration SQLite-compatible)
    and then recreated pointing at the referent table's ``id`` column
    with ``ondelete="CASCADE"``.

    The original hand-unrolled each table; the specs below preserve the
    exact table order, constraint names, and column names.
    """
    # (table, [(constraint_name, referent_table, local_column), ...])
    # Every FK in this migration references the referent's "id" column.
    fk_specs = [
        ("penalty", [
            ("penalty_ibfk_1", "team", "team_id"),
            ("penalty_ibfk_2", "flag", "flag_id"),
        ]),
        ("snapshot_team", [
            ("snapshot_team_ibfk_1", "team", "team_id"),
        ]),
        ("snapshot_to_snapshot_team", [
            ("snapshot_to_snapshot_team_ibfk_1", "snapshot", "snapshot_id"),
            ("snapshot_to_snapshot_team_ibfk_2", "snapshot_team", "snapshot_team_id"),
        ]),
        ("snapshot_team_to_flag", [
            ("snapshot_team_to_flag_ibfk_1", "snapshot_team", "snapshot_team_id"),
            ("snapshot_team_to_flag_ibfk_2", "flag", "flag_id"),
        ]),
        ("snapshot_team_to_game_level", [
            ("snapshot_team_to_game_level_ibfk_1", "snapshot_team", "snapshot_team_id"),
            # NOTE(review): "gam_level_id" (missing "e") is preserved from the
            # original migration; confirm against the real column name before
            # "fixing" it — the schema itself may carry the typo.
            ("snapshot_team_to_game_level_ibfk_2", "game_level", "gam_level_id"),
        ]),
        ("team_to_box", [
            ("team_to_box_ibfk_1", "team", "team_id"),
            ("team_to_box_ibfk_2", "box", "box_id"),
        ]),
        ("team_to_item", [
            ("team_to_item_ibfk_1", "team", "team_id"),
            ("team_to_item_ibfk_2", "market_item", "item_id"),
        ]),
        ("team_to_source_code", [
            ("team_to_source_code_ibfk_1", "team", "team_id"),
            ("team_to_source_code_ibfk_2", "source_code", "source_code_id"),
        ]),
        ("team_to_hint", [
            ("team_to_hint_ibfk_1", "team", "team_id"),
            ("team_to_hint_ibfk_2", "hint", "hint_id"),
        ]),
        ("team_to_flag", [
            ("team_to_flag_ibfk_1", "team", "team_id"),
            ("team_to_flag_ibfk_2", "flag", "flag_id"),
        ]),
        ("team_to_game_level", [
            ("team_to_game_level_ibfk_1", "team", "team_id"),
            ("team_to_game_level_ibfk_2", "game_level", "game_level_id"),
        ]),
    ]
    for table, fks in fk_specs:
        # Drop all old constraints for this table first, as the original did.
        with op.batch_alter_table(table) as batch_op:
            for name, _referent, _column in fks:
                batch_op.drop_constraint(name, type_="foreignkey")
        for name, referent, column in fks:
            op.create_foreign_key(
                name, table, referent, [column], ["id"], ondelete="CASCADE"
            )
def downgrade():
    """Restore the junction-table foreign keys with ``ON DELETE RESTRICT``.

    Exact mirror of :func:`upgrade`: the same constraints are dropped in
    batch mode and recreated against the same columns, but with
    ``ondelete="RESTRICT"`` as they were before this migration.
    """
    # (table, [(constraint_name, referent_table, local_column), ...])
    # Every FK in this migration references the referent's "id" column.
    fk_specs = [
        ("penalty", [
            ("penalty_ibfk_1", "team", "team_id"),
            ("penalty_ibfk_2", "flag", "flag_id"),
        ]),
        ("snapshot_team", [
            ("snapshot_team_ibfk_1", "team", "team_id"),
        ]),
        ("snapshot_to_snapshot_team", [
            ("snapshot_to_snapshot_team_ibfk_1", "snapshot", "snapshot_id"),
            ("snapshot_to_snapshot_team_ibfk_2", "snapshot_team", "snapshot_team_id"),
        ]),
        ("snapshot_team_to_flag", [
            ("snapshot_team_to_flag_ibfk_1", "snapshot_team", "snapshot_team_id"),
            ("snapshot_team_to_flag_ibfk_2", "flag", "flag_id"),
        ]),
        ("snapshot_team_to_game_level", [
            ("snapshot_team_to_game_level_ibfk_1", "snapshot_team", "snapshot_team_id"),
            # NOTE(review): "gam_level_id" typo preserved from the original —
            # confirm against the real column name (see upgrade()).
            ("snapshot_team_to_game_level_ibfk_2", "game_level", "gam_level_id"),
        ]),
        ("team_to_box", [
            ("team_to_box_ibfk_1", "team", "team_id"),
            ("team_to_box_ibfk_2", "box", "box_id"),
        ]),
        ("team_to_item", [
            ("team_to_item_ibfk_1", "team", "team_id"),
            ("team_to_item_ibfk_2", "market_item", "item_id"),
        ]),
        ("team_to_source_code", [
            ("team_to_source_code_ibfk_1", "team", "team_id"),
            ("team_to_source_code_ibfk_2", "source_code", "source_code_id"),
        ]),
        ("team_to_hint", [
            ("team_to_hint_ibfk_1", "team", "team_id"),
            ("team_to_hint_ibfk_2", "hint", "hint_id"),
        ]),
        ("team_to_flag", [
            ("team_to_flag_ibfk_1", "team", "team_id"),
            ("team_to_flag_ibfk_2", "flag", "flag_id"),
        ]),
        ("team_to_game_level", [
            ("team_to_game_level_ibfk_1", "team", "team_id"),
            ("team_to_game_level_ibfk_2", "game_level", "game_level_id"),
        ]),
    ]
    for table, fks in fk_specs:
        # Drop all old constraints for this table first, as the original did.
        with op.batch_alter_table(table) as batch_op:
            for name, _referent, _column in fks:
                batch_op.drop_constraint(name, type_="foreignkey")
        for name, referent, column in fks:
            op.create_foreign_key(
                name, table, referent, [column], ["id"], ondelete="RESTRICT"
            )
| 29.517401
| 88
| 0.613897
| 1,579
| 12,722
| 4.402153
| 0.040532
| 0.096677
| 0.066465
| 0.126888
| 0.963746
| 0.961588
| 0.961588
| 0.961588
| 0.961588
| 0.95943
| 0
| 0.014196
| 0.258057
| 12,722
| 430
| 89
| 29.586047
| 0.722216
| 0.011476
| 0
| 0.853535
| 0
| 0
| 0.358132
| 0.127148
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005051
| false
| 0
| 0.005051
| 0
| 0.010101
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1445b1c32b3c022d132a0b891ff1d82739f13b3a
| 19
|
py
|
Python
|
test01/login.py
|
16675571090/test
|
1953dc03b559d01df00c7b68ab08ce012c74ad86
|
[
"MIT"
] | null | null | null |
test01/login.py
|
16675571090/test
|
1953dc03b559d01df00c7b68ab08ce012c74ad86
|
[
"MIT"
] | null | null | null |
test01/login.py
|
16675571090/test
|
1953dc03b559d01df00c7b68ab08ce012c74ad86
|
[
"MIT"
] | null | null | null |
# Module-level constants (normalized spacing; independent assignments
# reordered alphabetically — values unchanged).
a = 1
b = 2
c = 2
| 2.714286
| 5
| 0.315789
| 6
| 19
| 1
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.526316
| 19
| 6
| 6
| 3.166667
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1455156a0397b53843a1d95cccb6be85b2889343
| 167
|
py
|
Python
|
python/src/mitoscripts/__init__.py
|
granthussey/MitoScripts
|
79b9d8be2dcf94e3fbc22d735e22bf773b7d0079
|
[
"MIT"
] | 1
|
2019-12-18T20:18:51.000Z
|
2019-12-18T20:18:51.000Z
|
python/src/mitoscripts/__init__.py
|
granthussey/MitoScripts
|
79b9d8be2dcf94e3fbc22d735e22bf773b7d0079
|
[
"MIT"
] | null | null | null |
python/src/mitoscripts/__init__.py
|
granthussey/MitoScripts
|
79b9d8be2dcf94e3fbc22d735e22bf773b7d0079
|
[
"MIT"
] | null | null | null |
#__all__ = ['mitographer', 'mitopca', 'mitodata']
#from mitoscripts import mitodata
#from mitoscripts import mitographer
#from mitoscripts import mitopca
| 27.833333
| 49
| 0.730539
| 16
| 167
| 7.375
| 0.4375
| 0.381356
| 0.533898
| 0.491525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179641
| 167
| 6
| 50
| 27.833333
| 0.861314
| 0.874252
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
146e74ec6608e0ff83d62117a9896558e9ad0419
| 5,872
|
py
|
Python
|
tests/test_socfaker_products_alienvault_usm.py
|
priamai/soc-faker
|
51b587f0cec52212136905280406e915006d2afc
|
[
"MIT"
] | 122
|
2020-02-21T16:06:54.000Z
|
2022-03-21T13:53:03.000Z
|
tests/test_socfaker_products_alienvault_usm.py
|
priamai/soc-faker
|
51b587f0cec52212136905280406e915006d2afc
|
[
"MIT"
] | 13
|
2020-01-29T16:37:05.000Z
|
2022-01-27T21:30:10.000Z
|
tests/test_socfaker_products_alienvault_usm.py
|
priamai/soc-faker
|
51b587f0cec52212136905280406e915006d2afc
|
[
"MIT"
] | 20
|
2020-04-10T11:59:29.000Z
|
2022-02-10T09:20:26.000Z
|
# Pytest suite for the AlienVault USM fake-data generator: each test asserts
# that the corresponding property on the shared ``socfaker_fixture`` (a pytest
# fixture defined elsewhere) yields a truthy value, i.e. the generator
# produces non-empty data without raising.
def test_socfaker_products_alienvault_usm_event_type(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.event_type
def test_socfaker_products_alienvault_usm_id(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.id
def test_socfaker_products_alienvault_usm_description(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.description
def test_socfaker_products_alienvault_usm_severity(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.severity
def test_socfaker_products_alienvault_usm_action(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.action
def test_socfaker_products_alienvault_usm_category(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.category
def test_socfaker_products_alienvault_usm_subcategory(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.subcategory
def test_socfaker_products_alienvault_usm_destination_hostname(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.destination_hostname
def test_socfaker_products_alienvault_usm_destination_fqdn(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.destination_fqdn
def test_socfaker_products_alienvault_usm_destination_address(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.destination_address
def test_socfaker_products_alienvault_usm_destination_port(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.destination_port
def test_socfaker_products_alienvault_usm_destination_port_label(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.destination_port_label
def test_socfaker_products_alienvault_usm_destination_asset_id(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.destination_asset_id
def test_socfaker_products_alienvault_usm_destination_longitude(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.destination_longitude
def test_socfaker_products_alienvault_usm_destination_latitude(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.destination_latitude
def test_socfaker_products_alienvault_usm_destination_city(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.destination_city
def test_socfaker_products_alienvault_usm_destination_country(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.destination_country
def test_socfaker_products_alienvault_usm_destination_region(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.destination_region
def test_socfaker_products_alienvault_usm_source_hostname(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.source_hostname
def test_socfaker_products_alienvault_usm_source_fqdn(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.source_fqdn
def test_socfaker_products_alienvault_usm_source_address(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.source_address
def test_socfaker_products_alienvault_usm_source_port(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.source_port
def test_socfaker_products_alienvault_usm_source_port_label(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.source_port_label
def test_socfaker_products_alienvault_usm_source_asset_id(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.source_asset_id
def test_socfaker_products_alienvault_usm_source_longitude(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.source_longitude
def test_socfaker_products_alienvault_usm_source_latitude(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.source_latitude
def test_socfaker_products_alienvault_usm_source_city(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.source_city
def test_socfaker_products_alienvault_usm_source_country(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.source_country
def test_socfaker_products_alienvault_usm_source_region(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.source_region
def test_socfaker_products_alienvault_usm_plugin(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.plugin
def test_socfaker_products_alienvault_usm_plugin_device(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.plugin_device
def test_socfaker_products_alienvault_usm_plugin_device_type(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.plugin_device_type
def test_socfaker_products_alienvault_usm_plugin_version(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.plugin_version
def test_socfaker_products_alienvault_usm_packets_sent(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.packets_sent
def test_socfaker_products_alienvault_usm_packets_received(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.packets_received
def test_socfaker_products_alienvault_usm_packet_type(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.packet_type
def test_socfaker_products_alienvault_usm_bytes_in(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.bytes_in
def test_socfaker_products_alienvault_usm_bytes_out(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.bytes_out
def test_socfaker_products_alienvault_usm_app_display_name(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.app_display_name
def test_socfaker_products_alienvault_usm_application_protocol(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.application_protocol
def test_socfaker_products_alienvault_usm_transport_protocol(socfaker_fixture):
    assert socfaker_fixture.products.alienvault.USM.transport_protocol
| 48.131148
| 83
| 0.894414
| 736
| 5,872
| 6.637228
| 0.061141
| 0.302149
| 0.352508
| 0.19304
| 0.981986
| 0.956192
| 0.884135
| 0.658751
| 0.49826
| 0.115046
| 0
| 0
| 0.05671
| 5,872
| 122
| 84
| 48.131148
| 0.881928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
14987fc68a40f8a4ba1ced8b583b285642ddc4c1
| 12,306
|
py
|
Python
|
plugins/statistics.py
|
sooualil/atlas-feature-extraction-extension
|
d0f9284ff710d095b1b61c226a28de26f738dd0c
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/statistics.py
|
sooualil/atlas-feature-extraction-extension
|
d0f9284ff710d095b1b61c226a28de26f738dd0c
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/statistics.py
|
sooualil/atlas-feature-extraction-extension
|
d0f9284ff710d095b1b61c226a28de26f738dd0c
|
[
"BSD-3-Clause"
] | null | null | null |
from nfstream import NFPlugin
import math
class AuxPktSizeFeatures(NFPlugin):
    """
    Count the packets of a flow per IP-size bucket.

    Attributes
    ----------
    flow.udps.num_pkts_up_to_128_bytes: %NUM_PKTS_UP_TO_128_BYTES packets of at most 128 bytes
    flow.udps.num_pkts_128_to_256_bytes: %NUM_PKTS_128_TO_256_BYTES packets of size in (128, 256]
    flow.udps.num_pkts_256_to_512_bytes: %NUM_PKTS_256_TO_512_BYTES packets of size in (256, 512]
    flow.udps.num_pkts_512_to_1024_bytes: %NUM_PKTS_512_TO_1024_BYTES packets of size in (512, 1024]
    flow.udps.num_pkts_1024_to_1514_bytes: %NUM_PKTS_1024_TO_1514_BYTES packets of size in (1024, 1514]
    """

    @staticmethod
    def _tally(packet, flow):
        """Increment the bucket counter matching ``packet.ip_size``.

        The elif chain makes the original's redundant lower-bound checks
        (``size > 128 and size <= 256`` etc.) unnecessary. Packets larger
        than 1514 bytes are not counted anywhere, matching the original.
        """
        size = packet.ip_size
        if size <= 128:
            flow.udps.num_pkts_up_to_128_bytes += 1
        elif size <= 256:
            flow.udps.num_pkts_128_to_256_bytes += 1
        elif size <= 512:
            flow.udps.num_pkts_256_to_512_bytes += 1
        elif size <= 1024:
            flow.udps.num_pkts_512_to_1024_bytes += 1
        elif size <= 1514:
            flow.udps.num_pkts_1024_to_1514_bytes += 1

    def on_init(self, packet, flow):
        """Zero all bucket counters, then count the flow's first packet."""
        flow.udps.num_pkts_up_to_128_bytes = 0
        flow.udps.num_pkts_128_to_256_bytes = 0
        flow.udps.num_pkts_256_to_512_bytes = 0
        flow.udps.num_pkts_512_to_1024_bytes = 0
        flow.udps.num_pkts_1024_to_1514_bytes = 0
        self._tally(packet, flow)

    def on_update(self, packet, flow):
        """Count every subsequent packet of the flow (same bucketing as on_init)."""
        self._tally(packet, flow)
class AuxSecBytesFeatures(NFPlugin):
    """
    Compute per-second byte counts and average throughput for each flow
    direction, two ways: a hand-rolled one-second windowing over packet
    timestamps (the plain attributes) and a simple duration-based average
    (the ``*2`` attributes).

    Attributes
    ----------
    flow.udps.src_to_dst_second_bytes: %SRC_TO_DST_SECOND_BYTES Bytes/sec (src->dst)
    flow.udps.dst_to_src_second_bytes: %DST_TO_SRC_SECOND_BYTES Bytes/sec2 (dst->src)
    flow.udps.src_to_dst_avg_throughput: %SRC_TO_DST_AVG_THROUGHPUT Src to dst average thpt (bps)
    flow.udps.dst_to_src_avg_throughput: %DST_TO_SRC_AVG_THROUGHPUT Dst to src average thpt (bps)
    flow.udps.src_to_dst_second_bytes2: %SRC_TO_DST_SECOND_BYTES Bytes/sec (src->dst)
    flow.udps.dst_to_src_second_bytes2: %DST_TO_SRC_SECOND_BYTES Bytes/sec2 (dst->src)
    flow.udps.src_to_dst_avg_throughput2: %SRC_TO_DST_AVG_THROUGHPUT Src to dst average thpt (bps)
    flow.udps.dst_to_src_avg_throughput2: %DST_TO_SRC_AVG_THROUGHPUT Dst to src average thpt (bps)
    """
    def on_init(self, packet, flow):
        # Per-direction window books: key -> {'is_completed', 'start', 'end', 'size'}.
        # k_s2d / k_d2s hold the current (latest) window key for each direction.
        self.dic_src2dst = {}
        self.dic_dst2src = {}
        self.k_s2d = 0
        self.k_d2s = 0
        flow.udps.src_to_dst_second_bytes = 0
        flow.udps.dst_to_src_second_bytes = 0
        flow.udps.src_to_dst_avg_throughput = 0
        flow.udps.dst_to_src_avg_throughput = 0
        ###
        flow.udps.src_to_dst_second_bytes2 = 0
        flow.udps.dst_to_src_second_bytes2 = 0
        flow.udps.src_to_dst_avg_throughput2 = 0
        flow.udps.dst_to_src_avg_throughput2 = 0
        # Open the first window for whichever direction the first packet has
        # (0 = src->dst, 1 = dst->src).
        if packet.direction == 0:
            self.k_s2d = self.k_s2d + 1
            self.dic_src2dst[self.k_s2d] = {'is_completed':False, 'start': packet.time, 'end':packet.time, 'size':packet.ip_size}
        elif packet.direction == 1:
            self.k_d2s = self.k_d2s + 1
            self.dic_dst2src[self.k_d2s] = {'is_completed':False, 'start': packet.time, 'end':packet.time, 'size':packet.ip_size}
    def on_update(self, packet, flow):
        # Window maintenance for the packet's direction. Timestamps are divided
        # by 1000 before comparing against 1 — presumably packet.time is in
        # milliseconds and windows are one second long; TODO confirm.
        if packet.direction == 0:
            if self.k_s2d < 1:
                # No window opened yet for src->dst (first packet went the other way).
                self.k_s2d = self.k_s2d + 1
                self.dic_src2dst[self.k_s2d] = {'is_completed':False, 'start': packet.time, 'end':packet.time, 'size':packet.ip_size}
            else:
                if self.dic_src2dst[self.k_s2d]['is_completed'] == True:
                    # Current window is closed: open a fresh one for this packet.
                    #print('completed s2d, key :', last_key)
                    self.k_s2d = self.k_s2d+ 1
                    #print('new key :', new_key)
                    self.dic_src2dst[self.k_s2d]= {'is_completed':False, 'start': packet.time, 'end':packet.time, 'size':packet.ip_size}
                else:
                    start = self.dic_src2dst[self.k_s2d]['start']
                    end = self.dic_src2dst[self.k_s2d]['end']
                    # delta1: seconds since the window opened; delta2: since last packet.
                    delta1 = (packet.time - start) / 1000
                    delta2 = (packet.time - end) / 1000
                    if delta1 <= 1:
                        # Packet falls inside the current window: accumulate.
                        self.dic_src2dst[self.k_s2d]['end']= packet.time
                        self.dic_src2dst[self.k_s2d]['size'] = self.dic_src2dst[self.k_s2d]['size'] + packet.ip_size
                        if delta1 == 1:
                            self.dic_src2dst[self.k_s2d]['is_completed'] = True
                    elif delta1 > 1:
                        # Window expired: close it, pad any fully-idle seconds
                        # with empty windows, then open a new one if needed.
                        self.dic_src2dst[self.k_s2d]['is_completed'] = True
                        if math.floor(delta2) >= 1:
                            for i in range(math.floor(delta2)):
                                # NOTE(review): key advances by i + 1 each pass, so gap
                                # keys grow faster than one per idle second — looks
                                # unintended; confirm before relying on key continuity.
                                self.k_s2d = self.k_s2d + i + 1
                                self.dic_src2dst[self.k_s2d]= {'is_completed':True, 'start': 0, 'end':0, 'size':0}
                            if delta2 % 1 != 0:
                                # NOTE(review): last_key is computed but never used.
                                last_key = list(self.dic_src2dst.keys())[-1]
                                self.k_s2d = self.k_s2d + 1
                                self.dic_src2dst[self.k_s2d]= {'is_completed':False, 'start': packet.time, 'end':packet.time, 'size':packet.ip_size}
                        else:
                            self.k_s2d = self.k_s2d + 1
                            self.dic_src2dst[self.k_s2d]= {'is_completed':False, 'start': packet.time, 'end':packet.time, 'size':packet.ip_size}
        elif packet.direction == 1:
            # Mirror of the src->dst branch for the dst->src direction.
            if self.k_d2s < 1:
                self.k_d2s = self.k_d2s + 1
                self.dic_dst2src[self.k_d2s] = {'is_completed':False, 'start': packet.time, 'end':packet.time, 'size':packet.ip_size}
            else:
                if self.dic_dst2src[self.k_d2s]['is_completed'] == True:
                    #print('completed s2d, key :', last_key)
                    self.k_d2s = self.k_d2s+1
                    #print('new key :', new_key)
                    self.dic_dst2src[self.k_d2s]= {'is_completed':False, 'start': packet.time, 'end':packet.time, 'size':packet.ip_size}
                else:
                    start = self.dic_dst2src[self.k_d2s]['start']
                    end = self.dic_dst2src[self.k_d2s]['end']
                    delta1 = (packet.time - start) / 1000
                    delta2 = (packet.time - end) / 1000
                    if delta1 <= 1:
                        self.dic_dst2src[self.k_d2s]['end']= packet.time
                        self.dic_dst2src[self.k_d2s]['size'] = self.dic_dst2src[self.k_d2s]['size'] + packet.ip_size
                        if delta1 == 1:
                            self.dic_dst2src[self.k_d2s]['is_completed'] = True
                    elif delta1 > 1:
                        self.dic_dst2src[self.k_d2s]['is_completed'] = True
                        if math.floor(delta2) >= 1:
                            for i in range(math.floor(delta2)):
                                # NOTE(review): same i + 1 key jump as the s2d branch.
                                self.k_d2s = self.k_d2s + i + 1
                                self.dic_dst2src[self.k_d2s]= {'is_completed':True, 'start': 0, 'end':0, 'size':0}
                            if delta2 % 1 != 0:
                                self.k_d2s = self.k_d2s + 1
                                self.dic_dst2src[self.k_d2s]= {'is_completed':False, 'start': packet.time, 'end':packet.time, 'size':packet.ip_size}
                        else:
                            self.k_d2s = self.k_d2s + 1
                            self.dic_dst2src[self.k_d2s]= {'is_completed':False, 'start': packet.time, 'end':packet.time, 'size':packet.ip_size}
        # Recompute the aggregate statistics over all windows on EVERY packet
        # (only windows with size > 0 count toward the per-second averages).
        thpt_s2d = 0
        thpt_d2s = 0
        scb_s2d = 0
        scb_d2s = 0
        l_s2d = 0
        l_d2s = 0
        for k in list(self.dic_src2dst.keys()):
            size = self.dic_src2dst[k]['size']
            if size > 0:
                scb_s2d += size
                thpt_s2d += (8 * size)
                l_s2d += 1
        for k in list(self.dic_dst2src.keys()):
            size = self.dic_dst2src[k]['size']
            if size > 0:
                scb_d2s += size
                thpt_d2s += (8 * size)
                l_d2s += 1
        if l_s2d > 0:
            scb_s2d = scb_s2d / l_s2d
            thpt_s2d = thpt_s2d / l_s2d
        else:
            # No non-empty window yet: fall back to the flow's byte totals.
            scb_s2d = flow.src2dst_bytes
            thpt_s2d = 8 * flow.src2dst_bytes
        if l_d2s > 0:
            scb_d2s = scb_d2s / l_d2s
            thpt_d2s = thpt_d2s / l_d2s
        else:
            scb_d2s = flow.dst2src_bytes
            thpt_d2s = 8 * flow.dst2src_bytes
        flow.udps.src_to_dst_second_bytes = scb_s2d
        flow.udps.dst_to_src_second_bytes = scb_d2s
        flow.udps.src_to_dst_avg_throughput = thpt_s2d
        flow.udps.dst_to_src_avg_throughput = thpt_d2s
        # Strategy 2: total bytes over the direction's duration (seconds),
        # with the raw byte count as fallback for zero-duration flows.
        flow.udps.src_to_dst_second_bytes2 = flow.src2dst_bytes/(flow.src2dst_duration_ms/1000) if flow.src2dst_duration_ms > 0 else flow.src2dst_bytes
        flow.udps.dst_to_src_second_bytes2 = flow.dst2src_bytes/(flow.dst2src_duration_ms/1000) if flow.dst2src_duration_ms > 0 else flow.dst2src_bytes
        flow.udps.src_to_dst_avg_throughput2 = (8 * flow.src2dst_bytes/(flow.src2dst_duration_ms/1000)) if flow.src2dst_duration_ms > 0 else (8 * flow.src2dst_bytes)
        flow.udps.dst_to_src_avg_throughput2 = (8 * flow.dst2src_bytes/(flow.dst2src_duration_ms/1000)) if flow.dst2src_duration_ms > 0 else (8 * flow.dst2src_bytes)
    def on_expire(self, flow):
        # Final pass at flow expiry: same aggregation as the tail of on_update
        # (no new packet to fold in, only the already-recorded windows).
        thpt_s2d = 0
        thpt_d2s = 0
        scb_s2d = 0
        scb_d2s = 0
        l_s2d = 0
        l_d2s = 0
        for k in list(self.dic_src2dst.keys()):
            size = self.dic_src2dst[k]['size']
            if size > 0:
                scb_s2d += size
                thpt_s2d += (8 * size)
                l_s2d += 1
        for k in list(self.dic_dst2src.keys()):
            size = self.dic_dst2src[k]['size']
            if size > 0:
                scb_d2s += size
                thpt_d2s += (8 * size)
                l_d2s += 1
        if l_s2d > 0:
            scb_s2d = scb_s2d / l_s2d
            thpt_s2d = thpt_s2d / l_s2d
        else:
            scb_s2d = flow.src2dst_bytes
            thpt_s2d = 8 * flow.src2dst_bytes
        if l_d2s > 0:
            scb_d2s = scb_d2s / l_d2s
            thpt_d2s = thpt_d2s / l_d2s
        else:
            scb_d2s = flow.dst2src_bytes
            thpt_d2s = 8 * flow.dst2src_bytes
        flow.udps.src_to_dst_second_bytes = scb_s2d
        flow.udps.dst_to_src_second_bytes = scb_d2s
        flow.udps.src_to_dst_avg_throughput = thpt_s2d
        flow.udps.dst_to_src_avg_throughput = thpt_d2s
        flow.udps.src_to_dst_second_bytes2 = flow.src2dst_bytes/(flow.src2dst_duration_ms/1000) if flow.src2dst_duration_ms > 0 else flow.src2dst_bytes
        flow.udps.dst_to_src_second_bytes2 = flow.dst2src_bytes/(flow.dst2src_duration_ms/1000) if flow.dst2src_duration_ms > 0 else flow.dst2src_bytes
        flow.udps.src_to_dst_avg_throughput2 = (8 * flow.src2dst_bytes/(flow.src2dst_duration_ms/1000)) if flow.src2dst_duration_ms > 0 else (8 * flow.src2dst_bytes)
        flow.udps.dst_to_src_avg_throughput2 = (8 * flow.dst2src_bytes/(flow.dst2src_duration_ms/1000)) if flow.dst2src_duration_ms > 0 else (8 * flow.dst2src_bytes)
| 51.062241
| 165
| 0.578092
| 1,733
| 12,306
| 3.78015
| 0.055395
| 0.042742
| 0.054953
| 0.045795
| 0.940467
| 0.922607
| 0.890093
| 0.798504
| 0.736529
| 0.721875
| 0
| 0.075808
| 0.321469
| 12,306
| 240
| 166
| 51.275
| 0.708743
| 0.134406
| 0
| 0.793651
| 0
| 0
| 0.039253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026455
| false
| 0
| 0.010582
| 0
| 0.047619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
214435ef3f118944d80f807fe3aec435d4c52a90
| 119
|
py
|
Python
|
layers/__init__.py
|
mgmk2/SpectralNormalization
|
013839a53ba4abb8d9f633af67430fa660f95a1e
|
[
"Apache-2.0"
] | 1
|
2020-08-07T18:31:07.000Z
|
2020-08-07T18:31:07.000Z
|
layers/__init__.py
|
mgmk2/SpectralNormalization
|
013839a53ba4abb8d9f633af67430fa660f95a1e
|
[
"Apache-2.0"
] | null | null | null |
layers/__init__.py
|
mgmk2/SpectralNormalization
|
013839a53ba4abb8d9f633af67430fa660f95a1e
|
[
"Apache-2.0"
] | null | null | null |
from .spectral_normalization_conv import SNConv1D, SNConv2D, SNConv3D
from .spectral_normalization_core import SNDense
| 39.666667
| 69
| 0.882353
| 14
| 119
| 7.214286
| 0.714286
| 0.237624
| 0.49505
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027523
| 0.084034
| 119
| 2
| 70
| 59.5
| 0.899083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.