hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7ff328cc447391f30177e62770d32ee32c9f380 | 2,545 | py | Python | pacifique/views.py | rogeruwayezu/pacifique_IO | 4e8216a945336f03fac4c97f1dc4868460d96b0a | [
"MIT"
] | null | null | null | pacifique/views.py | rogeruwayezu/pacifique_IO | 4e8216a945336f03fac4c97f1dc4868460d96b0a | [
"MIT"
] | 8 | 2020-03-16T07:43:58.000Z | 2022-02-10T13:59:41.000Z | pacifique/views.py | rogeruwayezu/pacifique_IO | 4e8216a945336f03fac4c97f1dc4868460d96b0a | [
"MIT"
] | null | null | null | from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .forms import NewArticleForm, UpdateArticleForm
from .models import Article
# pagination
from django.core.paginator import Paginator
# Create your views here.
def home(request):
    """Render the landing page showing the three most recent articles."""
    latest_articles = Article.objects.order_by('-pub_date')[:3]
    context = {"articles": latest_articles}
    return render(request, 'main/home.html', context)
def articles(request):
    """Render the full article list, paginated three articles per page.

    The page number is taken from the ``page`` query parameter; Django's
    ``get_page`` clamps out-of-range or missing values.
    """
    paginator = Paginator(Article.objects.all(), 3)
    page = paginator.get_page(request.GET.get('page'))
    return render(request, 'main/articles.html', {"articles": page})
def article_display(request, article_id):
    """Render a single article; unknown ids yield HTTP 404."""
    article = Article.objects.filter(id=article_id).first()
    if article is None:
        raise Http404()
    return render(request, 'main/article_display.html', {"article": article})
@login_required(login_url='/accounts/login/')
def new_article(request):
    """Create a new article authored by the signed-in user.

    GET renders an empty form.  A valid POST saves the article (with the
    current user recorded as editor) and redirects to the article list.
    An invalid POST re-renders the bound form so its validation errors are
    shown -- the original redirected unconditionally, silently discarding
    both the errors and the submitted data.
    """
    if request.method == 'POST':
        form = NewArticleForm(request.POST, request.FILES)
        if form.is_valid():
            article = form.save(commit=False)
            article.editor = request.user  # record the author
            article.save()
            return redirect('articles')
    else:
        form = NewArticleForm()
    return render(request, 'main/new_article.html', {"form": form})
# login_url made absolute for consistency with new_article; the original
# relative 'login/' would be resolved against the current path.
@login_required(login_url='/accounts/login/')
def update_article(request, article_id):
    """Edit an existing article.

    GET pre-fills the form from the article; a valid POST saves the change
    and redirects to the article list; an invalid POST re-renders the form
    with errors.  An unknown id raises 404 -- the original left ``form``
    unbound in that case and crashed with a NameError at render time.
    The leftover debug ``print`` calls are removed.
    """
    try:
        article = Article.objects.get(id=article_id)
    except Article.DoesNotExist:
        raise Http404()
    if request.method == 'POST':
        # NOTE(review): NewArticleForm is reused here even though
        # UpdateArticleForm is imported -- confirm which form is intended.
        form = NewArticleForm(request.POST, request.FILES, instance=article)
        if form.is_valid():
            updated = form.save(commit=False)
            updated.editor = request.user
            updated.save()
            return redirect('articles')
    else:
        form = NewArticleForm(instance=article)
    return render(request, 'main/update_article.html',
                  {"form": form, "article_id": article_id})
def contact(request):
return render(request, 'main/contact.html') | 34.863014 | 107 | 0.675835 | from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .forms import NewArticleForm, UpdateArticleForm
from .models import Article
from django.core.paginator import Paginator
def home(request):
articles = Article.objects.all().order_by('-pub_date')[:3]
return render(request, 'main/home.html', {"articles": articles})
def articles(request):
articles_list = Article.objects.all()
paginator = Paginator(articles_list, 3)
page_number = request.GET.get('page')
articles = paginator.get_page(page_number)
return render(request, 'main/articles.html', {"articles": articles})
def article_display(request, article_id):
try:
article = Article.objects.get(id=article_id)
except Article.DoesNotExist:
raise Http404()
return render(request, 'main/article_display.html', {"article": article})
@login_required(login_url='/accounts/login/')
def new_article(request):
current_user = request.user
if request.method == 'POST':
form = NewArticleForm(request.POST, request.FILES)
if form.is_valid():
article = form.save(commit=False)
article.editor = current_user
article.save()
return redirect('articles')
else:
form = NewArticleForm()
return render(request, 'main/new_article.html', {"form": form})
@login_required(login_url='login/')
def update_article(request, article_id):
current_user=request.user
if request.method =='POST':
if Article.objects.filter(id=article_id).exists():
form = NewArticleForm(request.POST, request.FILES, instance=Article.objects.get(id=article_id))
if form.is_valid():
article = form.save(commit=False)
article.editor = current_user
article.save()
print('hellooooooooooooha' + article.title)
return redirect('articles')
else:
if Article.objects.filter(id = article_id).exists():
form = NewArticleForm(instance=Article.objects.get(id=article_id))
article = Article.objects.get(id=article_id)
return render(request,'main/update_article.html',{"form":form, "article_id":article_id})
def contact(request):
return render(request, 'main/contact.html') | true | true |
f7ff33a3db450085d2673e8f5977ed1eac9524d3 | 821 | py | Python | tools/exportcsv/headers.py | DaleProctor/tscharts | 5447395e0aef0b949bef8426febdec2093cf37ef | [
"Apache-2.0"
] | null | null | null | tools/exportcsv/headers.py | DaleProctor/tscharts | 5447395e0aef0b949bef8426febdec2093cf37ef | [
"Apache-2.0"
] | null | null | null | tools/exportcsv/headers.py | DaleProctor/tscharts | 5447395e0aef0b949bef8426febdec2093cf37ef | [
"Apache-2.0"
] | null | null | null | #(C) Copyright Syd Logan 2021
#(C) Copyright Thousand Smiles Foundation 2021
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import sys
# Print the first column of every data row of the tab-separated file named
# on the command line, joined by commas.  The first line (header row) is
# skipped.  The original leaked the file handle and built the string with a
# manual counter loop; 'with' + str.join is equivalent and closes the file.
with open(sys.argv[1], 'r') as tsv_file:
    lines = tuple(tsv_file)
ret = ",".join(line.split('\t')[0] for line in lines[1:])
print(ret)
| 29.321429 | 73 | 0.706456 |
import sys
lines = tuple(open(sys.argv[1], 'r'))
count = 0
ret = ""
for x in lines[1:]:
f = x.split('\t')
ret += f[0]
if count < len(lines) - 2:
ret += ','
count = count + 1
print(ret)
| true | true |
f7ff33ba39f9a3e937cd41911d9c139c1e6e035d | 1,530 | py | Python | core/providers/soundcloud.py | telegrambotdev/music-share-bot | 59dc8592da45186b237505093b229b01c1cd2e4c | [
"MIT"
] | 13 | 2019-03-18T11:39:20.000Z | 2021-01-07T13:15:42.000Z | core/providers/soundcloud.py | telegrambotdev/music-share-bot | 59dc8592da45186b237505093b229b01c1cd2e4c | [
"MIT"
] | 24 | 2019-03-18T10:45:51.000Z | 2021-12-13T20:27:03.000Z | core/providers/soundcloud.py | telegrambotdev/music-share-bot | 59dc8592da45186b237505093b229b01c1cd2e4c | [
"MIT"
] | 7 | 2019-03-17T11:21:14.000Z | 2020-06-03T19:10:25.000Z | import os
import requests
from bs4 import BeautifulSoup
from core.providers.base import MusicProvider
SOUNDCLOUD_CLIENT_ID = os.environ.get('SOUNDCLOUD_CLIENT_ID')
class SoundCloud(MusicProvider):
    """Music provider backed by the public SoundCloud site and search API."""

    NAME = 'SoundCloud'
    _MUSIC_URL = 'https://soundcloud.com/{}/{}'

    def get_music_name(self, url):
        """Scrape the track page at *url* and return its display name.

        Returns None when the page has no ``<title>`` tag (the original
        fell off the end of the function implicitly in that case).
        """
        soundcloud_page = requests.get(url)
        soup = BeautifulSoup(soundcloud_page.content, 'html.parser')
        title_and_artist_tag = soup.find('title')
        if title_and_artist_tag is None:
            return None
        # Page titles look like "<name> by <artist> | ...": keep the part
        # before the first '|' and before ' by '.
        song_info = title_and_artist_tag.text.split('|')[0]
        artist_and_title = song_info.split(' by ')[0]
        # it is my observation, could be just some garbage in the name
        if len(artist_and_title) > 40:
            return artist_and_title.split(' - ')[1]
        return artist_and_title

    def get_music_url(self, name):
        """Search the SoundCloud API for *name* and return the top hit's page URL.

        Raises ``requests.HTTPError`` when the API call fails and
        ``IndexError`` when the search returns no results.
        """
        api_url = 'https://api-v2.soundcloud.com/search'
        params = {
            'q': name,
            'client_id': SOUNDCLOUD_CLIENT_ID,
            'limit': 1,
        }
        resp = requests.get(url=api_url, params=params)
        resp.raise_for_status()
        data = resp.json()
        top_hit = data['collection'][0]
        user = top_hit['user']['permalink']
        track_link = top_hit['permalink']
        return self._MUSIC_URL.format(user, track_link)

    @classmethod
    def is_music_url(cls, url):
        """Return True when *url* points at SoundCloud.

        The original declared this a classmethod but named its first
        parameter ``self``; renamed to ``cls`` and the if/return-True/False
        collapsed to the boolean expression itself.
        """
        return 'soundcloud' in url
| 28.867925 | 74 | 0.607843 | import os
import requests
from bs4 import BeautifulSoup
from core.providers.base import MusicProvider
SOUNDCLOUD_CLIENT_ID = os.environ.get('SOUNDCLOUD_CLIENT_ID')
class SoundCloud(MusicProvider):
NAME = 'SoundCloud'
_MUSIC_URL = 'https://soundcloud.com/{}/{}'
def get_music_name(self, url):
soundcloud_page = requests.get(url)
soup = BeautifulSoup(soundcloud_page.content, 'html.parser')
title_and_artist_tag = soup.find('title')
if title_and_artist_tag:
song_info = title_and_artist_tag.text.split('|')[0]
artist_and_title = song_info.split(' by ')[0]
if len(artist_and_title) > 40:
title = artist_and_title.split(' - ')[1]
return f'{title}'
return f'{artist_and_title}'
def get_music_url(self, name):
api_url = 'https://api-v2.soundcloud.com/search'
params = {
'q': name,
'client_id': SOUNDCLOUD_CLIENT_ID,
'limit': 1,
}
resp = requests.get(url=api_url, params=params)
resp.raise_for_status()
data = resp.json()
user = data['collection'][0]['user']['permalink']
track_link = data['collection'][0]['permalink']
url = self._MUSIC_URL.format(user, track_link)
return url
@classmethod
def is_music_url(self, url):
if 'soundcloud' in url:
return True
return False
| true | true |
f7ff34d3862feee34e87b39ce28ebbe7138da441 | 1,992 | py | Python | tests/test_basic_events.py | uzsolt/herbstluftwm | 49b6dcad50452d79783576236df2ce670672c10a | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/test_basic_events.py | uzsolt/herbstluftwm | 49b6dcad50452d79783576236df2ce670672c10a | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/test_basic_events.py | uzsolt/herbstluftwm | 49b6dcad50452d79783576236df2ce670672c10a | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import pytest
import test_stack
@pytest.mark.parametrize("single_floating", [True, False])
@pytest.mark.parametrize("raise_on_click", [True, False])
def test_focus_on_click(hlwm, mouse, raise_on_click, single_floating):
if single_floating:
hlwm.call('rule floating=on')
else:
hlwm.call('set_attr tags.focus.floating on')
hlwm.call(['set', 'raise_on_click', hlwm.bool(raise_on_click)])
c1, _ = hlwm.create_client(position=(0, 0))
c2, _ = hlwm.create_client(position=(300, 0))
hlwm.call(f'jumpto {c2}') # also raises c2
assert hlwm.get_attr('clients.focus.winid') == c2
assert test_stack.helper_get_stack_as_list(hlwm) == [c2, c1]
mouse.click('1', into_win_id=c1)
assert hlwm.get_attr('clients.focus.winid') == c1
stack = test_stack.helper_get_stack_as_list(hlwm)
if raise_on_click:
# c1 gets back on top
assert stack == [c1, c2]
else:
# c2 stays on top
assert stack == [c2, c1]
@pytest.mark.parametrize("single_floating", [True, False])
@pytest.mark.parametrize("focus_follows_mouse", [True, False])
def test_focus_follows_mouse(hlwm, mouse, focus_follows_mouse, single_floating):
if single_floating:
hlwm.call('rule floating=on')
else:
hlwm.call('set_attr tags.focus.floating on')
hlwm.call('set_attr tags.focus.floating on')
hlwm.call(['set', 'focus_follows_mouse', hlwm.bool(focus_follows_mouse)])
c1, _ = hlwm.create_client(position=(0, 0))
c2, _ = hlwm.create_client(position=(300, 0))
hlwm.call(f'jumpto {c2}') # also raises c2
assert hlwm.get_attr('clients.focus.winid') == c2
assert test_stack.helper_get_stack_as_list(hlwm) == [c2, c1]
mouse.move_into(c1)
c1_is_focused = hlwm.get_attr('clients.focus.winid') == c1
# c1 is focused iff focus_follows_mouse was activated
assert c1_is_focused == focus_follows_mouse
# stacking is unchanged
assert test_stack.helper_get_stack_as_list(hlwm) == [c2, c1]
| 37.584906 | 80 | 0.692269 | import pytest
import test_stack
@pytest.mark.parametrize("single_floating", [True, False])
@pytest.mark.parametrize("raise_on_click", [True, False])
def test_focus_on_click(hlwm, mouse, raise_on_click, single_floating):
if single_floating:
hlwm.call('rule floating=on')
else:
hlwm.call('set_attr tags.focus.floating on')
hlwm.call(['set', 'raise_on_click', hlwm.bool(raise_on_click)])
c1, _ = hlwm.create_client(position=(0, 0))
c2, _ = hlwm.create_client(position=(300, 0))
hlwm.call(f'jumpto {c2}')
assert hlwm.get_attr('clients.focus.winid') == c2
assert test_stack.helper_get_stack_as_list(hlwm) == [c2, c1]
mouse.click('1', into_win_id=c1)
assert hlwm.get_attr('clients.focus.winid') == c1
stack = test_stack.helper_get_stack_as_list(hlwm)
if raise_on_click:
assert stack == [c1, c2]
else:
assert stack == [c2, c1]
@pytest.mark.parametrize("single_floating", [True, False])
@pytest.mark.parametrize("focus_follows_mouse", [True, False])
def test_focus_follows_mouse(hlwm, mouse, focus_follows_mouse, single_floating):
if single_floating:
hlwm.call('rule floating=on')
else:
hlwm.call('set_attr tags.focus.floating on')
hlwm.call('set_attr tags.focus.floating on')
hlwm.call(['set', 'focus_follows_mouse', hlwm.bool(focus_follows_mouse)])
c1, _ = hlwm.create_client(position=(0, 0))
c2, _ = hlwm.create_client(position=(300, 0))
hlwm.call(f'jumpto {c2}')
assert hlwm.get_attr('clients.focus.winid') == c2
assert test_stack.helper_get_stack_as_list(hlwm) == [c2, c1]
mouse.move_into(c1)
c1_is_focused = hlwm.get_attr('clients.focus.winid') == c1
assert c1_is_focused == focus_follows_mouse
assert test_stack.helper_get_stack_as_list(hlwm) == [c2, c1]
| true | true |
f7ff357bc7e3caa029e96d1817df3cd7eb4be03d | 979 | py | Python | pretix_sepadebit/migrations/0001_initial.py | chr-chr/pretix-sepadebit | 153871261c34b8ec560a101c69a1b17dcb66a5c4 | [
"Apache-2.0"
] | 6 | 2017-04-09T17:08:18.000Z | 2019-03-15T14:01:23.000Z | pretix_sepadebit/migrations/0001_initial.py | chr-chr/pretix-sepadebit | 153871261c34b8ec560a101c69a1b17dcb66a5c4 | [
"Apache-2.0"
] | 7 | 2019-03-12T06:07:50.000Z | 2022-02-23T08:17:01.000Z | pretix_sepadebit/migrations/0001_initial.py | chr-chr/pretix-sepadebit | 153871261c34b8ec560a101c69a1b17dcb66a5c4 | [
"Apache-2.0"
] | 7 | 2017-07-15T23:52:10.000Z | 2021-11-15T15:44:42.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-21 11:45
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial, auto-generated migration: creates the ``SepaExport`` model."""
    initial = True
    dependencies = [
        ('pretixbase', '0051_auto_20170206_2027_squashed_0057_auto_20170501_2116'),
    ]
    operations = [
        migrations.CreateModel(
            name='SepaExport',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Raw SEPA XML document -- presumably the exported debit file; confirm in app code.
                ('xmldata', models.TextField()),
                # Export timestamp, set automatically on insert.
                ('datetime', models.DateTimeField(auto_now_add=True)),
                # Owning event; deleting the event deletes its exports.
                ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sepa_exports', to='pretixbase.Event')),
                ('orders', models.ManyToManyField(related_name='sepa_exports', to='pretixbase.Order')),
            ],
        ),
    ]
| 33.758621 | 142 | 0.638407 |
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('pretixbase', '0051_auto_20170206_2027_squashed_0057_auto_20170501_2116'),
]
operations = [
migrations.CreateModel(
name='SepaExport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('xmldata', models.TextField()),
('datetime', models.DateTimeField(auto_now_add=True)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sepa_exports', to='pretixbase.Event')),
('orders', models.ManyToManyField(related_name='sepa_exports', to='pretixbase.Order')),
],
),
]
| true | true |
f7ff36fbfcf6357bd7adba2d00deac677e3704ae | 866 | py | Python | src/heap/max_heapify.py | dmvieira/algs | bf88d026ecf7210f0a1d601b36d7fc82364bd46f | [
"Apache-2.0"
] | null | null | null | src/heap/max_heapify.py | dmvieira/algs | bf88d026ecf7210f0a1d601b36d7fc82364bd46f | [
"Apache-2.0"
] | null | null | null | src/heap/max_heapify.py | dmvieira/algs | bf88d026ecf7210f0a1d601b36d7fc82364bd46f | [
"Apache-2.0"
] | null | null | null | import math
class MaxHeapify(object):
def __init__(self, array):
self.array = array
def build(self):
array = self.array
size = len(array)
for key in reversed(range(math.ceil(size/2))):
self.max(array, key)
return array
def max(self, array=None, i=0, heap_size=None):
array = array or self.array
n = len(array) if heap_size is None else heap_size
largest = i
left = 2*i + 1
right = 2*i + 2
if n > left and array[left] > array[largest]:
largest = left
if n > right and array[right] > array[largest]:
largest = right
if largest != i:
temp = array[largest]
array[largest] = array[i]
array[i] = temp
self.max(array, largest, n)
return array | 26.242424 | 58 | 0.52194 | import math
class MaxHeapify(object):
def __init__(self, array):
self.array = array
def build(self):
array = self.array
size = len(array)
for key in reversed(range(math.ceil(size/2))):
self.max(array, key)
return array
def max(self, array=None, i=0, heap_size=None):
array = array or self.array
n = len(array) if heap_size is None else heap_size
largest = i
left = 2*i + 1
right = 2*i + 2
if n > left and array[left] > array[largest]:
largest = left
if n > right and array[right] > array[largest]:
largest = right
if largest != i:
temp = array[largest]
array[largest] = array[i]
array[i] = temp
self.max(array, largest, n)
return array | true | true |
f7ff37edbf2d3c1d03287b98a1a98c0895c4ca75 | 70 | py | Python | reptify/elements/__init__.py | YunisDEV/reptilia | 3bb025177df9847f86290665476604dd4c4e49ad | [
"MIT"
] | null | null | null | reptify/elements/__init__.py | YunisDEV/reptilia | 3bb025177df9847f86290665476604dd4c4e49ad | [
"MIT"
] | null | null | null | reptify/elements/__init__.py | YunisDEV/reptilia | 3bb025177df9847f86290665476604dd4c4e49ad | [
"MIT"
] | null | null | null | from .elements import (
ElementSet,
Element as ElementBase
)
| 14 | 26 | 0.685714 | from .elements import (
ElementSet,
Element as ElementBase
)
| true | true |
f7ff3a0add55da8d068e7d4a0aa94888934f9da6 | 605 | py | Python | src/bio2bel_pfam/constants.py | bio2bel/pfam | 2e18037aa12986f27458a12cc703b21f0786d3f5 | [
"MIT"
] | null | null | null | src/bio2bel_pfam/constants.py | bio2bel/pfam | 2e18037aa12986f27458a12cc703b21f0786d3f5 | [
"MIT"
] | null | null | null | src/bio2bel_pfam/constants.py | bio2bel/pfam | 2e18037aa12986f27458a12cc703b21f0786d3f5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Constants for Bio2BEL PFAM."""
import os
from bio2bel import get_data_dir
# Names exported as the public API of this constants module.
__all__ = [
    'VERSION',
    'MODULE_NAME',
    'DATA_DIR',
    'CLAN_MAPPING_URL',
    'CLAN_MAPPING_PATH',
    'CLAN_MAPPING_HEADER',
]
# Bio2BEL module identity and its local data cache directory.
VERSION = '0.0.2-dev'
MODULE_NAME = 'pfam'
DATA_DIR = get_data_dir(MODULE_NAME)
# Pfam-A family/clan mapping hosted on the EBI FTP server, and the local
# path it is cached to inside DATA_DIR.
CLAN_MAPPING_URL = 'ftp://ftp.ebi.ac.uk/pub/databases/Pfam/current_release/Pfam-A.clans.tsv.gz'
CLAN_MAPPING_PATH = os.path.join(DATA_DIR, 'Pfam-A.clans.tsv.gz')
# Column names for the clan mapping table -- presumably matching the TSV
# column order; confirm against the downloaded file.
CLAN_MAPPING_HEADER = [
    'family_id',
    'clan_id',
    'clan_name',
    'family_name',
    'family_summary',
]
| 19.516129 | 95 | 0.67438 |
import os
from bio2bel import get_data_dir
__all__ = [
'VERSION',
'MODULE_NAME',
'DATA_DIR',
'CLAN_MAPPING_URL',
'CLAN_MAPPING_PATH',
'CLAN_MAPPING_HEADER',
]
VERSION = '0.0.2-dev'
MODULE_NAME = 'pfam'
DATA_DIR = get_data_dir(MODULE_NAME)
CLAN_MAPPING_URL = 'ftp://ftp.ebi.ac.uk/pub/databases/Pfam/current_release/Pfam-A.clans.tsv.gz'
CLAN_MAPPING_PATH = os.path.join(DATA_DIR, 'Pfam-A.clans.tsv.gz')
CLAN_MAPPING_HEADER = [
'family_id',
'clan_id',
'clan_name',
'family_name',
'family_summary',
]
| true | true |
f7ff3ad1e775a08088997f217dbf9edf54e805cf | 14,999 | py | Python | Python/TBot_Joystick_Python_PYBLUEZ/Controller.py | garethnisbet/T-BOTS | 70e211191cc6c713084836bff89241e811667378 | [
"Apache-2.0"
] | 20 | 2018-07-16T21:34:35.000Z | 2022-01-07T02:33:10.000Z | Python/TBot_Joystick_Python_PYBLUEZ/Controller.py | garethnisbet/T-BOTS | 70e211191cc6c713084836bff89241e811667378 | [
"Apache-2.0"
] | 5 | 2018-07-02T23:00:36.000Z | 2020-01-23T17:38:32.000Z | Python/TBot_Joystick_Python_PYBLUEZ/Controller.py | garethnisbet/T-BOTS | 70e211191cc6c713084836bff89241e811667378 | [
"Apache-2.0"
] | 10 | 2018-05-15T10:38:40.000Z | 2021-06-03T07:07:21.000Z | import pygame, sys, pygame.mixer
from pygame.locals import *
import socket
from time import sleep, time
import bluetooth as bt
print('-----------------------------------------------------------------')
print('Controls:\nClick and drag joystick to drive the T-Bot\nUse up, down, left, right arrow keys to drive the T-Bot\nPress w or s to change the speed factor for arrow controls.\nClick on plot or press c to clear plots\nPress q or Esc to quit or Ctrl c in this window\n')
print('-----------------------------------------------------------------\n\n\n')
################### Connection #############################
# Either scan for nearby Bluetooth devices interactively or fall back to a
# hard-coded T-Bot MAC address, then retry opening RFCOMM channel 1 until
# the connection succeeds.
search = False
if search == True:
    print('Searching for devices...')
    print("")
    nearby_devices = bt.discover_devices()
    # Run through all the devices found and list their name
    num = 0
    for i in nearby_devices:
        num += 1
        print(num, ": ", bt.lookup_name(i))
    print('Select your device by entering its coresponding number...')
    # input() returns a string in Python 3 -- the original subtracted 1
    # directly from it, which raises TypeError before the list is indexed.
    selection = int(input("> ")) - 1
    print('You have selected - ' + bt.lookup_name(nearby_devices[selection]))
    bd_addr = nearby_devices[selection]
else:
    # bd_addr = '98:D3:51:FD:81:AC'
    bd_addr = '98:D3:91:FD:46:C9'
print('connecting...')
error = 1
port = 1  # NOTE(review): unused; connect() below hard-codes channel 1
while error:
    try:
        sock = bt.BluetoothSocket(bt.RFCOMM)
        sock.connect((bd_addr, 1))
        sock.settimeout(5)
        error = 0
        print('connected to ' + bd_addr)
    except:  # noqa: E722 -- any connect failure just triggers a retry
        print('Trying again...')
        sock.close()
        error = 1
        sleep(2)
def send(sendstr):
    """Frame *sendstr* in STX/ETX bytes and push it over the Bluetooth socket.

    While macro recording is active (``cmdwrite``), the command is also
    appended to the open ``f2`` log as "elapsed_seconds,command", using the
    module-level ``timestart`` as the reference time.  Any failure is
    treated as a lost connection: the socket is closed and the program
    exits.
    """
    global timestart  # NOTE(review): only read here, never assigned -- the
    # declaration looks unnecessary; confirm before removing.
    try:
        builtstr = chr(0X02)+sendstr+chr(0X03)  # STX ... ETX framing
        sock.send(builtstr.encode(encoding='utf-8'))
        if cmdwrite:
            # Record "elapsed,command" for later replay by playmacro().
            f2.write(str(time()-timestart)+','+sendstr+'\n')
    except:  # noqa: E722 -- any send error is handled as a disconnect
        sock.close()
        pygame.display.quit()
        sys.exit()
timestart = time()
def playmacro(filename):
    """Replay a command macro recorded by send().

    Each line of *filename* has the form ``elapsed_seconds,command``; the
    function sleeps for the recorded delay, then re-sends the command.  A
    missing/unreadable file prints a hint instead of raising; malformed
    lines are skipped.  The original sent each record's trailing newline as
    part of the command and leaked the file handle on error.
    """
    try:
        with open(filename) as macro_file:  # 'with' guarantees the close
            cmd_data = macro_file.readlines()
    except IOError:
        print('The cmd.csv file does not exist. Try recording a macro first.')
        return
    for line in cmd_data:
        try:
            # Split only on the first comma so commands containing commas
            # survive; strip the record's newline before sending.
            delay_field, cmdstr = line.rstrip('\n').split(',', 1)
            dtime = float(delay_field)
        except ValueError:
            continue  # skip malformed lines instead of aborting the macro
        sleep(dtime)
        send(cmdstr)
def parse():
    """Read one telemetry frame from the T-Bot over the Bluetooth socket.

    Frames are STX/ETX (0x02/0x03) delimited, comma-separated strings of
    the form ``kps,kp,trim,gyro``.  Returns ``(kps, kp, trim, gyro)``: the
    first three stay strings, ``gyro`` is converted to float.  On any
    receive/parse error the previously cached values are returned instead,
    falling back to 0 for gyro when the cached value cannot be converted.
    When ``toggle`` is 1 the frame is also appended to the open plot log
    ``f``.
    """
    global oldkps
    global oldkp
    global oldtrim
    global oldgyro
    global toggle
    try:
        dataraw = sock.recv(32).decode(encoding='utf-8')
        datastripped = dataraw.strip('\x03\x02').split('\x03\x02')
        if datastripped[0] == '':
            # First chunk was empty (frame boundary) -- read again.
            dataraw = sock.recv(32).decode(encoding='utf-8')
            datastripped = dataraw.strip('\x03\x02').split('\x03\x02')
        ministring = datastripped[0]
        splitstr = ministring.split(',')
        oldkps, oldkp, oldtrim, oldgyro = splitstr[0], splitstr[1], splitstr[2], splitstr[3]
        oldgyro = oldgyro[:-2]  # drop two trailing chars -- presumably "\r\n"; confirm firmware format
        if toggle == 1:
            print('writing...')
            f.write(oldkps+','+oldkp+','+oldtrim+','+oldgyro+'\n')
        return oldkps, oldkp, oldtrim, float(oldgyro)
    except:  # noqa: E722 -- fall back to the last good values
        try:
            return oldkps, oldkp, oldtrim, float(oldgyro)
        except:  # cached gyro not numeric (e.g. partial frame was stored)
            return oldkps, oldkp, oldtrim, 0
################### Setup Pygame ########################
pygame.font.init()
basicfont = pygame.font.SysFont(None, 30)
# Cached telemetry strings (kps, kp, trim, gyro) shown until parse() succeeds.
oldkps, oldkp, oldtrim, oldgyro = str(0),str(0),str(0), str(0)
pygame.init()
clock = pygame.time.Clock()
size = width, height = 1200, 500
screen=pygame.display.set_mode(size)
############ Load art work ##############################
joytop = pygame.image.load('images/joytopglow.png')
joybase = pygame.image.load('images/joybase.png')
minus = pygame.image.load('images/minus.png')
plus = pygame.image.load('images/plus.png')
pluslight = pygame.image.load('images/pluslight.png')
minuslight = pygame.image.load('images/minuslight.png')
gTrim = pygame.image.load('images/Trim.png')
gTrimlight = pygame.image.load('images/Trimlight.png')
record = pygame.image.load('images/record.png')
pause = pygame.image.load('images/pause.png')
stop = pygame.image.load('images/stop.png')
play = pygame.image.load('images/play.png')
cmdpause = pygame.image.load('images/cmdpause.png')
cmdrecord = pygame.image.load('images/cmdrecord.png')
trash = pygame.image.load('images/trash.png')
trashlight = pygame.image.load('images/trashlight.png')
######################## initialize variables #################
timestart = time()     # reference time for macro-recording timestamps
cmdwrite = 0           # 1 while commands are being logged to f2
# Button press/latch flags for the on-screen controls.
button1,button2,button3,button4,button5,button6,button7, button8, button9 ,button10, button11 ,button12 , toggle, toggle2, toggle3 = 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
x,y = 0,0
colour = (0,0,0,0)
linecolor = 255, 0, 0
plotcolours = [(0, 255, 0),(255, 0, 0),(0, 0, 255),(255, 255, 0),(255, 0, 255), (0,255,255)]
iicolour = 0           # index into plotcolours, advanced per plot sweep
textcolour = (255,255, 255)
mx,my = 0,0
mxnew, mynew = 250, 250
oldgyrodata = 0
ii = 800               # wait -- assignment kept verbatim below
speedfactor = 0.5      # arrow-key speed scale (0.1 .. 1.0, see main loop)
# Plot area border: x = 800..1160, y = 100..400 (see the main loop's sweep).
pygame.draw.lines(screen, (255,255,255), False, ((800,100), (1160,100), (1160,400),(800,400),(800,100)),1)
f= open('plot.csv','w')    # telemetry log; reopened in append mode by the record button
Play = 0
cmdindex = 0
while True: # Continuous Pygame loop,
pygame.display.update((800,0,1200,500))
xstr, ystr = '200', '200'
kps, kp, trim, gyrodata = parse()
if gyrodata > 298:
gyrodata = 298
if gyrodata < 0:
gyrodata = 0
pygame.draw.lines(screen, plotcolours[iicolour], False, ((ii,oldgyrodata+101), (ii+1,gyrodata+101)),1)
oldgyrodata = gyrodata
kpstext = basicfont.render('KPS '+kps, True, textcolour)
kptext = basicfont.render('KP ' +kp, True, textcolour)
trimtext = basicfont.render('TRIM '+trim, True, textcolour)
speedfactortext = basicfont.render('Speed Factor '+str(speedfactor), True, textcolour)
mx,my = pygame.mouse.get_pos()
p2x = mx
p2y = my
#print('x '+str(mx)+' y ' +str(my))
c1, c2, c3 = pygame.mouse.get_pressed()
if mx > 480 or mx < 20 or my > 480 or my < 20:
mx,my = 250,250
jx = int(((mx-250)*0.43)+200)
jy = int(((250-my)*0.43)+200)
if mxnew != mx or mynew != my:
sendstring = chr(0X02)+str(jx)+str(jy)+chr(0X03)
if c1==1:
send(sendstring)
else:
send(chr(0X02)+'200200Z'+chr(0X03))
mxnew = mx
mynew = my
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
sys.exit()
elif event.type == MOUSEBUTTONDOWN:
kps, kp, trim, gyrodata = parse()
# if p2x > 0 and p2x < 500 and p2y > 0 and p2y < 500:
# mx, my = 250,250
if p2x > 680 and p2x < 706 and p2y > 100 and p2y < 123:
button1 = 1
if p2x > 680 and p2x < 706 and p2y > 130 and p2y < 153:
button2 = 1
if p2x > 680 and p2x < 706 and p2y > 230 and p2y < 253:
button3 = 1
if p2x > 680 and p2x < 706 and p2y > 260 and p2y < 283:
button4 = 1
if p2x > 580 and p2x < 706 and p2y > 360 and p2y < 383:
button5 = 1
if p2x > 680 and p2x < 706 and p2y > 390 and p2y < 413:
button6 = 1
if p2x > 720 and p2x < 740 and p2y > 375 and p2y < 395:
button7 = 1
if p2x > 800 and p2x < 1200 and p2y > 100 and p2y < 400:
button8 = 1
if p2x > 1120 and p2x < 1150 and p2y > 420 and p2y < 450:
button9 = 1
if p2x > 860 and p2x < 894 and p2y > 40 and p2y < 75:
button10 = 1
if p2x > 960 and p2x < 994 and p2y > 40 and p2y < 75:
button11 = 1
if p2x > 1060 and p2x < 1094 and p2y > 40 and p2y < 75:
button12 = 1
elif event.type == MOUSEBUTTONUP:
button1 = 0
button2 = 0
button3 = 0
button4 = 0
button5 = 0
button6 = 0
button7 = 0
button8 = 0
button9 = 0
button10 = 0
button11 = 0
button12 = 0
if event.type == KEYDOWN and event.key == K_c:
screen.fill(colour,(800,100,1200,402))
pygame.draw.lines(screen, (255,255,255), False, ((800,100), (1160,100), (1160,400),(800,400),(800,100)),1)
iicolour = 0
ii = 800
keys = pygame.key.get_pressed()
if keys[K_RIGHT] and keys[K_UP]:
send('%03d%03dZ'%(240,200+(speedfactor*100)))
elif keys[K_LEFT] and keys[K_UP]:
send('%03d%03dZ'%(160,200+(speedfactor*100)))
elif keys[K_RIGHT] and keys[K_DOWN]:
send('%03d%03dZ'%(260,200-(speedfactor*100)))
elif keys[K_LEFT] and keys[K_DOWN]:
send('%03d%03dZ'%(140,200-(speedfactor*100)))
elif keys[K_DOWN]:
send('%03d%03dZ'%(200,200-(speedfactor*100)))
elif keys[K_UP]:
send('%03d%03dZ'%(200,200+(speedfactor*100)))
elif keys[K_RIGHT]:
send('260200Z')
elif keys[K_LEFT]:
send('140200Z')
elif keys[K_w]:
speedfactor +=0.1
if speedfactor > 1:
speedfactor = 1.0
elif keys[K_s]:
speedfactor -=0.1
if speedfactor < 0.1:
speedfactor = 0.1
else:
if c1==0:
send('200200Z')
if event.type == KEYDOWN and event.key == K_ESCAPE:
sock.close()
f.close()
print('Your now disconnected.')
pygame.display.quit()
sys.exit()
elif event.type == KEYDOWN and event.key == K_q:
sock.close()
pygame.display.quit()
f.close()
print('Your now disconnected.')
sys.exit()
screen.fill(colour,(0,0,800,500)) # Joystick
screen.fill(colour,(1116,410,1146,440))
screen.fill(colour,(800,420,1146,500))
screen.blit(joybase,(250-230,250-230))
screen.blit(joytop,(mx-75,my-75))
screen.blit(plus,(680,100))
screen.blit(minus,(680,130))
screen.blit(plus,(680,230))
screen.blit(minus,(680,260))
screen.blit(plus,(680,360))
screen.blit(minus,(680,390))
screen.blit(gTrim,(720,375))
if button1:
screen.blit(pluslight,(680-3,100-3))
buttonstring = '200200B'
send(buttonstring)
if button2:
screen.blit(minuslight,(680-3,130-3))
buttonstring2 = '200200A'
send(buttonstring2)
if button3:
screen.blit(pluslight,(680-3,230-3))
buttonstring3 = '200200D'
send(buttonstring3)
if button4:
screen.blit(minuslight,(680-3,260-3))
buttonstring4 = '200200C'
send(buttonstring4)
if button5:
screen.blit(pluslight,(680-3,360-3))
buttonstring5 = '200200F'
send(buttonstring5)
if button6:
screen.blit(minuslight,(680-3,390-3))
buttonstring6 = '200200E'
send(buttonstring6)
if button7:
screen.blit(gTrimlight,(720-2,375-2))
buttonstring7 = '200200T'
send(buttonstring7)
if button8:
screen.fill(colour,(800,100,1200,500))
pygame.draw.lines(screen, (255,255,255), False, ((800,100), (1160,100), (1160,400),(800,400),(800,100)),1)
iicolour = 0
ii = 800
if button12:
screen.blit(trashlight,(1060-3,40-3))
f2= open('cmd.csv','w')
f2.close()
##################### Data record logic ###########################
if button9==0 and toggle == 0:
screen.fill(colour,(1116,410,1146,440))
screen.blit(record,(1120,420))
if toggle == 0:
if button9==1:
f= open('plot.csv','a')
screen.fill(colour,(1116,410,1146,440))
screen.blit(pause,(1120,420))
toggle = 1
elif toggle == 1:
if button9==1:
f.close()
toggle = 0
if toggle:
screen.fill(colour,(1116,410,1146,400))
screen.blit(pause,(1120,420))
else:
screen.fill(colour,(1116,410,1146,400))
screen.blit(record,(1120,420))
##################### Command play logic ###########################
if button10==0 and toggle2 == 0:
screen.fill(colour,(860,40,900,44))
screen.blit(play,(860,40))
if toggle2 == 0:
if button10==1:
Play = 1
cmdindex = 0
toggle2 = 1
elif toggle2 == 1:
if button10==1:
Play = 0
toggle2 = 0
if toggle2:
screen.fill(colour,(860,40,900,44))
screen.blit(stop,(860,40))
Play = 1
else:
screen.fill(colour,(860,40,900,44))
screen.blit(play,(860,40))
Play = 0
##################### Command record logic ###########################
if button11==0 and toggle3 == 0:
screen.fill(colour,(960,40,965,44))
screen.blit(cmdrecord,(960,40))
if toggle3 == 0:
if button11==1:
f2= open('cmd.csv','a')
timestart = time()
cmdwrite = 1
screen.fill(colour,(960,40,965,44))
screen.blit(cmdpause,(960,40))
toggle3 = 1
elif toggle3 == 1:
if button11==1:
cmdwrite = 0
f2.close()
toggle3 = 0
if toggle3:
screen.fill(colour,(960,40,965,44))
screen.blit(cmdpause,(960,40))
else:
screen.fill(colour,(960,40,965,44))
screen.blit(cmdrecord,(960,40))
screen.blit(trash,(1060,40))
if button12:
screen.blit(trashlight,(1060-3,40-3))
################ Play loop ###########################
screen.blit(kpstext,(560,115))
screen.blit(kptext,(560,245))
screen.blit(trimtext,(560,375))
screen.blit(joytop,(mx-75,my-75))
screen.blit(speedfactortext,(800,420))
if Play:
playmacro('cmd.csv')
Play = 0
button10 = 0
toggle2 = 0
ii+=1
if ii > 1159:
iicolour+=1
ii = 801
if iicolour > 5:
iicolour = 0
pygame.display.update()
| 29.70099 | 272 | 0.507634 | import pygame, sys, pygame.mixer
from pygame.locals import *
import socket
from time import sleep, time
import bluetooth as bt
print('-----------------------------------------------------------------')
print('Controls:\nClick and drag joystick to drive the T-Bot\nUse up, down, left, right arrow keys to drive the T-Bot\nPress w or s to change the speed factor for arrow controls.\nClick on plot or press c to clear plots\nPress q or Esc to quit or Ctrl c in this window\n')
print('-----------------------------------------------------------------\n\n\n')
e()
pygame.display.quit()
f.close()
print('Your now disconnected.')
sys.exit()
screen.fill(colour,(0,0,800,500))
screen.fill(colour,(1116,410,1146,440))
screen.fill(colour,(800,420,1146,500))
screen.blit(joybase,(250-230,250-230))
screen.blit(joytop,(mx-75,my-75))
screen.blit(plus,(680,100))
screen.blit(minus,(680,130))
screen.blit(plus,(680,230))
screen.blit(minus,(680,260))
screen.blit(plus,(680,360))
screen.blit(minus,(680,390))
screen.blit(gTrim,(720,375))
if button1:
screen.blit(pluslight,(680-3,100-3))
buttonstring = '200200B'
send(buttonstring)
if button2:
screen.blit(minuslight,(680-3,130-3))
buttonstring2 = '200200A'
send(buttonstring2)
if button3:
screen.blit(pluslight,(680-3,230-3))
buttonstring3 = '200200D'
send(buttonstring3)
if button4:
screen.blit(minuslight,(680-3,260-3))
buttonstring4 = '200200C'
send(buttonstring4)
if button5:
screen.blit(pluslight,(680-3,360-3))
buttonstring5 = '200200F'
send(buttonstring5)
if button6:
screen.blit(minuslight,(680-3,390-3))
buttonstring6 = '200200E'
send(buttonstring6)
if button7:
screen.blit(gTrimlight,(720-2,375-2))
buttonstring7 = '200200T'
send(buttonstring7)
if button8:
screen.fill(colour,(800,100,1200,500))
pygame.draw.lines(screen, (255,255,255), False, ((800,100), (1160,100), (1160,400),(800,400),(800,100)),1)
iicolour = 0
ii = 800
if button12:
screen.blit(trashlight,(1060-3,40-3))
f2= open('cmd.csv','w')
f2.close()
| true | true |
f7ff3b6a8dd96df33a192fea0c09b1099b7f0769 | 3,645 | py | Python | tests/test_resource/test_mysql.py | roganov/local-data-api | 2c58206f0221c913521778c627ed2bdbff11d274 | [
"MIT"
] | 102 | 2019-06-15T19:32:20.000Z | 2022-03-25T18:39:07.000Z | tests/test_resource/test_mysql.py | roganov/local-data-api | 2c58206f0221c913521778c627ed2bdbff11d274 | [
"MIT"
] | 176 | 2019-06-16T05:57:29.000Z | 2022-03-28T01:26:16.000Z | tests/test_resource/test_mysql.py | healthpraxone/local-data-api | 7f81daee9e80958c082d8d4ebbe767dbfecb2544 | [
"MIT"
] | 20 | 2019-10-30T09:02:20.000Z | 2022-01-14T09:07:26.000Z | from __future__ import annotations
import pytest
from local_data_api.models import ColumnMetadata, ExecuteStatementResponse, Field
from local_data_api.resources import MySQL
from local_data_api.resources.resource import CONNECTION_POOL, RESOURCE_METAS
from tests.test_resource.test_resource import helper_default_test_field
@pytest.fixture
def clear():
RESOURCE_METAS.clear()
CONNECTION_POOL.clear()
def test_create_connection_maker(mocker):
mock_connect = mocker.patch('local_data_api.resources.mysql.pymysql.connect')
connection_maker = MySQL.create_connection_maker(
host='127.0.0.1',
port=3306,
user_name='root',
password='pass',
engine_kwargs={'auto_commit': True},
)
connection_maker(database='test')
mock_connect.assert_called_once_with(
auto_commit=True,
host='127.0.0.1',
password='pass',
port=3306,
user='root',
db='test',
)
mock_connect = mocker.patch('local_data_api.resources.mysql.pymysql.connect')
connection_maker = MySQL.create_connection_maker()
connection_maker()
mock_connect.assert_called_once_with()
def test_execute_select_with_include_metadata(clear, mocker):
connection_mock = mocker.Mock()
cursor_mock = mocker.Mock()
connection_mock.cursor.side_effect = [cursor_mock]
cursor_mock.description = (1, 2, 3, 4, 5, 6, 7), (8, 9, 10, 11, 12, 13, 14)
cursor_mock.fetchall.side_effect = [((1, 'abc'),)]
field_1 = mocker.Mock()
field_1.name = '1'
field_1.org_name = '1'
field_1.flags = 2
field_1.get_column_length.return_value = 5
field_1.scale = 6
field_1.table_name = None
field_2 = mocker.Mock()
field_2.name = '8'
field_2.org_name = '8'
field_2.flags = 2
field_2.get_column_length.return_value = 12
field_2.scale = 13
field_2.table_name = None
cursor_mock._result.fields = [field_1, field_2]
dummy = MySQL(connection_mock, transaction_id='123')
assert (
dummy.execute("select * from users", include_result_metadata=True).dict()
== ExecuteStatementResponse(
numberOfRecordsUpdated=0,
records=[
[dummy.get_field_from_value(1), dummy.get_field_from_value('abc')]
],
columnMetadata=[
ColumnMetadata(
arrayBaseColumnType=0,
isAutoIncrement=False,
isCaseSensitive=False,
isCurrency=False,
isSigned=False,
label='1',
name='1',
nullable=1,
precision=5,
scale=6,
tableName=None,
type=None,
typeName=None,
),
ColumnMetadata(
arrayBaseColumnType=0,
isAutoIncrement=False,
isCaseSensitive=False,
isCurrency=False,
isSigned=False,
label='8',
name='8',
nullable=1,
precision=12,
scale=13,
tableName=None,
type=None,
typeName=None,
),
],
).dict()
)
cursor_mock.execute.assert_called_once_with('select * from users')
cursor_mock.close.assert_called_once_with()
def test_from_value(mocker) -> None:
connection_mock = mocker.Mock()
dummy = MySQL(connection_mock)
helper_default_test_field(dummy)
| 31.695652 | 82 | 0.588477 | from __future__ import annotations
import pytest
from local_data_api.models import ColumnMetadata, ExecuteStatementResponse, Field
from local_data_api.resources import MySQL
from local_data_api.resources.resource import CONNECTION_POOL, RESOURCE_METAS
from tests.test_resource.test_resource import helper_default_test_field
@pytest.fixture
def clear():
RESOURCE_METAS.clear()
CONNECTION_POOL.clear()
def test_create_connection_maker(mocker):
mock_connect = mocker.patch('local_data_api.resources.mysql.pymysql.connect')
connection_maker = MySQL.create_connection_maker(
host='127.0.0.1',
port=3306,
user_name='root',
password='pass',
engine_kwargs={'auto_commit': True},
)
connection_maker(database='test')
mock_connect.assert_called_once_with(
auto_commit=True,
host='127.0.0.1',
password='pass',
port=3306,
user='root',
db='test',
)
mock_connect = mocker.patch('local_data_api.resources.mysql.pymysql.connect')
connection_maker = MySQL.create_connection_maker()
connection_maker()
mock_connect.assert_called_once_with()
def test_execute_select_with_include_metadata(clear, mocker):
connection_mock = mocker.Mock()
cursor_mock = mocker.Mock()
connection_mock.cursor.side_effect = [cursor_mock]
cursor_mock.description = (1, 2, 3, 4, 5, 6, 7), (8, 9, 10, 11, 12, 13, 14)
cursor_mock.fetchall.side_effect = [((1, 'abc'),)]
field_1 = mocker.Mock()
field_1.name = '1'
field_1.org_name = '1'
field_1.flags = 2
field_1.get_column_length.return_value = 5
field_1.scale = 6
field_1.table_name = None
field_2 = mocker.Mock()
field_2.name = '8'
field_2.org_name = '8'
field_2.flags = 2
field_2.get_column_length.return_value = 12
field_2.scale = 13
field_2.table_name = None
cursor_mock._result.fields = [field_1, field_2]
dummy = MySQL(connection_mock, transaction_id='123')
assert (
dummy.execute("select * from users", include_result_metadata=True).dict()
== ExecuteStatementResponse(
numberOfRecordsUpdated=0,
records=[
[dummy.get_field_from_value(1), dummy.get_field_from_value('abc')]
],
columnMetadata=[
ColumnMetadata(
arrayBaseColumnType=0,
isAutoIncrement=False,
isCaseSensitive=False,
isCurrency=False,
isSigned=False,
label='1',
name='1',
nullable=1,
precision=5,
scale=6,
tableName=None,
type=None,
typeName=None,
),
ColumnMetadata(
arrayBaseColumnType=0,
isAutoIncrement=False,
isCaseSensitive=False,
isCurrency=False,
isSigned=False,
label='8',
name='8',
nullable=1,
precision=12,
scale=13,
tableName=None,
type=None,
typeName=None,
),
],
).dict()
)
cursor_mock.execute.assert_called_once_with('select * from users')
cursor_mock.close.assert_called_once_with()
def test_from_value(mocker) -> None:
connection_mock = mocker.Mock()
dummy = MySQL(connection_mock)
helper_default_test_field(dummy)
| true | true |
f7ff3c71fd5dd77a6b787423e569e08d641c51b4 | 15,360 | py | Python | api/app/registrationResponse/tests.py | cclauss/Baobab | e7aad3a63237be5f16e6441b89b3cc708ec19acd | [
"Apache-2.0"
] | null | null | null | api/app/registrationResponse/tests.py | cclauss/Baobab | e7aad3a63237be5f16e6441b89b3cc708ec19acd | [
"Apache-2.0"
] | null | null | null | api/app/registrationResponse/tests.py | cclauss/Baobab | e7aad3a63237be5f16e6441b89b3cc708ec19acd | [
"Apache-2.0"
] | null | null | null | from app.registration.models import RegistrationForm, Registration
from app.registration.models import RegistrationSection
import json
from datetime import datetime, timedelta
from app.utils.testing import ApiTestCase
from app.users.models import AppUser, UserCategory, Country
from app.events.models import Event
from app.registration.models import Offer
from app.registration.models import RegistrationQuestion
from app import app, db
class RegistrationApiTest(ApiTestCase):
def seed_static_data(self, create_registration=False):
test_user = self.add_user('something@email.com', 'Some', 'Thing', 'Mr')
test_user2 = self.add_user('something2@email.com', 'Something2', 'Thing2', 'Mrs')
event_admin = self.add_user('event_admin@ea.com', 'event_admin', is_admin=True)
db.session.commit()
event = Event(
name="Tech Talk",
description="tech talking",
start_date=datetime(2019, 12, 12, 10, 10, 10),
end_date=datetime(2020, 12, 12, 10, 10, 10),
)
db.session.add(event)
db.session.commit()
self.offer = Offer(
user_id=test_user.id,
event_id=event.id,
offer_date=datetime.now(),
expiry_date=datetime.now() + timedelta(days=15),
payment_required=False,
travel_award=True,
accommodation_award=False,
responded_at=datetime.now())
self.offer.candidate_response = True
self.offer.accepted_travel_award = True
db.session.add(self.offer)
db.session.commit()
self.offer2 = Offer(
user_id=test_user2.id,
event_id=event.id,
offer_date=datetime.now(),
expiry_date=datetime.now() + timedelta(days=15),
payment_required=True,
travel_award=True,
accommodation_award=False,
responded_at=datetime.now())
db.session.add(self.offer2)
db.session.commit()
self.offer3 = Offer(
user_id=event_admin.id,
event_id=event.id,
offer_date=datetime.now(),
expiry_date=datetime.now() + timedelta(days=15),
payment_required=True,
travel_award=False,
accommodation_award=True,
responded_at=datetime.now())
db.session.add(self.offer3)
db.session.commit()
self.form = RegistrationForm(
event_id=event.id
)
db.session.add(self.form)
db.session.commit()
section = RegistrationSection(
registration_form_id=self.form.id,
name="Section 1",
description="the section description",
order=1,
show_for_travel_award=True,
show_for_accommodation_award=False,
show_for_payment_required=False,
)
db.session.add(section)
db.session.commit()
section2 = RegistrationSection(
registration_form_id=self.form.id,
name="Section 2",
description="the section 2 description",
order=1,
show_for_travel_award=True,
show_for_accommodation_award=False,
show_for_payment_required=False,
)
db.session.add(section2)
db.session.commit()
self.question = RegistrationQuestion(
section_id=section.id,
registration_form_id=self.form.id,
description="Question 1",
type="short-text",
is_required=True,
order=1,
placeholder="the placeholder",
headline="the headline",
validation_regex="[]/",
validation_text=" text"
)
db.session.add(self.question)
db.session.commit()
self.question2 = RegistrationQuestion(
section_id=section2.id,
registration_form_id=self.form.id,
description="Question 2",
type="short-text",
is_required=True,
order=1,
placeholder="the placeholder",
headline="the headline",
validation_regex="[]/",
validation_text=" text"
)
db.session.add(self.question2)
db.session.commit()
self.question3 = RegistrationQuestion(
section_id=section2.id,
registration_form_id=self.form.id,
description="Question 3",
type="short-text",
is_required=True,
order=1,
placeholder="the placeholder",
headline="the headline",
validation_regex="[]/",
validation_text=" text"
)
db.session.add(self.question3)
db.session.commit()
self.headers = self.get_auth_header_for("something@email.com")
self.headers2 = self.get_auth_header_for("something2@email.com")
self.adminHeaders = self.get_auth_header_for("event_admin@ea.com")
if create_registration:
self.registration1 = Registration(self.offer.id, self.form.id, confirmed=False)
db.session.add(self.registration1)
db.session.commit()
self.registration2 = Registration(self.offer2.id, self.form.id, confirmed=True)
db.session.add(self.registration2)
db.session.commit()
self.registration3 = Registration(self.offer3.id, self.form.id, confirmed=False)
db.session.add(self.registration3)
db.session.commit()
db.session.flush()
def get_auth_header_for(self, email):
body = {
'email': email,
'password': 'abc'
}
response = self.app.post('api/v1/authenticate', data=body)
data = json.loads(response.data)
header = {'Authorization': data['token']}
return header
def test_create_registration(self):
with app.app_context():
self.seed_static_data(create_registration=False)
registration_data = {
'offer_id': self.offer.id,
'registration_form_id': self.form.id,
'answers': [
{
'registration_question_id': self.question.id,
'value': 'Answer 1'
},
{
'registration_question_id': self.question2.id,
'value': 'Hello world, this is the 2nd answer.'
},
{
'registration_question_id': self.question3.id,
'value': 'Hello world, this is the 3rd answer.'
}
]
}
response = self.app.post(
'/api/v1/registration-response',
data=json.dumps(registration_data),
content_type='application/json',
headers=self.headers)
self.assertEqual(response.status_code, 201)
def test_get_registration(self):
with app.app_context():
self.seed_static_data()
registration_data = {
'offer_id': self.offer.id,
'registration_form_id': self.form.id,
'answers': [
{
'registration_question_id': self.question.id,
'value': 'Answer 1'
},
{
'registration_question_id': self.question2.id,
'value': 'Hello world, this is the 2nd answer.'
},
{
'registration_question_id': self.question3.id,
'value': 'Hello world, this is the 3rd answer.'
}
]
}
response = self.app.post(
'/api/v1/registration-response',
data=json.dumps(registration_data),
content_type='application/json',
headers=self.headers
)
response = self.app.get(
'/api/v1/registration-response',
content_type='application/json',
headers=self.headers)
self.assertEqual(response.status_code, 200)
def test_update_200(self):
"""Test if update work"""
with app.app_context():
self.seed_static_data()
registration_data = {
'offer_id': self.offer.id,
'registration_form_id': self.form.id,
'answers': [
{
'registration_question_id': self.question.id,
'value': 'Answer 1'
},
{
'registration_question_id': self.question2.id,
'value': 'Hello world, this is the 2nd answer.'
},
{
'registration_question_id': self.question3.id,
'value': 'Hello world, this is the 3rd answer.'
}
]
}
response = self.app.post(
'/api/v1/registration-response',
data=json.dumps(registration_data),
content_type='application/json',
headers=self.headers
)
data = json.loads(response.data)
put_registration_data = {
'registration_id': data['id'],
'offer_id': self.offer.id,
'registration_form_id': self.form.id,
'answers': [
{
'registration_question_id': self.question.id,
'value': 'Answer 1'
},
{
'registration_question_id': self.question2.id,
'value': 'Hello world, this is the 2nd answer.'
},
{
'registration_question_id': self.question3.id,
'value': 'Hello world, this is the 3rd answer.'
}
]
}
post_response = self.app.put(
'/api/v1/registration-response',
data=json.dumps(put_registration_data),
content_type='application/json',
headers=self.headers)
self.assertEqual(post_response.status_code, 200)
def test_update_missing(self):
"""Test that 404 is returned if we try to update a registration for a user that doesnt exist"""
with app.app_context():
self.seed_static_data()
registration_data = {
'registration_id': 50,
'offer_id': self.offer.id,
'registration_form_id': self.form.id,
'answers': [
{
'registration_question_id': self.question.id,
'value': 'Answer 1'
},
{
'registration_question_id': self.question2.id,
'value': 'Hello world, this is the 2nd answer.'
},
{
'registration_question_id': self.question3.id,
'value': 'Hello world, this is the 3rd answer.'
}
]
}
response = self.app.put(
'/api/v1/registration-response',
data=json.dumps(registration_data),
content_type='application/json',
headers=self.headers)
self.assertEqual(response.status_code, 404)
def test_get_unconfirmed_not_event_admin(self):
with app.app_context():
self.seed_static_data()
response = self.app.get('/api/v1/registration/unconfirmed?event_id=1',
headers=self.headers)
self.assertEqual(response.status_code, 403)
def test_get_unconfirmed(self):
with app.app_context():
self.seed_static_data(create_registration=True)
response = self.app.get('/api/v1/registration/unconfirmed?event_id=1',
headers=self.adminHeaders)
responses = json.loads(response.data)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(responses), 2)
self.assertEqual(responses[0]['registration_id'], self.registration1.id)
self.assertEqual(responses[0]['user_id'], self.offer.user_id)
self.assertEqual(responses[0]['firstname'], 'Some')
self.assertEqual(responses[0]['lastname'], 'Thing')
self.assertEqual(responses[0]['email'], 'something@email.com')
self.assertEqual(responses[0]['user_category'], 'Postdoc')
self.assertEqual(responses[0]['affiliation'], 'university X')
self.assertEqual(responses[0]['created_at'][:9], datetime.today().isoformat()[:9])
self.assertEqual(responses[1]['registration_id'], self.registration3.id)
self.assertEqual(responses[1]['user_id'], self.offer3.user_id)
self.assertEqual(responses[1]['firstname'], 'event_admin')
self.assertEqual(responses[1]['lastname'], 'Lastname')
self.assertEqual(responses[1]['email'], 'event_admin@ea.com')
self.assertEqual(responses[1]['user_category'], 'Postdoc')
self.assertEqual(responses[1]['affiliation'], 'university X')
self.assertEqual(responses[1]['created_at'][:9], datetime.today().isoformat()[:9])
def test_get_confirmed_not_event_admin(self):
with app.app_context():
self.seed_static_data()
response = self.app.get('/api/v1/registration/confirmed?event_id=1',
headers=self.headers)
self.assertEqual(response.status_code, 403)
def test_get_confirmed(self):
with app.app_context():
self.seed_static_data(create_registration=True)
response = self.app.get('/api/v1/registration/confirmed?event_id=1',
headers=self.adminHeaders)
responses = json.loads(response.data)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(responses), 3)
def test_confirm_admin(self):
with app.app_context():
self.seed_static_data(create_registration=True)
response = self.app.post('/api/v1/registration/confirm',
data={'registration_id': self.registration1.id},
headers=self.headers)
self.assertEqual(response.status_code, 403)
def test_confirm(self):
with app.app_context():
self.seed_static_data(create_registration=True)
response = self.app.post('/api/v1/registration/confirm',
data={'registration_id': self.registration1.id},
headers=self.adminHeaders)
self.assertEqual(response.status_code, 200)
updated_registration = db.session.query(Registration).filter(Registration.id == self.registration1.id).one()
self.assertTrue(updated_registration.confirmed)
| 39.083969 | 120 | 0.544596 | from app.registration.models import RegistrationForm, Registration
from app.registration.models import RegistrationSection
import json
from datetime import datetime, timedelta
from app.utils.testing import ApiTestCase
from app.users.models import AppUser, UserCategory, Country
from app.events.models import Event
from app.registration.models import Offer
from app.registration.models import RegistrationQuestion
from app import app, db
class RegistrationApiTest(ApiTestCase):
def seed_static_data(self, create_registration=False):
test_user = self.add_user('something@email.com', 'Some', 'Thing', 'Mr')
test_user2 = self.add_user('something2@email.com', 'Something2', 'Thing2', 'Mrs')
event_admin = self.add_user('event_admin@ea.com', 'event_admin', is_admin=True)
db.session.commit()
event = Event(
name="Tech Talk",
description="tech talking",
start_date=datetime(2019, 12, 12, 10, 10, 10),
end_date=datetime(2020, 12, 12, 10, 10, 10),
)
db.session.add(event)
db.session.commit()
self.offer = Offer(
user_id=test_user.id,
event_id=event.id,
offer_date=datetime.now(),
expiry_date=datetime.now() + timedelta(days=15),
payment_required=False,
travel_award=True,
accommodation_award=False,
responded_at=datetime.now())
self.offer.candidate_response = True
self.offer.accepted_travel_award = True
db.session.add(self.offer)
db.session.commit()
self.offer2 = Offer(
user_id=test_user2.id,
event_id=event.id,
offer_date=datetime.now(),
expiry_date=datetime.now() + timedelta(days=15),
payment_required=True,
travel_award=True,
accommodation_award=False,
responded_at=datetime.now())
db.session.add(self.offer2)
db.session.commit()
self.offer3 = Offer(
user_id=event_admin.id,
event_id=event.id,
offer_date=datetime.now(),
expiry_date=datetime.now() + timedelta(days=15),
payment_required=True,
travel_award=False,
accommodation_award=True,
responded_at=datetime.now())
db.session.add(self.offer3)
db.session.commit()
self.form = RegistrationForm(
event_id=event.id
)
db.session.add(self.form)
db.session.commit()
section = RegistrationSection(
registration_form_id=self.form.id,
name="Section 1",
description="the section description",
order=1,
show_for_travel_award=True,
show_for_accommodation_award=False,
show_for_payment_required=False,
)
db.session.add(section)
db.session.commit()
section2 = RegistrationSection(
registration_form_id=self.form.id,
name="Section 2",
description="the section 2 description",
order=1,
show_for_travel_award=True,
show_for_accommodation_award=False,
show_for_payment_required=False,
)
db.session.add(section2)
db.session.commit()
self.question = RegistrationQuestion(
section_id=section.id,
registration_form_id=self.form.id,
description="Question 1",
type="short-text",
is_required=True,
order=1,
placeholder="the placeholder",
headline="the headline",
validation_regex="[]/",
validation_text=" text"
)
db.session.add(self.question)
db.session.commit()
self.question2 = RegistrationQuestion(
section_id=section2.id,
registration_form_id=self.form.id,
description="Question 2",
type="short-text",
is_required=True,
order=1,
placeholder="the placeholder",
headline="the headline",
validation_regex="[]/",
validation_text=" text"
)
db.session.add(self.question2)
db.session.commit()
self.question3 = RegistrationQuestion(
section_id=section2.id,
registration_form_id=self.form.id,
description="Question 3",
type="short-text",
is_required=True,
order=1,
placeholder="the placeholder",
headline="the headline",
validation_regex="[]/",
validation_text=" text"
)
db.session.add(self.question3)
db.session.commit()
self.headers = self.get_auth_header_for("something@email.com")
self.headers2 = self.get_auth_header_for("something2@email.com")
self.adminHeaders = self.get_auth_header_for("event_admin@ea.com")
if create_registration:
self.registration1 = Registration(self.offer.id, self.form.id, confirmed=False)
db.session.add(self.registration1)
db.session.commit()
self.registration2 = Registration(self.offer2.id, self.form.id, confirmed=True)
db.session.add(self.registration2)
db.session.commit()
self.registration3 = Registration(self.offer3.id, self.form.id, confirmed=False)
db.session.add(self.registration3)
db.session.commit()
db.session.flush()
def get_auth_header_for(self, email):
body = {
'email': email,
'password': 'abc'
}
response = self.app.post('api/v1/authenticate', data=body)
data = json.loads(response.data)
header = {'Authorization': data['token']}
return header
def test_create_registration(self):
with app.app_context():
self.seed_static_data(create_registration=False)
registration_data = {
'offer_id': self.offer.id,
'registration_form_id': self.form.id,
'answers': [
{
'registration_question_id': self.question.id,
'value': 'Answer 1'
},
{
'registration_question_id': self.question2.id,
'value': 'Hello world, this is the 2nd answer.'
},
{
'registration_question_id': self.question3.id,
'value': 'Hello world, this is the 3rd answer.'
}
]
}
response = self.app.post(
'/api/v1/registration-response',
data=json.dumps(registration_data),
content_type='application/json',
headers=self.headers)
self.assertEqual(response.status_code, 201)
def test_get_registration(self):
with app.app_context():
self.seed_static_data()
registration_data = {
'offer_id': self.offer.id,
'registration_form_id': self.form.id,
'answers': [
{
'registration_question_id': self.question.id,
'value': 'Answer 1'
},
{
'registration_question_id': self.question2.id,
'value': 'Hello world, this is the 2nd answer.'
},
{
'registration_question_id': self.question3.id,
'value': 'Hello world, this is the 3rd answer.'
}
]
}
response = self.app.post(
'/api/v1/registration-response',
data=json.dumps(registration_data),
content_type='application/json',
headers=self.headers
)
response = self.app.get(
'/api/v1/registration-response',
content_type='application/json',
headers=self.headers)
self.assertEqual(response.status_code, 200)
def test_update_200(self):
with app.app_context():
self.seed_static_data()
registration_data = {
'offer_id': self.offer.id,
'registration_form_id': self.form.id,
'answers': [
{
'registration_question_id': self.question.id,
'value': 'Answer 1'
},
{
'registration_question_id': self.question2.id,
'value': 'Hello world, this is the 2nd answer.'
},
{
'registration_question_id': self.question3.id,
'value': 'Hello world, this is the 3rd answer.'
}
]
}
response = self.app.post(
'/api/v1/registration-response',
data=json.dumps(registration_data),
content_type='application/json',
headers=self.headers
)
data = json.loads(response.data)
put_registration_data = {
'registration_id': data['id'],
'offer_id': self.offer.id,
'registration_form_id': self.form.id,
'answers': [
{
'registration_question_id': self.question.id,
'value': 'Answer 1'
},
{
'registration_question_id': self.question2.id,
'value': 'Hello world, this is the 2nd answer.'
},
{
'registration_question_id': self.question3.id,
'value': 'Hello world, this is the 3rd answer.'
}
]
}
post_response = self.app.put(
'/api/v1/registration-response',
data=json.dumps(put_registration_data),
content_type='application/json',
headers=self.headers)
self.assertEqual(post_response.status_code, 200)
def test_update_missing(self):
with app.app_context():
self.seed_static_data()
registration_data = {
'registration_id': 50,
'offer_id': self.offer.id,
'registration_form_id': self.form.id,
'answers': [
{
'registration_question_id': self.question.id,
'value': 'Answer 1'
},
{
'registration_question_id': self.question2.id,
'value': 'Hello world, this is the 2nd answer.'
},
{
'registration_question_id': self.question3.id,
'value': 'Hello world, this is the 3rd answer.'
}
]
}
response = self.app.put(
'/api/v1/registration-response',
data=json.dumps(registration_data),
content_type='application/json',
headers=self.headers)
self.assertEqual(response.status_code, 404)
def test_get_unconfirmed_not_event_admin(self):
with app.app_context():
self.seed_static_data()
response = self.app.get('/api/v1/registration/unconfirmed?event_id=1',
headers=self.headers)
self.assertEqual(response.status_code, 403)
def test_get_unconfirmed(self):
with app.app_context():
self.seed_static_data(create_registration=True)
response = self.app.get('/api/v1/registration/unconfirmed?event_id=1',
headers=self.adminHeaders)
responses = json.loads(response.data)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(responses), 2)
self.assertEqual(responses[0]['registration_id'], self.registration1.id)
self.assertEqual(responses[0]['user_id'], self.offer.user_id)
self.assertEqual(responses[0]['firstname'], 'Some')
self.assertEqual(responses[0]['lastname'], 'Thing')
self.assertEqual(responses[0]['email'], 'something@email.com')
self.assertEqual(responses[0]['user_category'], 'Postdoc')
self.assertEqual(responses[0]['affiliation'], 'university X')
self.assertEqual(responses[0]['created_at'][:9], datetime.today().isoformat()[:9])
self.assertEqual(responses[1]['registration_id'], self.registration3.id)
self.assertEqual(responses[1]['user_id'], self.offer3.user_id)
self.assertEqual(responses[1]['firstname'], 'event_admin')
self.assertEqual(responses[1]['lastname'], 'Lastname')
self.assertEqual(responses[1]['email'], 'event_admin@ea.com')
self.assertEqual(responses[1]['user_category'], 'Postdoc')
self.assertEqual(responses[1]['affiliation'], 'university X')
self.assertEqual(responses[1]['created_at'][:9], datetime.today().isoformat()[:9])
def test_get_confirmed_not_event_admin(self):
with app.app_context():
self.seed_static_data()
response = self.app.get('/api/v1/registration/confirmed?event_id=1',
headers=self.headers)
self.assertEqual(response.status_code, 403)
def test_get_confirmed(self):
with app.app_context():
self.seed_static_data(create_registration=True)
response = self.app.get('/api/v1/registration/confirmed?event_id=1',
headers=self.adminHeaders)
responses = json.loads(response.data)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(responses), 3)
def test_confirm_admin(self):
with app.app_context():
self.seed_static_data(create_registration=True)
response = self.app.post('/api/v1/registration/confirm',
data={'registration_id': self.registration1.id},
headers=self.headers)
self.assertEqual(response.status_code, 403)
def test_confirm(self):
with app.app_context():
self.seed_static_data(create_registration=True)
response = self.app.post('/api/v1/registration/confirm',
data={'registration_id': self.registration1.id},
headers=self.adminHeaders)
self.assertEqual(response.status_code, 200)
updated_registration = db.session.query(Registration).filter(Registration.id == self.registration1.id).one()
self.assertTrue(updated_registration.confirmed)
| true | true |
f7ff3ca0c18cb91c08e9be2931bae83818bf7916 | 3,768 | py | Python | test/unit/helper/test_helper.py | hanneshauer/python-client | e5909ed4e364d0c980f80e48b3af4acf77bff08e | [
"Apache-2.0"
] | null | null | null | test/unit/helper/test_helper.py | hanneshauer/python-client | e5909ed4e364d0c980f80e48b3af4acf77bff08e | [
"Apache-2.0"
] | null | null | null | test/unit/helper/test_helper.py | hanneshauer/python-client | e5909ed4e364d0c980f80e48b3af4acf77bff08e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import httpretty
from appium import webdriver
# :return: A string of test URL
SERVER_URL_BASE = 'http://localhost:4723/wd/hub'
def appium_command(command):
"""Return a command of Appium
Returns:
str: A string of command URL
"""
return '{}{}'.format(SERVER_URL_BASE, command)
def android_w3c_driver():
"""Return a W3C driver which is generated by a mock response for Android
Returns:
`webdriver.webdriver.WebDriver`: An instance of WebDriver
"""
response_body_json = json.dumps(
{
'value': {
'sessionId': '1234567890',
'capabilities': {
'platform': 'LINUX',
'desired': {
'platformName': 'Android',
'automationName': 'uiautomator2',
'platformVersion': '7.1.1',
'deviceName': 'Android Emulator',
'app': '/test/apps/ApiDemos-debug.apk',
},
'platformName': 'Android',
'automationName': 'uiautomator2',
'platformVersion': '7.1.1',
'deviceName': 'emulator-5554',
'app': '/test/apps/ApiDemos-debug.apk',
'deviceUDID': 'emulator-5554',
'appPackage': 'io.appium.android.apis',
'appWaitPackage': 'io.appium.android.apis',
'appActivity': 'io.appium.android.apis.ApiDemos',
'appWaitActivity': 'io.appium.android.apis.ApiDemos'
}
}
}
)
httpretty.register_uri(
httpretty.POST,
appium_command('/session'),
body=response_body_json
)
desired_caps = {
'platformName': 'Android',
'deviceName': 'Android Emulator',
'app': 'path/to/app',
'automationName': 'UIAutomator2'
}
driver = webdriver.Remote(
SERVER_URL_BASE,
desired_caps
)
return driver
def ios_w3c_driver():
    """Return a W3C driver backed by a mocked Appium response for iOS.

    Returns:
        `webdriver.webdriver.WebDriver`: An instance of WebDriver
    """
    # Capabilities the fake Appium server echoes back for the new session.
    session_capabilities = {
        'device': 'iphone',
        'browserName': 'UICatalog',
        'sdkVersion': '11.4',
        'CFBundleIdentifier': 'com.example.apple-samplecode.UICatalog'
    }
    mock_response = json.dumps(
        {'value': {'sessionId': '1234567890', 'capabilities': session_capabilities}}
    )
    # Intercept the session-creation POST so no real Appium server is needed.
    httpretty.register_uri(
        httpretty.POST,
        appium_command('/session'),
        body=mock_response
    )
    desired_caps = {
        'platformName': 'iOS',
        'deviceName': 'iPhone Simulator',
        'app': 'path/to/app',
        'automationName': 'XCUITest'
    }
    return webdriver.Remote(SERVER_URL_BASE, desired_caps)
def get_httpretty_request_body(request):
    """Parse and return the request's UTF-8 JSON body as Python objects."""
    raw_body = request.body.decode('utf-8')
    return json.loads(raw_body)
| 28.330827 | 82 | 0.556263 |
import json
import httpretty
from appium import webdriver
SERVER_URL_BASE = 'http://localhost:4723/wd/hub'
def appium_command(command):
return '{}{}'.format(SERVER_URL_BASE, command)
def android_w3c_driver():
response_body_json = json.dumps(
{
'value': {
'sessionId': '1234567890',
'capabilities': {
'platform': 'LINUX',
'desired': {
'platformName': 'Android',
'automationName': 'uiautomator2',
'platformVersion': '7.1.1',
'deviceName': 'Android Emulator',
'app': '/test/apps/ApiDemos-debug.apk',
},
'platformName': 'Android',
'automationName': 'uiautomator2',
'platformVersion': '7.1.1',
'deviceName': 'emulator-5554',
'app': '/test/apps/ApiDemos-debug.apk',
'deviceUDID': 'emulator-5554',
'appPackage': 'io.appium.android.apis',
'appWaitPackage': 'io.appium.android.apis',
'appActivity': 'io.appium.android.apis.ApiDemos',
'appWaitActivity': 'io.appium.android.apis.ApiDemos'
}
}
}
)
httpretty.register_uri(
httpretty.POST,
appium_command('/session'),
body=response_body_json
)
desired_caps = {
'platformName': 'Android',
'deviceName': 'Android Emulator',
'app': 'path/to/app',
'automationName': 'UIAutomator2'
}
driver = webdriver.Remote(
SERVER_URL_BASE,
desired_caps
)
return driver
def ios_w3c_driver():
response_body_json = json.dumps(
{
'value': {
'sessionId': '1234567890',
'capabilities': {
'device': 'iphone',
'browserName': 'UICatalog',
'sdkVersion': '11.4',
'CFBundleIdentifier': 'com.example.apple-samplecode.UICatalog'
}
}
}
)
httpretty.register_uri(
httpretty.POST,
appium_command('/session'),
body=response_body_json
)
desired_caps = {
'platformName': 'iOS',
'deviceName': 'iPhone Simulator',
'app': 'path/to/app',
'automationName': 'XCUITest'
}
driver = webdriver.Remote(
SERVER_URL_BASE,
desired_caps
)
return driver
def get_httpretty_request_body(request):
return json.loads(request.body.decode('utf-8'))
| true | true |
f7ff3d1735d45ed7e644e15938bd9ea707708e6f | 5,895 | py | Python | app/connector.py | EgorkA82/Signalerator | f501dbb9400754a04f85e1be438719cb8d11ad5f | [
"MIT"
] | null | null | null | app/connector.py | EgorkA82/Signalerator | f501dbb9400754a04f85e1be438719cb8d11ad5f | [
"MIT"
] | null | null | null | app/connector.py | EgorkA82/Signalerator | f501dbb9400754a04f85e1be438719cb8d11ad5f | [
"MIT"
] | null | null | null | from PyQt5.QtCore import QIODevice
from PyQt5.QtWidgets import QComboBox
from PyQt5 import QtSerialPort
from thread import Thread
from worker import Worker
from ui_controller import Ui_Controller
class Connector():
    """Owns the serial (COM) connection between the GUI and the device.

    Wraps a :class:`QtSerialPort.QSerialPort`, keeps the port-selection
    combo box in sync with the ports actually present, and translates
    normalized signal values (0..1) into bytes written to the device.
    """

    def __init__(self, ui_controller: Ui_Controller) -> None:  # initialize the object
        self.connected: bool = False
        # Lower-cased signatures of known-compatible devices; not referenced
        # elsewhere in this class (presumably meant for port filtering).
        self.acceptable_signatures: list = [signature.lower() for signature in ["Arduino", "CH340"]]
        # Helper for enumerating serial ports (annotation fixed: not a list).
        self.serial_info: QtSerialPort.QSerialPortInfo = QtSerialPort.QSerialPortInfo()
        self.ui_controller: Ui_Controller = ui_controller  # UI facade
        self.ports_list: QComboBox = self.ui_controller.ports_list()  # port-selection combo box
        self.serial: QtSerialPort.QSerialPort = QtSerialPort.QSerialPort()
        self.serial.setBaudRate(9600)  # device communication speed
        self.serial.setParity(QtSerialPort.QSerialPort.Parity.NoParity)
        self.serial.setDataBits(QtSerialPort.QSerialPort.DataBits.Data8)
        self.serial.setStopBits(QtSerialPort.QSerialPort.StopBits.OneStop)
        self.serial.setFlowControl(QtSerialPort.QSerialPort.FlowControl.NoFlowControl)
        # Re-read the voltage bounds whenever the user edits the spin boxes.
        self.ui_controller.ui.maxVoltageSpinBox.valueChanged.connect(self.update_max_voltage)
        self.ui_controller.ui.minVoltageSpinBox.valueChanged.connect(self.update_min_voltage)
        self.init_voltages()
        self.update_ports_list()  # populate the port combo box

    def launch_ports_updater(self) -> None:
        """Start refreshing the port list every second on a worker thread."""
        self.ports_updater_worker: Worker = Worker(self.update_ports_list)
        self.ports_updater_thread: Thread = Thread(interval_ms=1000, worker=self.ports_updater_worker)
        self.ports_updater_thread.run()

    def get_available_ports(self) -> list:
        """Return available ports whose name starts with "COM", excluding COM1."""
        return list(filter(lambda port: port.portName()[:3] == "COM" and port.portName() != "COM1", self.serial_info.availablePorts()))

    def update_ports_list(self) -> None:
        """Refresh the combo box and handle the device disappearing."""
        self.available_ports: list = self.get_available_ports()
        # Is the currently configured port still among the available ones?
        current_port_is_in_ports: bool = any([self.serial.portName() in port.portName() for port in self.available_ports])
        if self.connected and not current_port_is_in_ports:  # device vanished unexpectedly
            self.connected: bool = False
            self.ui_controller.set_mode(Ui_Controller.NOT_CONNECTED, ports_are_available=bool(self.available_ports))
            self.serial.close()  # close the stale connection
        if not self.connected:
            self.ports_list.clear()  # rebuild the selection list from scratch
            for port in self.available_ports:
                self.ports_list.addItem(f"{port.portName()} | {port.description()}", port.portName())
            self.ui_controller.set_mode(Ui_Controller.NOT_CONNECTED, ports_are_available=bool(self.available_ports))

    def open_connection(self, port_name: str) -> bool:
        """Open `port_name` for read/write; return True on success."""
        self.ui_controller.set_mode(Ui_Controller.CONNECTING)
        self.serial.setPortName(port_name)
        self.connected: bool = self.serial.open(QIODevice.ReadWrite)
        if self.connected:
            self.ui_controller.set_mode(Ui_Controller.CONNECTED)
        else:
            self.ui_controller.set_mode(Ui_Controller.NOT_CONNECTED, ports_are_available=bool(self.available_ports))
        return self.connected  # report whether the open succeeded

    def close_connection(self) -> None:  # annotation fixed: nothing is returned
        """Close the active connection and reset the UI to NOT_CONNECTED."""
        self.serial.close()
        self.connected: bool = False
        self.ui_controller.set_mode(Ui_Controller.NOT_CONNECTED, ports_are_available=bool(self.available_ports))

    def send_value(self, value: float) -> int:
        """Scale `value` (0..1) into the configured voltage range and write it.

        NOTE(review): the factor 51 presumably maps volts onto the byte
        range 0-255 (255 / 5 V) — confirm against the device firmware.
        Returns the number of bytes written (QSerialPort.write).
        """
        return self.serial.write(str(int(self.convert(value, 0, 1, self.min_voltage * 51, self.max_voltage * 51))).encode())

    def init_voltages(self) -> None:
        """Read the initial voltage bounds from the spin boxes."""
        self.update_max_voltage()
        self.update_min_voltage()

    def update_max_voltage(self) -> None:
        """Cache the max voltage and keep the min strictly below it."""
        self.max_voltage: float = self.ui_controller.ui.maxVoltageSpinBox.value()
        self.ui_controller.ui.minVoltageSpinBox.setMaximum(self.max_voltage - 0.1)
        self.min_voltage: float = self.ui_controller.ui.minVoltageSpinBox.value()

    def update_min_voltage(self) -> None:
        """Cache the min voltage from its spin box."""
        self.min_voltage: float = self.ui_controller.ui.minVoltageSpinBox.value()

    @staticmethod
    def convert(value: float, from_min: float, from_max: float, to_min: float, to_max: float) -> float:
        """Linearly map `value` from [from_min, from_max] onto [to_min, to_max]."""
        return (value - from_min) * (to_max - to_min) / (from_max - from_min) + to_min
| 63.387097 | 183 | 0.723325 | from PyQt5.QtCore import QIODevice
from PyQt5.QtWidgets import QComboBox
from PyQt5 import QtSerialPort
from thread import Thread
from worker import Worker
from ui_controller import Ui_Controller
class Connector():
def __init__(self, ui_controller: Ui_Controller) -> None:
self.connected: bool = False
self.acceptable_signatures: list = [signature.lower() for signature in ["Arduino", "CH340"]]
self.serial_info: list = QtSerialPort.QSerialPortInfo()
self.ui_controller: Ui_Controller = ui_controller
self.ports_list: QComboBox = self.ui_controller.ports_list()
self.serial: QtSerialPort.QSerialPort = QtSerialPort.QSerialPort()
self.serial.setBaudRate(9600)
self.serial.setParity(QtSerialPort.QSerialPort.Parity.NoParity)
self.serial.setDataBits(QtSerialPort.QSerialPort.DataBits.Data8)
self.serial.setStopBits(QtSerialPort.QSerialPort.StopBits.OneStop)
self.serial.setFlowControl(QtSerialPort.QSerialPort.FlowControl.NoFlowControl)
self.ui_controller.ui.maxVoltageSpinBox.valueChanged.connect(self.update_max_voltage)
self.ui_controller.ui.minVoltageSpinBox.valueChanged.connect(self.update_min_voltage)
self.init_voltages()
self.update_ports_list()
def launch_ports_updater(self) -> None:
self.ports_updater_worker: Worker = Worker(self.update_ports_list)
self.ports_updater_thread: Thread = Thread(interval_ms=1000, worker=self.ports_updater_worker)
self.ports_updater_thread.run()
def get_available_ports(self) -> list:
return list(filter(lambda port: port.portName()[:3] == "COM" and port.portName() != "COM1", self.serial_info.availablePorts()))
def update_ports_list(self) -> None:
self.available_ports: list = self.get_available_ports()
current_port_is_in_ports: bool = any([self.serial.portName() in port.portName() for port in self.available_ports])
if self.connected and not current_port_is_in_ports:
self.connected: bool = False
self.ui_controller.set_mode(Ui_Controller.NOT_CONNECTED, ports_are_available=bool(self.available_ports))
self.serial.close()
if not self.connected:
self.ports_list.clear()
for port in self.available_ports:
self.ports_list.addItem(f"{port.portName()} | {port.description()}", port.portName())
self.ui_controller.set_mode(Ui_Controller.NOT_CONNECTED, ports_are_available=bool(self.available_ports))
def open_connection(self, port_name: str) -> bool:
self.ui_controller.set_mode(Ui_Controller.CONNECTING)
self.serial.setPortName(port_name)
self.connected: bool = self.serial.open(QIODevice.ReadWrite)
if self.connected:
self.ui_controller.set_mode(Ui_Controller.CONNECTED)
else:
self.ui_controller.set_mode(Ui_Controller.NOT_CONNECTED, ports_are_available=bool(self.available_ports))
return self.connected
def close_connection(self) -> bool:
self.serial.close()
self.connected: bool = False
self.ui_controller.set_mode(Ui_Controller.NOT_CONNECTED, ports_are_available=bool(self.available_ports))
def send_value(self, value: float) -> int:
return self.serial.write(str(int(self.convert(value, 0, 1, self.min_voltage * 51, self.max_voltage * 51))).encode())
def init_voltages(self) -> None:
self.update_max_voltage()
self.update_min_voltage()
def update_max_voltage(self) -> None:
self.max_voltage: float = self.ui_controller.ui.maxVoltageSpinBox.value()
self.ui_controller.ui.minVoltageSpinBox.setMaximum(self.max_voltage - 0.1)
self.min_voltage: float = self.ui_controller.ui.minVoltageSpinBox.value()
def update_min_voltage(self) -> None:
self.min_voltage: float = self.ui_controller.ui.minVoltageSpinBox.value()
@staticmethod
def convert(value: float, from_min: float, from_max: float, to_min: float, to_max: float) -> float:
return (value - from_min) * (to_max - to_min) / (from_max - from_min) + to_min
| true | true |
f7ff3e6ac2c23c4925998fed2103c622f16abdf1 | 1,122 | py | Python | kubernetes/test/test_v1beta1_json_schema_props_or_string_array.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | 1 | 2020-05-08T12:41:04.000Z | 2020-05-08T12:41:04.000Z | kubernetes/test/test_v1beta1_json_schema_props_or_string_array.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1beta1_json_schema_props_or_string_array.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | 2 | 2021-07-09T08:49:05.000Z | 2021-08-03T18:08:36.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_json_schema_props_or_string_array import V1beta1JSONSchemaPropsOrStringArray
class TestV1beta1JSONSchemaPropsOrStringArray(unittest.TestCase):
    """Unit-test stubs for the V1beta1JSONSchemaPropsOrStringArray model."""

    def setUp(self):
        """No fixtures are required for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testV1beta1JSONSchemaPropsOrStringArray(self):
        """Placeholder test for V1beta1JSONSchemaPropsOrStringArray.

        FIXME: construct the model with its mandatory attributes set to
        example values, e.g.
        model = kubernetes.client.models.v1beta1_json_schema_props_or_string_array.V1beta1JSONSchemaPropsOrStringArray()
        """
        pass


if __name__ == '__main__':
    unittest.main()
| 24.933333 | 121 | 0.750446 |
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_json_schema_props_or_string_array import V1beta1JSONSchemaPropsOrStringArray
class TestV1beta1JSONSchemaPropsOrStringArray(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1JSONSchemaPropsOrStringArray(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
f7ff40333fd08c8dd162f30b46d3b2e9b92cf3ad | 6,640 | py | Python | bc81asmc/bc81asmc_grammar.py | bcichonski/bc16 | 4fc19735b5d4c0cdd3e97fb3c1a4a76004398d27 | [
"MIT"
] | null | null | null | bc81asmc/bc81asmc_grammar.py | bcichonski/bc16 | 4fc19735b5d4c0cdd3e97fb3c1a4a76004398d27 | [
"MIT"
] | null | null | null | bc81asmc/bc81asmc_grammar.py | bcichonski/bc16 | 4fc19735b5d4c0cdd3e97fb3c1a4a76004398d27 | [
"MIT"
] | null | null | null | from parsy import regex, Parser, string, seq, letter, digit
from bc81asmc_ast import *
hexstr2int = lambda x: int(x, 16)
comment = regex(r';[^\r\n]*').desc('comment')
whitespace = regex(r'[ \t]').desc('whitespace')
whitespaces = regex(r'[ \t]*').desc('whitespaces')
ignore = whitespaces
sep = whitespace.at_least(1)
nl = regex(r'(\r\n|\r|\n)').desc('new line')
lexeme = lambda p: p << ignore
colon = lexeme(string(':'))
comma = lexeme(string(','))
hash = string('#')
underscore = string('_')
hexprefix = string('0x')
accumulator = string('a').desc('accumulator')
quote = string("'")
ident = letter + (letter | digit | underscore).many().concat()
quotedstr = lexeme(quote >> regex(r"[^']*") << quote).desc('quoted string')
heximm4 = lexeme(
(hexprefix >> regex(r'[0-9a-fA-F]'))
.map(hexstr2int)
.desc('hex immediate 4bit value')
)
heximm16 = lexeme(
(hexprefix >> regex(r'[0-9a-fA-F]{4}'))
.map(hexstr2int)
.desc('hex immediate 16bit value')
)
heximm8 = lexeme(
(hexprefix >> regex(r'[0-9a-fA-F]{2}'))
.map(hexstr2int)
.desc('hex immediate 8bit value')
)
heximm16 = lexeme(
(hexprefix >> regex(r'[0-9a-fA-F]{4}'))
.map(hexstr2int)
.desc('hex immediate 16bit value')
)
paramreg = (
string('pc') | string('ss') | string('si') | string('f') | string('a') |
string('ci') | string('cs') | string('di') | string('ds')
).desc('register name')
paramdb = lexeme(quotedstr | heximm8)
mNOP = lexeme(string('nop')).map(NOP).desc('nop instruction')
mINC = lexeme(string('inc') >> sep >> accumulator)\
.map(INC)\
.desc('inc instruction')
mDEC = lexeme(string('dec') >> sep >> accumulator)\
.map(DEC)\
.desc('dec instruction')
mNOT = lexeme(string('not') >> sep >> accumulator)\
.map(NOT)\
.desc('not instruction')
mMOVri8 = \
lexeme(string('mov') >> sep >>
seq(
lexeme(paramreg << comma),
heximm8
).combine(MOVRI8)
)\
.desc('mov r,i8 instruction')
mMOVrr = \
lexeme(string('mov') >> sep >>
seq(
lexeme(paramreg << comma),
paramreg
).combine(MOVRR)
)\
.desc('mov r,r instruction')
mMOVrm = \
lexeme(string('mov') >> sep >>
seq(
lexeme(paramreg << comma),
hash >> (paramreg * 2).concat()
).combine(MOVRM)
)\
.desc('mov r,#r instruction')
mMOVmr = \
lexeme(string('mov') >> sep >>
seq(
lexeme(hash >> (paramreg * 2).concat() << comma),
paramreg
).combine(MOVMR)
)\
.desc('mov #r,r instruction')
mADDi8 = \
lexeme(string('add') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('add', x))\
.desc('add i8 instruction')
mADDr = \
lexeme(string('add') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('add', x))\
.desc('add r instruction')
mSUBi8 = \
lexeme(string('sub') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('sub', x))\
.desc('sub i8 instruction')
mSUBr = \
lexeme(string('sub') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('sub', x))\
.desc('sub r instruction')
mANDi8 = \
lexeme(string('and') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('and', x))\
.desc('and i8 instruction')
mANDr = \
lexeme(string('and') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('and', x))\
.desc('and r instruction')
mORi8 = \
lexeme(string('or') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('or', x))\
.desc('or i8 instruction')
mORr = \
lexeme(string('or') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('or', x))\
.desc('or r instruction')
mXORi8 = \
lexeme(string('xor') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('xor', x))\
.desc('xor i8 instruction')
mXORr = \
lexeme(string('xor') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('xor', x))\
.desc('xor r instruction')
mSHLi8 = \
lexeme(string('shl') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('shl', x))\
.desc('shl i8 instruction')
mSHLr = \
lexeme(string('shl') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('shl', x))\
.desc('shl r instruction')
mSHRi8 = \
lexeme(string('shr') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('shr', x))\
.desc('shr i8 instruction')
mSHRr = \
lexeme(string('shr') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('shr', x))\
.desc('shr r instruction')
logictest = lexeme(string('nz') | string('z') | string('nc') | string('c') | \
string('nn') | string('o') | string('no') | string('n'))
labelarg = colon + ident
jmpregargs = ((paramreg * 2)).concat()
jmpaddrarg = lexeme(jmpregargs | labelarg)
jmraddrarg = lexeme(paramreg | labelarg)
mJMP = \
lexeme(seq(
string('jmp') >> sep >> logictest << comma,
jmpaddrarg).combine(JMP))\
.desc('jmp instruction')
mJMR = \
lexeme(seq(
string('jmr') >> sep >> logictest << comma,
jmraddrarg).combine(JMR))\
.desc('jmr instruction')
mPSH = lexeme(string('psh') >> sep >> paramreg)\
.map(PSH)\
.desc('psh instruction')
mPOP = lexeme(string('pop') >> sep >> paramreg)\
.map(POP)\
.desc('pop instruction')
mCAL = lexeme(string('cal') >> sep >> jmpaddrarg)\
.map(CAL)\
.desc('cal instruction')
mRET = lexeme(string('ret'))\
.map(RET)\
.desc('ret instruction')
inreg = lexeme(hash >> (heximm4 | paramreg))
outreg = lexeme(heximm8 | paramreg)
mIN = \
lexeme(seq(
string('in') >> sep >> paramreg << comma << ignore,
inreg).combine(IN))\
.desc('in instruction')
mOUT = \
lexeme(seq(
string('out') >> sep >> hash >> paramreg << comma << ignore,
outreg).combine(OUT))\
.desc('out instruction')
mKIL = lexeme(string('kil'))\
.map(KIL)\
.desc('kil instruction')
mCLC = mADDi8 | mADDr | mSUBi8 | mSUBr | mANDi8 | mANDr | mORi8 | mORr | \
mXORi8 | mXORr | mSHLi8 | mSHLr | mSHRi8 | mSHRr
dORG = (lexeme(string('.org')) >> heximm16)\
.map(ORG)\
.desc('.org directive')
dDB = (lexeme(string('.db')) >> paramdb.sep_by(comma))\
.map(DB)\
.desc('.db directive')
dMV = lexeme(seq(lexeme(string('.mv')) >> lexeme((paramreg * 2).concat()) << comma,
labelarg).combine(MV))\
.desc('.mv directive')
mnemonic = mNOP | mINC | mDEC | mNOT | mMOVri8 | mMOVrr | mMOVrm | mMOVmr | mCLC | mJMP | mJMR | mKIL | mCAL | mRET | mIN | mOUT | mPSH | mPOP
directive = dORG | dDB | dMV
label = lexeme(ident << colon)
instruction = mnemonic | directive
linecomment = (ignore >> comment).map(LineComment)
line = (linecomment | (ignore >> seq(label.optional(), instruction, comment.optional()).combine(LINE))) << nl
program = Parser.many(line)
| 26.882591 | 142 | 0.578163 | from parsy import regex, Parser, string, seq, letter, digit
from bc81asmc_ast import *
hexstr2int = lambda x: int(x, 16)
comment = regex(r';[^\r\n]*').desc('comment')
whitespace = regex(r'[ \t]').desc('whitespace')
whitespaces = regex(r'[ \t]*').desc('whitespaces')
ignore = whitespaces
sep = whitespace.at_least(1)
nl = regex(r'(\r\n|\r|\n)').desc('new line')
lexeme = lambda p: p << ignore
colon = lexeme(string(':'))
comma = lexeme(string(','))
hash = string('#')
underscore = string('_')
hexprefix = string('0x')
accumulator = string('a').desc('accumulator')
quote = string("'")
ident = letter + (letter | digit | underscore).many().concat()
quotedstr = lexeme(quote >> regex(r"[^']*") << quote).desc('quoted string')
heximm4 = lexeme(
(hexprefix >> regex(r'[0-9a-fA-F]'))
.map(hexstr2int)
.desc('hex immediate 4bit value')
)
heximm16 = lexeme(
(hexprefix >> regex(r'[0-9a-fA-F]{4}'))
.map(hexstr2int)
.desc('hex immediate 16bit value')
)
heximm8 = lexeme(
(hexprefix >> regex(r'[0-9a-fA-F]{2}'))
.map(hexstr2int)
.desc('hex immediate 8bit value')
)
heximm16 = lexeme(
(hexprefix >> regex(r'[0-9a-fA-F]{4}'))
.map(hexstr2int)
.desc('hex immediate 16bit value')
)
paramreg = (
string('pc') | string('ss') | string('si') | string('f') | string('a') |
string('ci') | string('cs') | string('di') | string('ds')
).desc('register name')
paramdb = lexeme(quotedstr | heximm8)
mNOP = lexeme(string('nop')).map(NOP).desc('nop instruction')
mINC = lexeme(string('inc') >> sep >> accumulator)\
.map(INC)\
.desc('inc instruction')
mDEC = lexeme(string('dec') >> sep >> accumulator)\
.map(DEC)\
.desc('dec instruction')
mNOT = lexeme(string('not') >> sep >> accumulator)\
.map(NOT)\
.desc('not instruction')
mMOVri8 = \
lexeme(string('mov') >> sep >>
seq(
lexeme(paramreg << comma),
heximm8
).combine(MOVRI8)
)\
.desc('mov r,i8 instruction')
mMOVrr = \
lexeme(string('mov') >> sep >>
seq(
lexeme(paramreg << comma),
paramreg
).combine(MOVRR)
)\
.desc('mov r,r instruction')
mMOVrm = \
lexeme(string('mov') >> sep >>
seq(
lexeme(paramreg << comma),
hash >> (paramreg * 2).concat()
).combine(MOVRM)
)\
.desc('mov r,#r instruction')
mMOVmr = \
lexeme(string('mov') >> sep >>
seq(
lexeme(hash >> (paramreg * 2).concat() << comma),
paramreg
).combine(MOVMR)
)\
.desc('mov #r,r instruction')
mADDi8 = \
lexeme(string('add') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('add', x))\
.desc('add i8 instruction')
mADDr = \
lexeme(string('add') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('add', x))\
.desc('add r instruction')
mSUBi8 = \
lexeme(string('sub') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('sub', x))\
.desc('sub i8 instruction')
mSUBr = \
lexeme(string('sub') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('sub', x))\
.desc('sub r instruction')
mANDi8 = \
lexeme(string('and') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('and', x))\
.desc('and i8 instruction')
mANDr = \
lexeme(string('and') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('and', x))\
.desc('and r instruction')
mORi8 = \
lexeme(string('or') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('or', x))\
.desc('or i8 instruction')
mORr = \
lexeme(string('or') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('or', x))\
.desc('or r instruction')
mXORi8 = \
lexeme(string('xor') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('xor', x))\
.desc('xor i8 instruction')
mXORr = \
lexeme(string('xor') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('xor', x))\
.desc('xor r instruction')
mSHLi8 = \
lexeme(string('shl') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('shl', x))\
.desc('shl i8 instruction')
mSHLr = \
lexeme(string('shl') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('shl', x))\
.desc('shl r instruction')
mSHRi8 = \
lexeme(string('shr') >> sep >> heximm8)\
.map(lambda x: CLC_A_IMM('shr', x))\
.desc('shr i8 instruction')
mSHRr = \
lexeme(string('shr') >> sep >> paramreg)\
.map(lambda x: CLC_A_R('shr', x))\
.desc('shr r instruction')
logictest = lexeme(string('nz') | string('z') | string('nc') | string('c') | \
string('nn') | string('o') | string('no') | string('n'))
labelarg = colon + ident
jmpregargs = ((paramreg * 2)).concat()
jmpaddrarg = lexeme(jmpregargs | labelarg)
jmraddrarg = lexeme(paramreg | labelarg)
mJMP = \
lexeme(seq(
string('jmp') >> sep >> logictest << comma,
jmpaddrarg).combine(JMP))\
.desc('jmp instruction')
mJMR = \
lexeme(seq(
string('jmr') >> sep >> logictest << comma,
jmraddrarg).combine(JMR))\
.desc('jmr instruction')
mPSH = lexeme(string('psh') >> sep >> paramreg)\
.map(PSH)\
.desc('psh instruction')
mPOP = lexeme(string('pop') >> sep >> paramreg)\
.map(POP)\
.desc('pop instruction')
mCAL = lexeme(string('cal') >> sep >> jmpaddrarg)\
.map(CAL)\
.desc('cal instruction')
mRET = lexeme(string('ret'))\
.map(RET)\
.desc('ret instruction')
inreg = lexeme(hash >> (heximm4 | paramreg))
outreg = lexeme(heximm8 | paramreg)
mIN = \
lexeme(seq(
string('in') >> sep >> paramreg << comma << ignore,
inreg).combine(IN))\
.desc('in instruction')
mOUT = \
lexeme(seq(
string('out') >> sep >> hash >> paramreg << comma << ignore,
outreg).combine(OUT))\
.desc('out instruction')
mKIL = lexeme(string('kil'))\
.map(KIL)\
.desc('kil instruction')
mCLC = mADDi8 | mADDr | mSUBi8 | mSUBr | mANDi8 | mANDr | mORi8 | mORr | \
mXORi8 | mXORr | mSHLi8 | mSHLr | mSHRi8 | mSHRr
dORG = (lexeme(string('.org')) >> heximm16)\
.map(ORG)\
.desc('.org directive')
dDB = (lexeme(string('.db')) >> paramdb.sep_by(comma))\
.map(DB)\
.desc('.db directive')
dMV = lexeme(seq(lexeme(string('.mv')) >> lexeme((paramreg * 2).concat()) << comma,
labelarg).combine(MV))\
.desc('.mv directive')
mnemonic = mNOP | mINC | mDEC | mNOT | mMOVri8 | mMOVrr | mMOVrm | mMOVmr | mCLC | mJMP | mJMR | mKIL | mCAL | mRET | mIN | mOUT | mPSH | mPOP
directive = dORG | dDB | dMV
label = lexeme(ident << colon)
instruction = mnemonic | directive
linecomment = (ignore >> comment).map(LineComment)
line = (linecomment | (ignore >> seq(label.optional(), instruction, comment.optional()).combine(LINE))) << nl
program = Parser.many(line)
| true | true |
f7ff40a2f77b4d301dd03bef5daa9c5977b0a75f | 3,739 | py | Python | generated-libraries/python/netapp/ses/shelf_bay_port_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/ses/shelf_bay_port_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/ses/shelf_bay_port_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | from netapp.netapp_object import NetAppObject
class ShelfBayPortInfo(NetAppObject):
    """
    Shelf bay port specific information.
    """
    # All comparisons below use `is not None` instead of `!= None`
    # (PEP 8 E711); behavior is unchanged for the None sentinel.
    _disk_name = None
    @property
    def disk_name(self):
        """
        if port-designator is "disk_bay" and there is a disk installed
        in the bay, then this will be the disk name. Otherwise the
        field will be missing.
        """
        return self._disk_name
    @disk_name.setter
    def disk_name(self, val):
        if val is not None:
            self.validate('disk_name', val)
        self._disk_name = val
    _port_state = None
    @property
    def port_state(self):
        """
        Current port state. Possible values are: "ok", "empty",
        "unkwn_lnk", "no_signal", "unused", "unkwn", "unknown",
        "dis_man", "dis_unusd", "dis_smp", "dis_loswd", "dis_dispa",
        "dis_invwd", "dis_reset", "dis_phchg", "dis_mir", "dis_crc",
        "dis_clk",
        "byp_init", "byp_gen", "byp_man", "byp_xmit", "byp_lipf8",
        "byp_dto", "byp_rlos", "byp_clos", "byp_tbi", "byp_rprt",
        "byp_stall", "byp_wrd", "byp_crc", "byp_lip", "byp_osc",
        "byp_clk", "byp_mir", "byp_lipf7", "byp_bzr", "byp_self",
        "byp_flt", "byp_pwr", "byp_pcycl",
        "warn_lip", "warn_wrdb", "warn_wrd", "warn_crc", "warn_clk",
        "term-err", "term", "autoterm".
        """
        return self._port_state
    @port_state.setter
    def port_state(self, val):
        if val is not None:
            self.validate('port_state', val)
        self._port_state = val
    _disk_uid = None
    @property
    def disk_uid(self):
        """
        if port-designator is "disk_bay" and there is a disk installed
        in the bay, then this will be UID of the disk. Otherwise the
        field will be missing.
        """
        return self._disk_uid
    @disk_uid.setter
    def disk_uid(self, val):
        if val is not None:
            self.validate('disk_uid', val)
        self._disk_uid = val
    _bay_no = None
    @property
    def bay_no(self):
        """
        Disk bay number or port number, if applicable. In some
        instances bay numbers do apply and will not be present. An
        example is an ESH shelf with a single "in" and a single "out"
        port.
        """
        return self._bay_no
    @bay_no.setter
    def bay_no(self, val):
        if val is not None:
            self.validate('bay_no', val)
        self._bay_no = val
    _port_designator = None
    @property
    def port_designator(self):
        """
        Shelf bay port designator. Possible values:
        "in", "out", "aux", "sqr", "cir", "sil", "hi_ho",
        "a_to_b", "b_to_a", "disk_bay".
        """
        return self._port_designator
    @port_designator.setter
    def port_designator(self, val):
        if val is not None:
            self.validate('port_designator', val)
        self._port_designator = val
    @staticmethod
    def get_api_name():
        """Return the ONTAPI element name for this object."""
        return "shelf-bay-port-info"
    @staticmethod
    def get_desired_attrs():
        """Return the attribute names requested from the API."""
        return [
            'disk-name',
            'port-state',
            'disk-uid',
            'bay-no',
            'port-designator',
        ]
    def describe_properties(self):
        """Describe each property's type, multiplicity and requiredness."""
        return {
            'disk_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'port_state': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'disk_uid': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'bay_no': { 'class': int, 'is_list': False, 'required': 'optional' },
            'port_designator': { 'class': basestring, 'is_list': False, 'required': 'optional' },
        }
| 32.513043 | 97 | 0.561113 | from netapp.netapp_object import NetAppObject
class ShelfBayPortInfo(NetAppObject):
    """Model object for one ONTAPI 'shelf-bay-port-info' record.

    Each API field is exposed as a property; setters run
    NetAppObject.validate() on non-None values before storing them.
    """
    # Backing storage for each field; None until populated.
    _disk_name = None
    @property
    def disk_name(self):
        """Name of the disk seen on this shelf-bay port (or None)."""
        return self._disk_name
    @disk_name.setter
    def disk_name(self, val):
        if val != None:
            self.validate('disk_name', val)
        self._disk_name = val
    _port_state = None
    @property
    def port_state(self):
        """Operational state of the port (or None)."""
        return self._port_state
    @port_state.setter
    def port_state(self, val):
        if val != None:
            self.validate('port_state', val)
        self._port_state = val
    _disk_uid = None
    @property
    def disk_uid(self):
        """Unique identifier of the disk (or None)."""
        return self._disk_uid
    @disk_uid.setter
    def disk_uid(self, val):
        if val != None:
            self.validate('disk_uid', val)
        self._disk_uid = val
    _bay_no = None
    @property
    def bay_no(self):
        """Shelf bay number (or None)."""
        return self._bay_no
    @bay_no.setter
    def bay_no(self, val):
        if val != None:
            self.validate('bay_no', val)
        self._bay_no = val
    _port_designator = None
    @property
    def port_designator(self):
        """Port designator string (or None)."""
        return self._port_designator
    @port_designator.setter
    def port_designator(self, val):
        if val != None:
            self.validate('port_designator', val)
        self._port_designator = val
    @staticmethod
    def get_api_name():
        """Return the ONTAPI element name this model object maps to."""
        return "shelf-bay-port-info"
    @staticmethod
    def get_desired_attrs():
        """Return the API attribute names requested by default."""
        return [
            'disk-name',
            'port-state',
            'disk-uid',
            'bay-no',
            'port-designator',
        ]
    def describe_properties(self):
        """Map each python attribute name to its API typing metadata."""
        # NOTE(review): `basestring` is Python-2-only -- confirm the
        # library still targets Python 2 before porting.
        return {
            'disk_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'port_state': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'disk_uid': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'bay_no': { 'class': int, 'is_list': False, 'required': 'optional' },
            'port_designator': { 'class': basestring, 'is_list': False, 'required': 'optional' },
        }
| true | true |
f7ff43737a58c3099b07e301a3a7eb95e4ae8cc4 | 2,621 | py | Python | pyfos/utils/zoning/zoning_cfg_show.py | sandeepv451/Pyfostest | e2c64bcb826b5aaf870c5adc9791a8c3a0fa21fa | [
"Apache-2.0"
] | null | null | null | pyfos/utils/zoning/zoning_cfg_show.py | sandeepv451/Pyfostest | e2c64bcb826b5aaf870c5adc9791a8c3a0fa21fa | [
"Apache-2.0"
] | null | null | null | pyfos/utils/zoning/zoning_cfg_show.py | sandeepv451/Pyfostest | e2c64bcb826b5aaf870c5adc9791a8c3a0fa21fa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`zoning_cfg_show` - PyFOS util for specific Zoning use case.
***********************************************************************************
The :mod:`zoning_cfg_show` provides for specific Zoning use case.
This module is a standalone script to display Zone DB.
* inputs:
* -L=<login>: Login ID. If not provided, interactive
prompt will request one.
* -P=<password>: Password. If not provided, interactive
prompt will request one.
* -i=<IP address>: IP address
* -f=<VFID>: VFID or -1 if VF is disabled. If unspecified,
VFID of 128 is assumed.
* outputs:
* Python dictionary content with RESTCONF response data
"""
import pyfos.pyfos_auth as pyfos_auth
import pyfos.pyfos_brocade_zone as pyfos_zone
import pyfos.pyfos_util as pyfos_util
import sys
import pyfos.utils.brcd_util as brcd_util
def usage():
    """Emit this utility's extra usage text (nothing beyond a blank line)."""
    print()
def main(argv):
    """Log in to the switch, print the defined and effective zone DBs, log out.

    argv carries the CLI options (login/password/IP/VFID); missing
    credentials are requested interactively by the generic input helper.
    """
    valid_options = []
    inputs = brcd_util.generic_input(argv, usage, valid_options)
    session = pyfos_auth.login(inputs["login"], inputs["password"],
                               inputs["ipaddr"], inputs["secured"],
                               verbose=inputs["verbose"])
    if pyfos_auth.is_failed_login(session):
        print("login failed because",
              session.get(pyfos_auth.CREDENTIAL_KEY)
              [pyfos_auth.LOGIN_ERROR_KEY])
        brcd_util.full_usage(usage)
        sys.exit()
    # Register the session so it is logged out even on abnormal exit.
    brcd_util.exit_register(session)
    vfid = None
    if 'vfid' in inputs:
        vfid = inputs['vfid']
    if vfid is not None:
        pyfos_auth.vfid_set(session, vfid)
    # Dump the defined (configured) zone database first ...
    defined_zone = pyfos_zone.defined_configuration.get(session)
    pyfos_util.response_print(defined_zone)
    # ... then the effective (currently enabled) configuration.
    effective_zone = pyfos_zone.effective_configuration.get(session)
    pyfos_util.response_print(effective_zone)
    # options = effective_zone.options(session)
    # print(options)
    pyfos_auth.logout(session)
# Script entry point: forward CLI args (without the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| 30.476744 | 83 | 0.675315 |
import pyfos.pyfos_auth as pyfos_auth
import pyfos.pyfos_brocade_zone as pyfos_zone
import pyfos.pyfos_util as pyfos_util
import sys
import pyfos.utils.brcd_util as brcd_util
def usage():
    """Emit this utility's extra usage text (nothing beyond a blank line)."""
    print()
def main(argv):
    """Log in to the switch, print the defined and effective zone DBs, log out."""
    valid_options = []
    inputs = brcd_util.generic_input(argv, usage, valid_options)
    session = pyfos_auth.login(inputs["login"], inputs["password"],
                               inputs["ipaddr"], inputs["secured"],
                               verbose=inputs["verbose"])
    if pyfos_auth.is_failed_login(session):
        print("login failed because",
              session.get(pyfos_auth.CREDENTIAL_KEY)
              [pyfos_auth.LOGIN_ERROR_KEY])
        brcd_util.full_usage(usage)
        sys.exit()
    # Register the session so it is logged out even on abnormal exit.
    brcd_util.exit_register(session)
    vfid = None
    if 'vfid' in inputs:
        vfid = inputs['vfid']
    if vfid is not None:
        pyfos_auth.vfid_set(session, vfid)
    # Defined (configured) zone DB first, then the effective (enabled) one.
    defined_zone = pyfos_zone.defined_configuration.get(session)
    pyfos_util.response_print(defined_zone)
    effective_zone = pyfos_zone.effective_configuration.get(session)
    pyfos_util.response_print(effective_zone)
    pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
| true | true |
f7ff43b052bd154c29a95c60bd148c6bf8bf0a10 | 1,440 | py | Python | project/reports/global_warming/myutils.py | vinceHardy/learning | 941e5979d471567411e7593c36617ef4a8e47f70 | [
"MIT"
] | 1 | 2019-11-05T06:17:40.000Z | 2019-11-05T06:17:40.000Z | project/reports/global_warming/myutils.py | johnqoe/ntds_2016 | 2c207029e7c93807fe57b0a4ae098c8afe38a661 | [
"MIT"
] | null | null | null | project/reports/global_warming/myutils.py | johnqoe/ntds_2016 | 2c207029e7c93807fe57b0a4ae098c8afe38a661 | [
"MIT"
] | null | null | null | import pandas as pd
import os.path
import matplotlib.pyplot as plt
def makeTimeSeries(df):
    """Return *df* re-indexed by its parsed 'dt' timestamps.

    The result has a DatetimeIndex built from the 'dt' column and no longer
    carries that column.  Unlike the previous version, the caller's frame
    is left untouched (no in-place index mutation on the argument).
    """
    indexed = df.drop('dt', axis=1)
    indexed.index = pd.to_datetime(df.dt)
    return indexed
def differenciate(X):
    """Return the first differences of series *X* as a pandas Series.

    Element i of the result is X[i] - X[i-1] taken positionally, indexed by
    X.index[1:].  Uses .iloc so label-indexed (e.g. datetime) series work
    without relying on deprecated integer fallback indexing.  Also handles
    len(X) < 2, where the old DataFrame round-trip raised KeyError.
    """
    steps = [X.iloc[i] - X.iloc[i - 1] for i in range(1, len(X))]
    # name=0 mirrors the old implementation, which extracted column 0
    # of a temporary DataFrame.
    return pd.Series(steps, index=X.index[1:], name=0)
from statsmodels.tsa.stattools import adfuller
def test_stationarity(timeseries):
    """Plot 10-sample rolling mean/std of *timeseries* and print an ADF test.

    Shows a non-blocking matplotlib figure of the series with its rolling
    statistics, then prints the augmented Dickey-Fuller statistic, p-value,
    lag count and critical values.
    """
    # Determing rolling statistics.  pd.rolling_mean/pd.rolling_std were
    # removed from pandas; Series.rolling(...) is the supported API.
    rolmean = timeseries.rolling(window=10).mean()
    rolstd = timeseries.rolling(window=10).std()
    # Plot rolling statistics:
    plt.figure(figsize=(16,8))
    orig = plt.plot(timeseries, color='blue',label='Original')
    mean = plt.plot(rolmean, color='red', label='Rolling Mean')
    std = plt.plot(rolstd, color='black', label = 'Rolling Std')
    plt.xlabel('years',fontsize=16)
    plt.ylabel('Temperature, °C',fontsize=16)
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation',fontsize=24)
    plt.show(block=False)
    # Perform Dickey-Fuller test:
    print('Results of Dickey-Fuller Test:')
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
    for key,value in dftest[4].items():
        dfoutput['Critical Value (%s)'%key] = value
print(dfoutput) | 32.727273 | 116 | 0.665972 | import pandas as pd
import os.path
import matplotlib.pyplot as plt
def makeTimeSeries(df):
    """Return *df* re-indexed by its parsed 'dt' timestamps.

    The result has a DatetimeIndex built from the 'dt' column and no longer
    carries that column; the caller's frame is left untouched.
    """
    indexed = df.drop('dt', axis=1)
    indexed.index = pd.to_datetime(df.dt)
    return indexed
def differenciate(X):
    """Return the first differences of series *X* as a pandas Series.

    Element i of the result is X[i] - X[i-1] taken positionally, indexed by
    X.index[1:]; .iloc avoids deprecated integer fallback indexing.
    """
    steps = [X.iloc[i] - X.iloc[i - 1] for i in range(1, len(X))]
    # name=0 mirrors the old DataFrame-column-0 round trip.
    return pd.Series(steps, index=X.index[1:], name=0)
from statsmodels.tsa.stattools import adfuller
def test_stationarity(timeseries):
    """Plot 10-sample rolling mean/std of *timeseries* and print an ADF test."""
    # pd.rolling_mean/pd.rolling_std were removed from pandas;
    # Series.rolling(...) is the supported API.
    rolmean = timeseries.rolling(window=10).mean()
    rolstd = timeseries.rolling(window=10).std()
    plt.figure(figsize=(16,8))
    orig = plt.plot(timeseries, color='blue',label='Original')
    mean = plt.plot(rolmean, color='red', label='Rolling Mean')
    std = plt.plot(rolstd, color='black', label = 'Rolling Std')
    plt.xlabel('years',fontsize=16)
    plt.ylabel('Temperature, °C',fontsize=16)
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation',fontsize=24)
    plt.show(block=False)
    print('Results of Dickey-Fuller Test:')
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
    for key,value in dftest[4].items():
        dfoutput['Critical Value (%s)'%key] = value
print(dfoutput) | true | true |
f7ff4421e76f3ecb984ac90dadab9ff91e452bf2 | 521 | py | Python | env/lib/python3.8/site-packages/plotly/validators/scattercarpet/_visible.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/scattercarpet/_visible.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/scattercarpet/_visible.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the scattercarpet trace's 'visible' attribute."""
    def __init__(self, plotly_name="visible", parent_name="scattercarpet", **kwargs):
        # Pop the defaults out first so explicit caller values win and the
        # remaining kwargs are forwarded untouched.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "info")
        values = kwargs.pop("values", [True, False, "legendonly"])
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
| 37.214286 | 85 | 0.648752 | import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the scattercarpet trace's 'visible' attribute."""
    def __init__(self, plotly_name="visible", parent_name="scattercarpet", **kwargs):
        # Pop the defaults out first so explicit caller values win and the
        # remaining kwargs are forwarded untouched.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "info")
        values = kwargs.pop("values", [True, False, "legendonly"])
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
| true | true |
f7ff4740953c7f9a66ab8e918dcccc71a45b7c86 | 6,016 | py | Python | kubernetes_asyncio/client/models/v1_label_selector_requirement.py | playground-julia/kubernetes_asyncio | 91b2c41eedd282d9ebc059377fb7f207e220133d | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_label_selector_requirement.py | playground-julia/kubernetes_asyncio | 91b2c41eedd282d9ebc059377fb7f207e220133d | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_label_selector_requirement.py | playground-julia/kubernetes_asyncio | 91b2c41eedd282d9ebc059377fb7f207e220133d | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.15.9
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1LabelSelectorRequirement(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'key': 'str',
        'operator': 'str',
        'values': 'list[str]'
    }
    attribute_map = {
        'key': 'key',
        'operator': 'operator',
        'values': 'values'
    }
    def __init__(self, key=None, operator=None, values=None, local_vars_configuration=None):  # noqa: E501
        """V1LabelSelectorRequirement - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._key = None
        self._operator = None
        self._values = None
        self.discriminator = None
        # key and operator are required; assigning through the properties
        # runs their client-side validation.
        self.key = key
        self.operator = operator
        if values is not None:
            self.values = values
    @property
    def key(self):
        """Gets the key of this V1LabelSelectorRequirement.  # noqa: E501
        key is the label key that the selector applies to.  # noqa: E501
        :return: The key of this V1LabelSelectorRequirement.  # noqa: E501
        :rtype: str
        """
        return self._key
    @key.setter
    def key(self, key):
        """Sets the key of this V1LabelSelectorRequirement.
        key is the label key that the selector applies to.  # noqa: E501
        :param key: The key of this V1LabelSelectorRequirement.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and key is None:  # noqa: E501
            raise ValueError("Invalid value for `key`, must not be `None`")  # noqa: E501
        self._key = key
    @property
    def operator(self):
        """Gets the operator of this V1LabelSelectorRequirement.  # noqa: E501
        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.  # noqa: E501
        :return: The operator of this V1LabelSelectorRequirement.  # noqa: E501
        :rtype: str
        """
        return self._operator
    @operator.setter
    def operator(self, operator):
        """Sets the operator of this V1LabelSelectorRequirement.
        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.  # noqa: E501
        :param operator: The operator of this V1LabelSelectorRequirement.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and operator is None:  # noqa: E501
            raise ValueError("Invalid value for `operator`, must not be `None`")  # noqa: E501
        self._operator = operator
    @property
    def values(self):
        """Gets the values of this V1LabelSelectorRequirement.  # noqa: E501
        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.  # noqa: E501
        :return: The values of this V1LabelSelectorRequirement.  # noqa: E501
        :rtype: list[str]
        """
        return self._values
    @values.setter
    def values(self, values):
        """Sets the values of this V1LabelSelectorRequirement.
        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.  # noqa: E501
        :param values: The values of this V1LabelSelectorRequirement.  # noqa: E501
        :type: list[str]
        """
        self._values = values
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models (anything exposing to_dict),
        # including models held inside lists and inside dict values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1LabelSelectorRequirement):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1LabelSelectorRequirement):
            return True
        return self.to_dict() != other.to_dict()
| 33.237569 | 256 | 0.61619 |
import pprint
import re
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1LabelSelectorRequirement(object):
    """Auto-generated OpenAPI model for a label-selector requirement
    (a key, an operator, and an optional list of values)."""
    # Maps python attribute name -> OpenAPI type, used for (de)serialization.
    openapi_types = {
        'key': 'str',
        'operator': 'str',
        'values': 'list[str]'
    }
    # Maps python attribute name -> JSON key in the API definition.
    attribute_map = {
        'key': 'key',
        'operator': 'operator',
        'values': 'values'
    }
    def __init__(self, key=None, operator=None, values=None, local_vars_configuration=None):
        """Initialize the model; key/operator go through validating setters."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._key = None
        self._operator = None
        self._values = None
        self.discriminator = None
        self.key = key
        self.operator = operator
        if values is not None:
            self.values = values
    @property
    def key(self):
        """The label key that the selector applies to."""
        return self._key
    @key.setter
    def key(self, key):
        """Set the label key; rejects None when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and key is None:
            raise ValueError("Invalid value for `key`, must not be `None`")
        self._key = key
    @property
    def operator(self):
        """The key's relationship to the values (In, NotIn, Exists, DoesNotExist)."""
        return self._operator
    @operator.setter
    def operator(self, operator):
        """Set the operator; rejects None when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and operator is None:
            raise ValueError("Invalid value for `operator`, must not be `None`")
        self._operator = operator
    @property
    def values(self):
        """String values matched by the operator (may be None)."""
        return self._values
    @values.setter
    def values(self, values):
        """Set the values list (no client-side validation applied)."""
        self._values = values
    def to_dict(self):
        """Return the model's properties as a dict, recursing into nested models."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Two requirements are equal when their dict forms match."""
        if not isinstance(other, V1LabelSelectorRequirement):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Inverse of __eq__."""
        if not isinstance(other, V1LabelSelectorRequirement):
            return True
        return self.to_dict() != other.to_dict()
| true | true |
f7ff477a25d2d3bc9a7eecc404b131aa705c6e0d | 3,576 | py | Python | test/ext_tests/test_gasplummer.py | joshuawall/amuse | c2034074ee76c08057c4faa96c32044ab40952e9 | [
"Apache-2.0"
] | 1 | 2019-12-28T22:47:51.000Z | 2019-12-28T22:47:51.000Z | test/ext_tests/test_gasplummer.py | joshuawall/amuse | c2034074ee76c08057c4faa96c32044ab40952e9 | [
"Apache-2.0"
] | null | null | null | test/ext_tests/test_gasplummer.py | joshuawall/amuse | c2034074ee76c08057c4faa96c32044ab40952e9 | [
"Apache-2.0"
] | 2 | 2021-11-19T04:41:37.000Z | 2021-11-20T02:11:17.000Z | import numpy
from amuse.test import amusetest
from amuse.units import nbody_system
from amuse.units import units
from amuse.ic.gasplummer import new_plummer_gas_model, MakePlummerGasModel
class TestPlummerGasModel(amusetest.TestCase):
    """Regression tests for new_plummer_gas_model / MakePlummerGasModel.

    Prints are parenthesized and the deprecated assertEquals alias is
    replaced with assertEqual, so the file also runs under Python 3
    while remaining valid Python 2.
    """
    def test1(self):
        print("Test 1: testing low-level interface (no units or datamodel)")
        # Fixed seed so the sampled positions/energies are reproducible.
        numpy.random.seed(345672)
        mpgm = MakePlummerGasModel(2)
        mass, x, y, z, vx, vy, vz, u = mpgm.new_model()
        self.assertEqual(mass[0], 0.5)
        self.assertEqual(mass[1], 0.5)
        self.assertAlmostEqual(x, [-0.02295788, 0.12829775])
        self.assertAlmostEqual(y, [-0.41054985, 0.14190860])
        self.assertAlmostEqual(z, [-0.50723639, 0.08937734])
        self.assertAlmostEqual(vx, [0.0, 0.0])
        self.assertAlmostEqual(vy, [0.0, 0.0])
        self.assertAlmostEqual(vz, [0.0, 0.0])
        self.assertAlmostEqual(u, [0.28413716, 0.39898137])
    def test2(self):
        print("Test 2: testing user interface, with convert_nbody -> SI units")
        convert_nbody = nbody_system.nbody_to_si(6 | units.kg, 7 | units.m)
        gas = new_plummer_gas_model(2, convert_nbody)
        self.assertEqual(gas[0].mass.value_in(units.kg), 3.0)
        self.assertEqual(gas[1].mass.value_in(units.kg), 3.0)
    def test3(self):
        print("Test 3: testing user interface, without convert_nbody -> nbody units")
        gas = new_plummer_gas_model(2, None)
        self.assertEqual(gas[0].mass.value_in(nbody_system.mass), 0.5)
        self.assertEqual(gas[1].mass.value_in(nbody_system.mass), 0.5)
    def test4(self):
        print("Test 4: test new_plummer_gas_model, model properties")
        numpy.random.seed(345672)
        gas = new_plummer_gas_model(100)
        self.assertEqual(len(gas), 100)
        self.assertAlmostEqual(gas.kinetic_energy(), 0.00 | nbody_system.energy)
        self.assertIsOfOrder(gas.thermal_energy(), 0.25 | nbody_system.energy)
        self.assertAlmostEqual(gas.thermal_energy(), 0.238075609078 | nbody_system.energy)
        self.assertIsOfOrder(gas.potential_energy(G=nbody_system.G), -0.50 | nbody_system.energy)
        self.assertAlmostEqual(gas.potential_energy(G=nbody_system.G), -0.447052244411 | nbody_system.energy)
        self.assertAlmostEqual(gas.center_of_mass(), [0, 0, 0] | nbody_system.length)
        self.assertAlmostEqual(gas.center_of_mass_velocity(), [0, 0, 0] | nbody_system.speed)
        self.assertAlmostEqual(gas.total_mass(), 1.00 | nbody_system.mass)
        self.assertIsOfOrder(gas.virial_radius(), 1.00 | nbody_system.length)
        self.assertAlmostEqual(gas.virial_radius(), 1.11843751206 | nbody_system.length)
    def test5(self):
        print("Test 5: test new_plummer_gas_model with do_scale")
        gas = new_plummer_gas_model(100, do_scale=True)
        self.assertEqual(len(gas), 100)
        self.assertAlmostEqual(gas.kinetic_energy(), 0.00 | nbody_system.energy)
        self.assertAlmostEqual(gas.thermal_energy(), 0.25 | nbody_system.energy)
        self.assertAlmostEqual(gas.potential_energy(G=nbody_system.G), -0.50 | nbody_system.energy)
        self.assertAlmostEqual(gas.center_of_mass(), [0, 0, 0] | nbody_system.length)
        self.assertAlmostEqual(gas.center_of_mass_velocity(), [0, 0, 0] | nbody_system.speed)
        self.assertAlmostEqual(gas.total_mass(), 1.00 | nbody_system.mass)
        self.assertAlmostEqual(gas.virial_radius(), 1.00 | nbody_system.length)
| 51.826087 | 109 | 0.670022 | import numpy
from amuse.test import amusetest
from amuse.units import nbody_system
from amuse.units import units
from amuse.ic.gasplummer import new_plummer_gas_model, MakePlummerGasModel
class TestPlummerGasModel(amusetest.TestCase):
    """Regression tests for new_plummer_gas_model / MakePlummerGasModel.

    Prints are parenthesized and the deprecated assertEquals alias is
    replaced with assertEqual, so the file also runs under Python 3
    while remaining valid Python 2.
    """
    def test1(self):
        print("Test 1: testing low-level interface (no units or datamodel)")
        # Fixed seed so the sampled positions/energies are reproducible.
        numpy.random.seed(345672)
        mpgm = MakePlummerGasModel(2)
        mass, x, y, z, vx, vy, vz, u = mpgm.new_model()
        self.assertEqual(mass[0], 0.5)
        self.assertEqual(mass[1], 0.5)
        self.assertAlmostEqual(x, [-0.02295788, 0.12829775])
        self.assertAlmostEqual(y, [-0.41054985, 0.14190860])
        self.assertAlmostEqual(z, [-0.50723639, 0.08937734])
        self.assertAlmostEqual(vx, [0.0, 0.0])
        self.assertAlmostEqual(vy, [0.0, 0.0])
        self.assertAlmostEqual(vz, [0.0, 0.0])
        self.assertAlmostEqual(u, [0.28413716, 0.39898137])
    def test2(self):
        print("Test 2: testing user interface, with convert_nbody -> SI units")
        convert_nbody = nbody_system.nbody_to_si(6 | units.kg, 7 | units.m)
        gas = new_plummer_gas_model(2, convert_nbody)
        self.assertEqual(gas[0].mass.value_in(units.kg), 3.0)
        self.assertEqual(gas[1].mass.value_in(units.kg), 3.0)
    def test3(self):
        print("Test 3: testing user interface, without convert_nbody -> nbody units")
        gas = new_plummer_gas_model(2, None)
        self.assertEqual(gas[0].mass.value_in(nbody_system.mass), 0.5)
        self.assertEqual(gas[1].mass.value_in(nbody_system.mass), 0.5)
    def test4(self):
        print("Test 4: test new_plummer_gas_model, model properties")
        numpy.random.seed(345672)
        gas = new_plummer_gas_model(100)
        self.assertEqual(len(gas), 100)
        self.assertAlmostEqual(gas.kinetic_energy(), 0.00 | nbody_system.energy)
        self.assertIsOfOrder(gas.thermal_energy(), 0.25 | nbody_system.energy)
        self.assertAlmostEqual(gas.thermal_energy(), 0.238075609078 | nbody_system.energy)
        self.assertIsOfOrder(gas.potential_energy(G=nbody_system.G), -0.50 | nbody_system.energy)
        self.assertAlmostEqual(gas.potential_energy(G=nbody_system.G), -0.447052244411 | nbody_system.energy)
        self.assertAlmostEqual(gas.center_of_mass(), [0, 0, 0] | nbody_system.length)
        self.assertAlmostEqual(gas.center_of_mass_velocity(), [0, 0, 0] | nbody_system.speed)
        self.assertAlmostEqual(gas.total_mass(), 1.00 | nbody_system.mass)
        self.assertIsOfOrder(gas.virial_radius(), 1.00 | nbody_system.length)
        self.assertAlmostEqual(gas.virial_radius(), 1.11843751206 | nbody_system.length)
    def test5(self):
        print("Test 5: test new_plummer_gas_model with do_scale")
        gas = new_plummer_gas_model(100, do_scale=True)
        self.assertEqual(len(gas), 100)
        self.assertAlmostEqual(gas.kinetic_energy(), 0.00 | nbody_system.energy)
        self.assertAlmostEqual(gas.thermal_energy(), 0.25 | nbody_system.energy)
        self.assertAlmostEqual(gas.potential_energy(G=nbody_system.G), -0.50 | nbody_system.energy)
        self.assertAlmostEqual(gas.center_of_mass(), [0, 0, 0] | nbody_system.length)
        self.assertAlmostEqual(gas.center_of_mass_velocity(), [0, 0, 0] | nbody_system.speed)
        self.assertAlmostEqual(gas.total_mass(), 1.00 | nbody_system.mass)
        self.assertAlmostEqual(gas.virial_radius(), 1.00 | nbody_system.length)
| false | true |
f7ff491d28423bb8ce76dba51252d4f33c3eab19 | 2,539 | py | Python | jrnl/plugins/fancy_exporter.py | aallbrig/jrnl | 95aa3bc1aec9da88678981741f859de5e54cc52a | [
"MIT"
] | null | null | null | jrnl/plugins/fancy_exporter.py | aallbrig/jrnl | 95aa3bc1aec9da88678981741f859de5e54cc52a | [
"MIT"
] | null | null | null | jrnl/plugins/fancy_exporter.py | aallbrig/jrnl | 95aa3bc1aec9da88678981741f859de5e54cc52a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import, unicode_literals, print_function
from .text_exporter import TextExporter
from textwrap import TextWrapper
class FancyExporter(TextExporter):
"""This Exporter can convert entries and journals into text with unicode box drawing characters."""
names = ["fancy", "boxed"]
extension = "txt"
border_a = "┎"
border_b = "─"
border_c = "╮"
border_d = "╘"
border_e = "═"
border_f = "╕"
border_g = "┃"
border_h = "│"
border_i = "┠"
border_j = "╌"
border_k = "┤"
border_l = "┖"
border_m = "┘"
@classmethod
def export_entry(cls, entry):
"""Returns a fancy unicode representation of a single entry."""
date_str = entry.date.strftime(entry.journal.config["timeformat"])
linewrap = entry.journal.config["linewrap"] or 78
initial_linewrap = linewrap - len(date_str) - 2
body_linewrap = linewrap - 2
card = [
cls.border_a + cls.border_b * (initial_linewrap) + cls.border_c + date_str
]
w = TextWrapper(
width=initial_linewrap,
initial_indent=cls.border_g + " ",
subsequent_indent=cls.border_g + " ",
)
title_lines = w.wrap(entry.title)
card.append(
title_lines[0].ljust(initial_linewrap + 1)
+ cls.border_d
+ cls.border_e * (len(date_str) - 1)
+ cls.border_f
)
w.width = body_linewrap
if len(title_lines) > 1:
for line in w.wrap(
" ".join(
[
title_line[len(w.subsequent_indent) :]
for title_line in title_lines[1:]
]
)
):
card.append(line.ljust(body_linewrap + 1) + cls.border_h)
if entry.body:
card.append(cls.border_i + cls.border_j * body_linewrap + cls.border_k)
for line in entry.body.splitlines():
body_lines = w.wrap(line) or [cls.border_g]
for body_line in body_lines:
card.append(body_line.ljust(body_linewrap + 1) + cls.border_h)
card.append(cls.border_l + cls.border_b * body_linewrap + cls.border_m)
return "\n".join(card)
@classmethod
def export_journal(cls, journal):
"""Returns a unicode representation of an entire journal."""
return "\n".join(cls.export_entry(entry) for entry in journal)
| 33.853333 | 103 | 0.570303 |
from __future__ import absolute_import, unicode_literals, print_function
from .text_exporter import TextExporter
from textwrap import TextWrapper
class FancyExporter(TextExporter):
names = ["fancy", "boxed"]
extension = "txt"
border_a = "┎"
border_b = "─"
border_c = "╮"
border_d = "╘"
border_e = "═"
border_f = "╕"
border_g = "┃"
border_h = "│"
border_i = "┠"
border_j = "╌"
border_k = "┤"
border_l = "┖"
border_m = "┘"
@classmethod
def export_entry(cls, entry):
date_str = entry.date.strftime(entry.journal.config["timeformat"])
linewrap = entry.journal.config["linewrap"] or 78
initial_linewrap = linewrap - len(date_str) - 2
body_linewrap = linewrap - 2
card = [
cls.border_a + cls.border_b * (initial_linewrap) + cls.border_c + date_str
]
w = TextWrapper(
width=initial_linewrap,
initial_indent=cls.border_g + " ",
subsequent_indent=cls.border_g + " ",
)
title_lines = w.wrap(entry.title)
card.append(
title_lines[0].ljust(initial_linewrap + 1)
+ cls.border_d
+ cls.border_e * (len(date_str) - 1)
+ cls.border_f
)
w.width = body_linewrap
if len(title_lines) > 1:
for line in w.wrap(
" ".join(
[
title_line[len(w.subsequent_indent) :]
for title_line in title_lines[1:]
]
)
):
card.append(line.ljust(body_linewrap + 1) + cls.border_h)
if entry.body:
card.append(cls.border_i + cls.border_j * body_linewrap + cls.border_k)
for line in entry.body.splitlines():
body_lines = w.wrap(line) or [cls.border_g]
for body_line in body_lines:
card.append(body_line.ljust(body_linewrap + 1) + cls.border_h)
card.append(cls.border_l + cls.border_b * body_linewrap + cls.border_m)
return "\n".join(card)
@classmethod
def export_journal(cls, journal):
return "\n".join(cls.export_entry(entry) for entry in journal)
| true | true |
f7ff4970d703cf1fa8240f47ba515a3f6ea19618 | 3,145 | py | Python | leaky/generator/generate.py | cypher-me/HAS-Qualifier-Challenges | bb795303716155dad4a930880a58fecb5d9b50c5 | [
"MIT"
] | 75 | 2020-07-20T20:54:00.000Z | 2022-03-09T09:18:37.000Z | leaky/generator/generate.py | cypher-me/HAS-Qualifier-Challenges | bb795303716155dad4a930880a58fecb5d9b50c5 | [
"MIT"
] | 3 | 2020-09-13T00:46:49.000Z | 2021-07-06T16:18:22.000Z | leaky/generator/generate.py | cypher-me/HAS-Qualifier-Challenges | bb795303716155dad4a930880a58fecb5d9b50c5 | [
"MIT"
] | 14 | 2020-07-22T16:34:51.000Z | 2021-09-13T12:19:59.000Z | import os,stat,sys
import subprocess
import struct
import tarfile
from pprint import pprint
from hashlib import sha256
from binascii import unhexlify
from pool import ThreadPool
from Crypto.Cipher import AES
def run(count, seed):
    """Run generate_part.py for one work unit and return its stdout bytes.

    Uses communicate() instead of wait()+read: waiting on a child whose
    stdout is a PIPE can deadlock once the pipe buffer fills, per the
    subprocess documentation.
    """
    cmd = ["python", "generate_part.py", "%d" % count, "%d" % seed]
    p = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _ = p.communicate()
    return out
# Script entry point: derive the per-seed AES key, patch it into the
# challenge binary, fan out timing-sample generation, and write the
# challenge Readme containing the encrypted flag.
if __name__ == "__main__":
    seed = os.getenv("SEED", "0")
    salt = os.getenv("SALT", "Salting isn't really needed, just doing it on the off-chance seeds are leaked by accident")
    workerCount = int(os.getenv("WORKERS", "4"))
    count = int(os.getenv("COUNT", "100000"))
    # Key = first 16 bytes of SHA-256(salt || seed): deterministic per seed.
    h = sha256()
    h.update(salt.encode('utf-8'))
    h.update(seed.encode('utf-8'))
    key = h.digest()[:16]
    sys.stderr.write("Key Is: %s\n" %key.hex())
    sys.stderr.flush()
    # Patch the placeholder key bytes (0xdeadbeef x 4) in the binary.
    with open("./chal.exe", "rb") as f:
        data = f.read()
    data = data.replace(b"\xde\xad\xbe\xef"*4, key)
    fileOut = "/src/patched.exe"
    with open(fileOut, "wb") as f:
        f.write(data)
    st = os.stat(fileOut)
    os.chmod(fileOut, st.st_mode | stat.S_IEXEC)
    # Fan the sample generation out over a worker pool; the first task
    # absorbs the integer-division remainder so the totals sum to `count`.
    workers = ThreadPool(workerCount)
    tasks = 1024
    roundingerror = count - int(count/tasks)*tasks
    workers.add_task(run, int(count/tasks) + roundingerror, 0)
    for ii in range(1,tasks):
        workers.add_task(run, int(count/tasks), ii)
    textName = "/tmp/test.txt"
    results = workers.get_results()
    with open(textName, "wb") as f:
        for r in results:
            f.write(r)
    print(textName)
    readmeFile = "/tmp/Readme.txt"
    flag = os.getenv("FLAG", "flag{place:holder}")
    # PKCS#7-style padding up to the 16-byte AES block size.
    length = 16 - (len(flag) % 16)
    plain = flag + chr(length)*length
    cipher = AES.new(key, AES.MODE_ECB)
    # NOTE(review): `plain` is str; PyCryptodome's encrypt() requires bytes
    # on Python 3 -- confirm whether plain.encode() is needed here.
    ctext = cipher.encrypt(plain)
    sys.stderr.write("Key is: %s\n" % key.hex())
    sys.stderr.flush()
    with open(readmeFile, "w") as f:
        f.write("""
Hello, fellow space enthusiasts!
I have been tracking a specific satellite and managed to intercept an interesting
piece of data. Unfortunately, the data is encrypted using an AES-128 key with ECB-Mode.
Encrypted Data: %s
Using proprietary documentation, I have learned that the process of generating the
AES key always produces the same first 6 bytes, while the remaining bytes are random:
Key Bytes 0..5: %s
The communication protocol hashes every message into a 128bit digest, which is encrypted
with the satellite key, and sent back as an authenticated ACK. This process fortunately
happens BEFORE the satellite attempts to decrypt and process my message, which it will
immediately drop my message as I cannot encrypt it properly without the key.
I have read about "side channel attacks" on crypto but don't really understand them,
so I'm reaching out to you for help. I know timing data could be important so I've
already used this vulnerability to collect a large data set of encryption times for
various hash values. Please take a look!
\r\n""" % (ctext.hex(), key[0:6].hex()))
    print(readmeFile)
| 32.760417 | 122 | 0.67504 | import os,stat,sys
import subprocess
import struct
import tarfile
from pprint import pprint
from hashlib import sha256
from binascii import unhexlify
from pool import ThreadPool
from Crypto.Cipher import AES
def run(count, seed):
    """Run generate_part.py in a subprocess and return its raw stdout.

    count -- number of samples the child process should generate
    seed  -- integer seed forwarded to the child

    Returns the child's complete stdout as bytes.
    """
    cmd = ["python", "generate_part.py", "%d" % count, "%d" % seed]
    p = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # BUGFIX: the original called p.wait() before reading stdout, which can
    # deadlock once the child fills the OS pipe buffer (see the warning on
    # Popen.wait in the subprocess docs).  communicate() drains the pipe
    # and reaps the child safely.
    out, _ = p.communicate()
    return out
if __name__ == "__main__":
    # Challenge parameters come from the environment; defaults allow a
    # standalone run.
    seed = os.getenv("SEED", "0")
    salt = os.getenv("SALT", "Salting isn't really needed, just doing it on the off-chance seeds are leaked by accident")
    workerCount = int(os.getenv("WORKERS", "4"))
    count = int(os.getenv("COUNT", "100000"))
    # Derive the 16-byte AES key deterministically from salt + seed.
    h = sha256()
    h.update(salt.encode('utf-8'))
    h.update(seed.encode('utf-8'))
    key = h.digest()[:16]
    sys.stderr.write("Key Is: %s\n" %key.hex())
    sys.stderr.flush()
    # Patch the key into the challenge binary over the 16-byte 0xdeadbeef
    # placeholder and make the result executable.
    with open("./chal.exe", "rb") as f:
        data = f.read()
    data = data.replace(b"\xde\xad\xbe\xef"*4, key)
    fileOut = "/src/patched.exe"
    with open(fileOut, "wb") as f:
        f.write(data)
    st = os.stat(fileOut)
    os.chmod(fileOut, st.st_mode | stat.S_IEXEC)
    # Fan sample generation out over the thread pool; the first task takes
    # the division remainder so the task sizes sum to exactly `count`.
    workers = ThreadPool(workerCount)
    tasks = 1024
    roundingerror = count - int(count/tasks)*tasks
    workers.add_task(run, int(count/tasks) + roundingerror, 0)
    for ii in range(1,tasks):
        workers.add_task(run, int(count/tasks), ii)
    textName = "/tmp/test.txt"
    results = workers.get_results()
    with open(textName, "wb") as f:
        for r in results:
            f.write(r)
    print(textName)
    # Encrypt the flag, padded PKCS#7-style to a 16-byte multiple, with
    # AES-128-ECB under the derived key, and embed it in the Readme.
    readmeFile = "/tmp/Readme.txt"
    flag = os.getenv("FLAG", "flag{place:holder}")
    length = 16 - (len(flag) % 16)
    plain = flag + chr(length)*length
    cipher = AES.new(key, AES.MODE_ECB)
    ctext = cipher.encrypt(plain)
    sys.stderr.write("Key is: %s\n" % key.hex())
    sys.stderr.flush()
    with open(readmeFile, "w") as f:
        f.write("""
Hello, fellow space enthusiasts!
I have been tracking a specific satellite and managed to intercept an interesting
piece of data. Unfortunately, the data is encrypted using an AES-128 key with ECB-Mode.
Encrypted Data: %s
Using proprietary documentation, I have learned that the process of generating the
AES key always produces the same first 6 bytes, while the remaining bytes are random:
Key Bytes 0..5: %s
The communication protocol hashes every message into a 128bit digest, which is encrypted
with the satellite key, and sent back as an authenticated ACK. This process fortunately
happens BEFORE the satellite attempts to decrypt and process my message, which it will
immediately drop my message as I cannot encrypt it properly without the key.
I have read about "side channel attacks" on crypto but don't really understand them,
so I'm reaching out to you for help. I know timing data could be important so I've
already used this vulnerability to collect a large data set of encryption times for
various hash values. Please take a look!
\r\n""" % (ctext.hex(), key[0:6].hex()))
print(readmeFile)
| true | true |
f7ff4981672d5db59358a445c1256f857a8d0354 | 678 | py | Python | adaptivebot.py | coolioasjulio/Rock-Paper-Scissors-Royale | a75dfdb6bbc62c1a6bc493252f79b97f2aecc325 | [
"MIT"
] | null | null | null | adaptivebot.py | coolioasjulio/Rock-Paper-Scissors-Royale | a75dfdb6bbc62c1a6bc493252f79b97f2aecc325 | [
"MIT"
] | null | null | null | adaptivebot.py | coolioasjulio/Rock-Paper-Scissors-Royale | a75dfdb6bbc62c1a6bc493252f79b97f2aecc325 | [
"MIT"
] | null | null | null | # RPS bot
import random
name = 'adaptivebot'
class RPSBot(object):
    """Rock-paper-scissors bot that adapts to the opponent's recent honesty."""
    name = name  # module-level bot name ('adaptivebot')
    def __init__(self):
        # Maps each move to the move that beats it.
        self.winners = {"R": "P", "P": "S", "S": "R"}
    def get_hint(self, other_past, my_past):
        # other_past/my_past look like sequences of (claim, move) pairs --
        # inferred from the unpacking below; confirm against the game driver.
        # If the opponent claimed the same move in the last two rounds,
        # claim its counter; otherwise claim a uniformly random move.
        is_other_constant = len(set([other_claim for other_claim, other_move in other_past[-2:]])) == 1
        return self.winners[other_past[-1][0]] if is_other_constant else random.choice(list(self.winners.keys()))
    def get_move(self, other_past, my_past, other_next, my_next):
        # Beat the opponent's announced next move if their last two claims
        # matched their actual moves; otherwise fall back to our own claim.
        is_other_honest = all([other_claim == other_move for other_claim, other_move in other_past[-2:]])
        return self.winners[other_next] if is_other_honest else my_next | 42.375 | 113 | 0.674041 |
import random
name = 'adaptivebot'
class RPSBot(object):
name = name
def __init__(self):
self.winners = {"R": "P", "P": "S", "S": "R"}
def get_hint(self, other_past, my_past):
is_other_constant = len(set([other_claim for other_claim, other_move in other_past[-2:]])) == 1
return self.winners[other_past[-1][0]] if is_other_constant else random.choice(list(self.winners.keys()))
def get_move(self, other_past, my_past, other_next, my_next):
is_other_honest = all([other_claim == other_move for other_claim, other_move in other_past[-2:]])
return self.winners[other_next] if is_other_honest else my_next | true | true |
f7ff49946392a1cc8689e7a1b43493468f2c07ee | 9,969 | py | Python | auth0/v3/test/management/test_rest.py | eoltean/auth0-python | 656765de6c406e333cb2b4a2d43cc57d3289221d | [
"MIT"
] | null | null | null | auth0/v3/test/management/test_rest.py | eoltean/auth0-python | 656765de6c406e333cb2b4a2d43cc57d3289221d | [
"MIT"
] | null | null | null | auth0/v3/test/management/test_rest.py | eoltean/auth0-python | 656765de6c406e333cb2b4a2d43cc57d3289221d | [
"MIT"
] | null | null | null | import unittest
import json
import mock
from ...management.rest import RestClient
from ...exceptions import Auth0Error
class TestRest(unittest.TestCase):
    """Tests for RestClient's get/post/patch/delete with `requests` mocked.

    Every error-path test used to repeat the same four lines (configure
    the mocked response, call, then check status_code/error_code/message
    on the raised Auth0Error); that boilerplate now lives in
    _check_error().
    """

    # Standard JSON error payload shared by the plain *_errors tests.
    _ERROR_TEXT = ('{"statusCode": 999,'
                   ' "errorCode": "code",'
                   ' "message": "message"}')

    def _client(self):
        # telemetry=False keeps the telemetry header out of the
        # expected-header assertions.
        return RestClient(jwt='a-token', telemetry=False)

    def _check_error(self, do_call, status_code, error_code, message):
        """Invoke *do_call*, expect Auth0Error, and verify its three fields."""
        with self.assertRaises(Auth0Error) as context:
            do_call()
        self.assertEqual(context.exception.status_code, status_code)
        self.assertEqual(context.exception.error_code, error_code)
        self.assertEqual(context.exception.message, message)

    @mock.patch('requests.get')
    def test_get(self, mock_get):
        rc = self._client()
        headers = {'Authorization': 'Bearer a-token'}
        mock_get.return_value.text = '["a", "b"]'
        mock_get.return_value.status_code = 200

        response = rc.get('the-url')
        mock_get.assert_called_with('the-url', params=None, headers=headers)
        self.assertEqual(response, ['a', 'b'])

        response = rc.get(url='the/url', params={'A': 'param', 'B': 'param'})
        mock_get.assert_called_with('the/url',
                                    params={'A': 'param', 'B': 'param'},
                                    headers=headers)
        self.assertEqual(response, ['a', 'b'])

        # An empty body is passed through untouched (no JSON decoding).
        mock_get.return_value.text = ''
        response = rc.get('the/url')
        self.assertEqual(response, '')

    @mock.patch('requests.get')
    def test_get_errors(self, mock_get):
        rc = self._client()
        mock_get.return_value.text = self._ERROR_TEXT
        mock_get.return_value.status_code = 999
        self._check_error(lambda: rc.get('the/url'), 999, 'code', 'message')

    @mock.patch('requests.post')
    def test_post(self, mock_post):
        rc = self._client()
        headers = {'Authorization': 'Bearer a-token',
                   'Content-Type': 'application/json'}
        mock_post.return_value.text = '{"a": "b"}'
        mock_post.return_value.status_code = 200
        data = {'some': 'data'}
        response = rc.post('the/url', data=data)
        mock_post.assert_called_with('the/url', data=json.dumps(data),
                                     headers=headers)
        self.assertEqual(response, {'a': 'b'})

    @mock.patch('requests.post')
    def test_post_errors(self, mock_post):
        rc = self._client()
        mock_post.return_value.text = self._ERROR_TEXT
        mock_post.return_value.status_code = 999
        self._check_error(lambda: rc.post('the-url'), 999, 'code', 'message')

    @mock.patch('requests.post')
    def test_post_errors_with_no_message_property(self, mock_post):
        rc = self._client()
        mock_post.return_value.text = json.dumps({
            "statusCode": 999,
            "errorCode": "code",
            "error": "error"
        })
        mock_post.return_value.status_code = 999
        # Without a "message" field, the "error" field is reported instead.
        self._check_error(lambda: rc.post('the-url'), 999, 'code', 'error')

    @mock.patch('requests.post')
    def test_post_errors_with_no_message_or_error_property(self, mock_post):
        rc = self._client()
        mock_post.return_value.text = json.dumps({
            "statusCode": 999,
            "errorCode": "code"
        })
        mock_post.return_value.status_code = 999
        self._check_error(lambda: rc.post('the-url'), 999, 'code', '')

    @mock.patch('requests.post')
    def test_post_errors_with_message_and_error_property(self, mock_post):
        rc = self._client()
        mock_post.return_value.text = json.dumps({
            "statusCode": 999,
            "errorCode": "code",
            "error": "error",
            "message": "message"
        })
        mock_post.return_value.status_code = 999
        # "message" wins over "error" when both are present.
        self._check_error(lambda: rc.post('the-url'), 999, 'code', 'message')

    @mock.patch('requests.post')
    def test_post_error_with_code_property(self, mock_post):
        rc = self._client()
        for error_status in [400, 500, None]:
            mock_post.return_value.status_code = error_status
            mock_post.return_value.text = '{"errorCode": "e0",' \
                                          '"message": "desc"}'
            self._check_error(lambda: rc.post('the-url'),
                              error_status, 'e0', 'desc')

    @mock.patch('requests.post')
    def test_post_error_with_no_error_code(self, mock_post):
        rc = self._client()
        for error_status in [400, 500, None]:
            mock_post.return_value.status_code = error_status
            mock_post.return_value.text = '{"message": "desc"}'
            self._check_error(lambda: rc.post('the-url'), error_status,
                              'a0.sdk.internal.unknown', 'desc')

    @mock.patch('requests.post')
    def test_post_error_with_text_response(self, mock_post):
        rc = self._client()
        for error_status in [400, 500, None]:
            mock_post.return_value.status_code = error_status
            mock_post.return_value.text = 'there has been a terrible error'
            self._check_error(lambda: rc.post('the-url'), error_status,
                              'a0.sdk.internal.unknown',
                              'there has been a terrible error')

    @mock.patch('requests.post')
    def test_post_error_with_no_response_text(self, mock_post):
        rc = self._client()
        for error_status in [400, 500, None]:
            mock_post.return_value.status_code = error_status
            mock_post.return_value.text = None
            self._check_error(lambda: rc.post('the-url'), error_status,
                              'a0.sdk.internal.unknown', '')

    @mock.patch('requests.patch')
    def test_patch(self, mock_patch):
        rc = self._client()
        headers = {'Authorization': 'Bearer a-token',
                   'Content-Type': 'application/json'}
        mock_patch.return_value.text = '["a", "b"]'
        mock_patch.return_value.status_code = 200
        data = {'some': 'data'}
        response = rc.patch(url='the-url', data=data)
        mock_patch.assert_called_with('the-url', data=json.dumps(data),
                                      headers=headers)
        self.assertEqual(response, ['a', 'b'])

    @mock.patch('requests.patch')
    def test_patch_errors(self, mock_patch):
        rc = self._client()
        mock_patch.return_value.text = self._ERROR_TEXT
        mock_patch.return_value.status_code = 999
        self._check_error(lambda: rc.patch(url='the/url'),
                          999, 'code', 'message')

    @mock.patch('requests.delete')
    def test_delete(self, mock_delete):
        rc = self._client()
        headers = {'Authorization': 'Bearer a-token'}
        mock_delete.return_value.text = '["a", "b"]'
        mock_delete.return_value.status_code = 200
        response = rc.delete(url='the-url/ID')
        mock_delete.assert_called_with('the-url/ID', headers=headers, params={})
        self.assertEqual(response, ['a', 'b'])

    @mock.patch('requests.delete')
    def test_delete_errors(self, mock_delete):
        rc = self._client()
        mock_delete.return_value.text = self._ERROR_TEXT
        mock_delete.return_value.status_code = 999
        self._check_error(lambda: rc.delete(url='the-url'),
                          999, 'code', 'message')
| 37.904943 | 85 | 0.604975 | import unittest
import json
import mock
from ...management.rest import RestClient
from ...exceptions import Auth0Error
class TestRest(unittest.TestCase):
@mock.patch('requests.get')
def test_get(self, mock_get):
rc = RestClient(jwt='a-token', telemetry=False)
headers = {'Authorization': 'Bearer a-token'}
mock_get.return_value.text = '["a", "b"]'
mock_get.return_value.status_code = 200
response = rc.get('the-url')
mock_get.assert_called_with('the-url', params=None, headers=headers)
self.assertEqual(response, ['a', 'b'])
response = rc.get(url='the/url', params={'A': 'param', 'B': 'param'})
mock_get.assert_called_with('the/url', params={'A': 'param',
'B': 'param'},
headers=headers)
self.assertEqual(response, ['a', 'b'])
mock_get.return_value.text = ''
response = rc.get('the/url')
self.assertEqual(response, '')
@mock.patch('requests.get')
def test_get_errors(self, mock_get):
rc = RestClient(jwt='a-token', telemetry=False)
headers = {'Authorization': 'Bearer a-token'}
mock_get.return_value.text = '{"statusCode": 999,' \
' "errorCode": "code",' \
' "message": "message"}'
mock_get.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.get('the/url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'message')
@mock.patch('requests.post')
def test_post(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
headers = {'Authorization': 'Bearer a-token',
'Content-Type': 'application/json'}
mock_post.return_value.text = '{"a": "b"}'
data = {'some': 'data'}
mock_post.return_value.status_code = 200
response = rc.post('the/url', data=data)
mock_post.assert_called_with('the/url', data=json.dumps(data),
headers=headers)
self.assertEqual(response, {'a': 'b'})
@mock.patch('requests.post')
def test_post_errors(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
mock_post.return_value.text = '{"statusCode": 999,' \
' "errorCode": "code",' \
' "message": "message"}'
mock_post.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'message')
@mock.patch('requests.post')
def test_post_errors_with_no_message_property(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
mock_post.return_value.text = json.dumps({
"statusCode": 999,
"errorCode": "code",
"error": "error"
})
mock_post.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'error')
@mock.patch('requests.post')
def test_post_errors_with_no_message_or_error_property(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
mock_post.return_value.text = json.dumps({
"statusCode": 999,
"errorCode": "code"
})
mock_post.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, '')
@mock.patch('requests.post')
def test_post_errors_with_message_and_error_property(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
mock_post.return_value.text = json.dumps({
"statusCode": 999,
"errorCode": "code",
"error": "error",
"message": "message"
})
mock_post.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'message')
@mock.patch('requests.post')
def test_post_error_with_code_property(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = '{"errorCode": "e0",' \
'"message": "desc"}'
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'e0')
self.assertEqual(context.exception.message, 'desc')
@mock.patch('requests.post')
def test_post_error_with_no_error_code(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = '{"message": "desc"}'
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'a0.sdk.internal.unknown')
self.assertEqual(context.exception.message, 'desc')
@mock.patch('requests.post')
def test_post_error_with_text_response(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = 'there has been a terrible error'
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'a0.sdk.internal.unknown')
self.assertEqual(context.exception.message,
'there has been a terrible error')
@mock.patch('requests.post')
def test_post_error_with_no_response_text(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = None
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'a0.sdk.internal.unknown')
self.assertEqual(context.exception.message, '')
@mock.patch('requests.patch')
def test_patch(self, mock_patch):
rc = RestClient(jwt='a-token', telemetry=False)
headers = {'Authorization': 'Bearer a-token',
'Content-Type': 'application/json'}
mock_patch.return_value.text = '["a", "b"]'
mock_patch.return_value.status_code = 200
data = {'some': 'data'}
response = rc.patch(url='the-url', data=data)
mock_patch.assert_called_with('the-url', data=json.dumps(data),
headers=headers)
self.assertEqual(response, ['a', 'b'])
@mock.patch('requests.patch')
def test_patch_errors(self, mock_patch):
rc = RestClient(jwt='a-token', telemetry=False)
mock_patch.return_value.text = '{"statusCode": 999,' \
' "errorCode": "code",' \
' "message": "message"}'
mock_patch.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.patch(url='the/url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'message')
@mock.patch('requests.delete')
def test_delete(self, mock_delete):
rc = RestClient(jwt='a-token', telemetry=False)
headers = {'Authorization': 'Bearer a-token'}
mock_delete.return_value.text = '["a", "b"]'
mock_delete.return_value.status_code = 200
response = rc.delete(url='the-url/ID')
mock_delete.assert_called_with('the-url/ID', headers=headers, params={})
self.assertEqual(response, ['a', 'b'])
@mock.patch('requests.delete')
def test_delete_errors(self, mock_delete):
rc = RestClient(jwt='a-token', telemetry=False)
mock_delete.return_value.text = '{"statusCode": 999,' \
' "errorCode": "code",' \
' "message": "message"}'
mock_delete.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.delete(url='the-url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'message')
| true | true |
f7ff49fc517fae1b91285ef50ec51728c982d535 | 2,580 | py | Python | packages/fetchai/skills/tac_control_contract/parameters.py | marcofavorito/agents-aea | e520f2f5d076a193514e194d94aa76c6423ac5bc | [
"Apache-2.0"
] | null | null | null | packages/fetchai/skills/tac_control_contract/parameters.py | marcofavorito/agents-aea | e520f2f5d076a193514e194d94aa76c6423ac5bc | [
"Apache-2.0"
] | null | null | null | packages/fetchai/skills/tac_control_contract/parameters.py | marcofavorito/agents-aea | e520f2f5d076a193514e194d94aa76c6423ac5bc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This package contains a class representing the game parameters."""
from aea.helpers.transaction.base import Terms
from packages.fetchai.skills.tac_control.parameters import Parameters as BaseParameters
class Parameters(BaseParameters):
    """This class contains the parameters of the game."""

    def __init__(self, **kwargs):
        """Instantiate the parameter class."""
        super().__init__(**kwargs)
        # Number of token-minting operations completed so far.
        self.nb_completed_minting = 0

    def _self_transaction_terms(self) -> Terms:
        """Build empty Terms with this agent on both sides.

        Deployment, token creation and minting all use identical terms:
        the agent transacts with itself and no currency or goods move,
        so the three public getters below share this helper.

        :return: terms
        """
        return Terms(
            ledger_id=self.ledger_id,
            sender_address=self.context.agent_address,
            counterparty_address=self.context.agent_address,
            amount_by_currency_id={},
            quantities_by_good_id={},
            nonce="",
        )

    def get_deploy_terms(self) -> Terms:
        """
        Get deploy terms of deployment.

        :return: terms
        """
        return self._self_transaction_terms()

    def get_create_token_terms(self) -> Terms:
        """
        Get create token terms of deployment.

        :return: terms
        """
        return self._self_transaction_terms()

    def get_mint_token_terms(self) -> Terms:
        """
        Get mint token terms of deployment.

        :return: terms
        """
        return self._self_transaction_terms()
| 31.463415 | 87 | 0.58876 |
from aea.helpers.transaction.base import Terms
from packages.fetchai.skills.tac_control.parameters import Parameters as BaseParameters
class Parameters(BaseParameters):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.nb_completed_minting = 0
def get_deploy_terms(self) -> Terms:
terms = Terms(
ledger_id=self.ledger_id,
sender_address=self.context.agent_address,
counterparty_address=self.context.agent_address,
amount_by_currency_id={},
quantities_by_good_id={},
nonce="",
)
return terms
def get_create_token_terms(self) -> Terms:
terms = Terms(
ledger_id=self.ledger_id,
sender_address=self.context.agent_address,
counterparty_address=self.context.agent_address,
amount_by_currency_id={},
quantities_by_good_id={},
nonce="",
)
return terms
def get_mint_token_terms(self) -> Terms:
terms = Terms(
ledger_id=self.ledger_id,
sender_address=self.context.agent_address,
counterparty_address=self.context.agent_address,
amount_by_currency_id={},
quantities_by_good_id={},
nonce="",
)
return terms
| true | true |
f7ff4a1833def6b0eaf3a0a43931354525bc1a06 | 43 | py | Python | useless_bot/cogs/bank/__init__.py | MRvillager/useless_bot | 68ee1a73d7f0ac4d041d96a02d93feae17194980 | [
"MIT"
] | null | null | null | useless_bot/cogs/bank/__init__.py | MRvillager/useless_bot | 68ee1a73d7f0ac4d041d96a02d93feae17194980 | [
"MIT"
] | null | null | null | useless_bot/cogs/bank/__init__.py | MRvillager/useless_bot | 68ee1a73d7f0ac4d041d96a02d93feae17194980 | [
"MIT"
] | null | null | null | from .bank import Bank
__all__ = ["Bank"]
| 10.75 | 22 | 0.674419 | from .bank import Bank
__all__ = ["Bank"]
| true | true |
f7ff4a188b5ea065dc32c27710a36e7fb875b895 | 87,607 | py | Python | rpython/rlib/rbigint.py | yxzoro/pypy | 6e47b3d3e5513d9639a21554963a6ace172ccfee | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2018-12-27T20:40:49.000Z | 2018-12-27T20:40:49.000Z | rpython/rlib/rbigint.py | GabriellaUwa/pypy | 2ede3b557a25cb49db969e942ca5a7f8a9eae0d4 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | rpython/rlib/rbigint.py | GabriellaUwa/pypy | 2ede3b557a25cb49db969e942ca5a7f8a9eae0d4 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | from rpython.rlib.rarithmetic import LONG_BIT, intmask, longlongmask, r_uint, r_ulonglong
from rpython.rlib.rarithmetic import ovfcheck, r_longlong, widen
from rpython.rlib.rarithmetic import most_neg_value_of_same_type
from rpython.rlib.rarithmetic import check_support_int128
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.debug import make_sure_not_resized, check_regular_int
from rpython.rlib.objectmodel import we_are_translated, specialize, not_rpython
from rpython.rlib import jit
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper import extregistry
import math, sys
SUPPORT_INT128 = check_support_int128()
BYTEORDER = sys.byteorder
# note about digit sizes:
# In division, the native integer type must be able to hold
# a sign bit plus two digits plus 1 overflow bit.
#SHIFT = (LONG_BIT // 2) - 1
if SUPPORT_INT128:
SHIFT = 63
UDIGIT_TYPE = r_ulonglong
if LONG_BIT >= 64:
UDIGIT_MASK = intmask
else:
UDIGIT_MASK = longlongmask
LONG_TYPE = rffi.__INT128_T
if LONG_BIT > SHIFT:
STORE_TYPE = lltype.Signed
UNSIGNED_TYPE = lltype.Unsigned
else:
STORE_TYPE = rffi.LONGLONG
UNSIGNED_TYPE = rffi.ULONGLONG
else:
SHIFT = 31
UDIGIT_TYPE = r_uint
UDIGIT_MASK = intmask
STORE_TYPE = lltype.Signed
UNSIGNED_TYPE = lltype.Unsigned
LONG_TYPE = rffi.LONGLONG
MASK = int((1 << SHIFT) - 1)
FLOAT_MULTIPLIER = float(1 << SHIFT)
# For BIGINT and INT mix.
#
# The VALID range of an int is different than a valid range of a bigint of length one.
# -1 << LONG_BIT is actually TWO digits, because they are stored without the sign.
if SHIFT == LONG_BIT - 1:
MIN_INT_VALUE = -1 << SHIFT
    def int_in_valid_range(x):
        # SHIFT == LONG_BIT - 1: every machine int fits in one digit except
        # the most negative value, which (stored sign-less) needs two.
        if x == MIN_INT_VALUE:
            return False
        return True
else:
# Means we don't have INT128 on 64bit.
    def int_in_valid_range(x):
        # A single SHIFT-bit digit can only hold magnitudes up to MASK.
        if x > MASK or x < -MASK:
            return False
        return True
int_in_valid_range._always_inline_ = True
# Debugging digit array access.
#
# False == no checking at all
# True == check 0 <= value <= MASK
# For long multiplication, use the O(N**2) school algorithm unless
# both operands contain more than KARATSUBA_CUTOFF digits (this
# being an internal Python long digit, in base BASE).
# Karatsuba is O(N**1.585)
USE_KARATSUBA = True # set to False for comparison
if SHIFT > 31:
KARATSUBA_CUTOFF = 19
else:
KARATSUBA_CUTOFF = 38
KARATSUBA_SQUARE_CUTOFF = 2 * KARATSUBA_CUTOFF
# For exponentiation, use the binary left-to-right algorithm
# unless the exponent contains more than FIVEARY_CUTOFF digits.
# In that case, do 5 bits at a time. The potential drawback is that
# a table of 2**5 intermediate results is computed.
FIVEARY_CUTOFF = 8
@specialize.argtype(0)
def _mask_digit(x):
    # Keep only the low SHIFT bits of x and cast to the digit integer type.
    return UDIGIT_MASK(x & MASK)
def _widen_digit(x):
    # Cast up to LONG_TYPE, which is wide enough to hold two digits.
    return rffi.cast(LONG_TYPE, x)
@specialize.argtype(0)
def _store_digit(x):
    # Narrow x to the type digits are stored as inside the digit array.
    return rffi.cast(STORE_TYPE, x)
def _load_unsigned_digit(x):
    # Reinterpret a stored digit as the unsigned digit type.
    return rffi.cast(UNSIGNED_TYPE, x)
_load_unsigned_digit._always_inline_ = True
NULLDIGIT = _store_digit(0)
ONEDIGIT = _store_digit(1)
def _check_digits(l):
    # Debug helper: every digit must have the storage type and fit in MASK.
    for x in l:
        assert type(x) is type(NULLDIGIT)
        assert UDIGIT_MASK(x) & MASK == UDIGIT_MASK(x)
class InvalidEndiannessError(Exception):
    """Raised when a byteorder argument is neither 'big' nor 'little'."""
    pass
class InvalidSignednessError(Exception):
    """Raised when a negative bigint is exported with signed=False."""
    pass
class Entry(extregistry.ExtRegistryEntry):
    """Toolchain hook for _check_digits.

    At annotation time it asserts the argument really is a list whose items
    have the digit storage type; specialize_call only declares that the
    call cannot raise.
    """
    _about_ = _check_digits
    def compute_result_annotation(self, s_list):
        from rpython.annotator import model as annmodel
        assert isinstance(s_list, annmodel.SomeList)
        s_DIGIT = self.bookkeeper.valueoftype(type(NULLDIGIT))
        assert s_DIGIT.contains(s_list.listdef.listitem.s_value)
    def specialize_call(self, hop):
        hop.exception_cannot_occur()
class rbigint(object):
"""This is a reimplementation of longs using a list of digits."""
_immutable_ = True
_immutable_fields_ = ["_digits"]
    def __init__(self, digits=[NULLDIGIT], sign=0, size=0):
        # digits: digit list, least significant digit first.
        # sign:   -1, 0 or 1.
        # size:   number of significant digits; 0 means use len(digits).
        # NOTE(review): the mutable default list is shared between all
        # instances built without arguments -- appears intentional here,
        # but confirm no caller setdigit()s such an instance.
        if not we_are_translated():
            _check_digits(digits)
        make_sure_not_resized(digits)
        self._digits = digits
        assert size >= 0
        self.size = size or len(digits)
        self.sign = sign
    # __eq__ and __ne__ methods exist for testing only, they are not RPython!
    @not_rpython
    def __eq__(self, other):
        if not isinstance(other, rbigint):
            return NotImplemented
        return self.eq(other)
    @not_rpython
    def __ne__(self, other):
        return not (self == other)
    def digit(self, x):
        """Return the x'th digit, as an int."""
        return self._digits[x]
    digit._always_inline_ = True  # translator hint: always inline this accessor
    def widedigit(self, x):
        """Return the x'th digit, as a long long int if needed
        to have enough room to contain two digits."""
        return _widen_digit(self._digits[x])
    widedigit._always_inline_ = True  # translator hint: always inline this accessor
    def udigit(self, x):
        """Return the x'th digit, as an unsigned int."""
        return _load_unsigned_digit(self._digits[x])
    udigit._always_inline_ = True  # translator hint: always inline this accessor
    @specialize.argtype(2)
    def setdigit(self, x, val):
        # Store val masked to SHIFT bits; the specialization on the second
        # argument lets any integer type be passed in.
        val = _mask_digit(val)
        assert val >= 0
        self._digits[x] = _store_digit(val)
    setdigit._always_inline_ = True  # translator hint: always inline this accessor
    def numdigits(self):
        """Return the number of significant digits."""
        return self.size
    numdigits._always_inline_ = True  # translator hint: always inline this accessor
    @staticmethod
    @jit.elidable
    def fromint(intval):
        """Create an rbigint from a machine integer (at most two digits)."""
        # This function is marked as pure, so you must not call it and
        # then modify the result.
        check_regular_int(intval)
        if intval < 0:
            sign = -1
            ival = -r_uint(intval)
        elif intval > 0:
            sign = 1
            ival = r_uint(intval)
        else:
            return NULLRBIGINT
        # A native int spans at most two SHIFT-bit digits.
        carry = ival >> SHIFT
        if carry:
            return rbigint([_store_digit(ival & MASK),
                            _store_digit(carry)], sign, 2)
        else:
            return rbigint([_store_digit(ival & MASK)], sign, 1)
    @staticmethod
    @jit.elidable
    def frombool(b):
        # You must not call this function and then modify the result:
        # it returns one of the shared module-level constants.
        if b:
            return ONERBIGINT
        return NULLRBIGINT
    @staticmethod
    @not_rpython
    def fromlong(l):
        """Create an rbigint from a Python int/long (untranslated use only)."""
        return rbigint(*args_from_long(l))
    @staticmethod
    @jit.elidable
    def fromfloat(dval):
        """Create a new bigint object from a float.

        Raises OverflowError for infinities and ValueError for NaN.
        """
        # This function is not marked as pure because it can raise
        if math.isinf(dval):
            raise OverflowError("cannot convert float infinity to integer")
        if math.isnan(dval):
            raise ValueError("cannot convert float NaN to integer")
        return rbigint._fromfloat_finite(dval)
    @staticmethod
    @jit.elidable
    def _fromfloat_finite(dval):
        """Convert a finite float to a bigint, truncating toward zero."""
        sign = 1
        if dval < 0.0:
            sign = -1
            dval = -dval
        frac, expo = math.frexp(dval) # dval = frac*2**expo; 0.0 <= frac < 1.0
        if expo <= 0:
            # |dval| < 1.0 truncates to zero.
            return NULLRBIGINT
        ndig = (expo-1) // SHIFT + 1 # Number of 'digits' in result
        v = rbigint([NULLDIGIT] * ndig, sign, ndig)
        # Peel SHIFT bits of the mantissa per digit, most significant first.
        frac = math.ldexp(frac, (expo-1) % SHIFT + 1)
        for i in range(ndig-1, -1, -1):
            # use int(int(frac)) as a workaround for a CPython bug:
            # with frac == 2147483647.0, int(frac) == 2147483647L
            bits = int(int(frac))
            v.setdigit(i, bits)
            frac -= float(bits)
            frac = math.ldexp(frac, SHIFT)
        return v
@staticmethod
@jit.elidable
@specialize.argtype(0)
def fromrarith_int(i):
    """Build an rbigint from any rarithmetic integer type (r_uint, r_longlong, ...)."""
    # This function is marked as pure, so you must not call it and
    # then modify the result.
    return rbigint(*args_from_rarith_int(i))
@staticmethod
@jit.elidable
def fromdecimalstr(s):
    """Parse a base-10 string into an rbigint."""
    # This function is marked as elidable, so you must not call it and
    # then modify the result.
    return _decimalstr_to_bigint(s)
@staticmethod
@jit.elidable
def fromstr(s, base=0, allow_underscores=False):
    """As string_to_int(), but ignores an optional 'l' or 'L' suffix
    and returns an rbigint."""
    from rpython.rlib.rstring import NumberStringParser, \
        strip_spaces
    s = literal = strip_spaces(s)
    if (s.endswith('l') or s.endswith('L')) and base < 22:
        # in base 22 and above, 'L' is a valid digit!  try: long('L',22)
        s = s[:-1]
    parser = NumberStringParser(s, literal, base, 'long',
                                allow_underscores=allow_underscores)
    return rbigint._from_numberstring_parser(parser)
@staticmethod
def _from_numberstring_parser(parser):
    """Build an rbigint from an already-constructed NumberStringParser."""
    return parse_digit_string(parser)
@staticmethod
@jit.elidable
def frombytes(s, byteorder, signed):
    """Build an rbigint from a byte string.

    byteorder is 'big' or 'little'; if 'signed', the most significant
    bit of the most significant byte selects two's-complement decoding.
    Raises InvalidEndiannessError for any other byteorder value.
    """
    if byteorder not in ('big', 'little'):
        raise InvalidEndiannessError()
    if not s:
        return NULLRBIGINT

    if byteorder == 'big':
        msb = ord(s[0])
        itr = range(len(s)-1, -1, -1)
    else:
        msb = ord(s[-1])
        itr = range(0, len(s))

    sign = -1 if msb >= 0x80 and signed else 1
    accum = _widen_digit(0)
    accumbits = 0
    digits = []
    carry = 1

    for i in itr:
        c = _widen_digit(ord(s[i]))
        if sign == -1:
            # incremental two's complement: invert and propagate +1 carry
            c = (0xFF ^ c) + carry
            carry = c >> 8
            c &= 0xFF

        accum |= c << accumbits
        accumbits += 8
        if accumbits >= SHIFT:
            digits.append(_store_digit(intmask(accum & MASK)))
            accum >>= SHIFT
            accumbits -= SHIFT

    if accumbits:
        digits.append(_store_digit(intmask(accum)))
    result = rbigint(digits[:], sign)
    result._normalize()
    return result
@jit.elidable
def tobytes(self, nbytes, byteorder, signed):
    """Serialize to exactly nbytes bytes in the given byteorder.

    Negative values require signed=True (two's complement encoding).
    Raises OverflowError when the value does not fit in nbytes,
    InvalidEndiannessError / InvalidSignednessError on bad arguments.
    """
    if byteorder not in ('big', 'little'):
        raise InvalidEndiannessError()
    if not signed and self.sign == -1:
        raise InvalidSignednessError()

    bswap = byteorder == 'big'
    d = _widen_digit(0)
    j = 0
    imax = self.numdigits()
    accum = _widen_digit(0)
    accumbits = 0
    result = StringBuilder(nbytes)
    carry = 1

    for i in range(0, imax):
        d = self.widedigit(i)
        if self.sign == -1:
            # incremental two's complement of the digit stream
            d = (d ^ MASK) + carry
            carry = d >> SHIFT
            d &= MASK

        accum |= d << accumbits
        if i == imax - 1:
            # Avoid bogus 0's
            s = d ^ MASK if self.sign == -1 else d
            while s:
                s >>= 1
                accumbits += 1
        else:
            accumbits += SHIFT

        while accumbits >= 8:
            if j >= nbytes:
                raise OverflowError()
            j += 1

            result.append(chr(accum & 0xFF))
            accum >>= 8
            accumbits -= 8

    if accumbits:
        if j >= nbytes:
            raise OverflowError()
        j += 1

        if self.sign == -1:
            # Add a sign bit
            accum |= (~_widen_digit(0)) << accumbits

        result.append(chr(accum & 0xFF))

    if j < nbytes:
        # pad to the requested width with the sign byte
        signbyte = 0xFF if self.sign == -1 else 0
        result.append_multiple_char(chr(signbyte), nbytes - j)

    digits = result.build()

    if j == nbytes and nbytes > 0 and signed:
        # If not already set, we cannot contain the sign bit
        msb = digits[-1]
        if (self.sign == -1) != (ord(msb) >= 0x80):
            raise OverflowError()

    if bswap:
        # Bah, this is very inefficient. At least it's not
        # quadratic.
        length = len(digits)
        if length >= 0:
            digits = ''.join([digits[i] for i in range(length-1, -1, -1)])
    return digits
def toint(self):
    """
    Get an integer from a bigint object.
    Raises OverflowError if overflow occurs.
    """
    # quick reject on digit count keeps the elidable helper out of
    # the common overflow path
    if self.numdigits() > MAX_DIGITS_THAT_CAN_FIT_IN_INT:
        raise OverflowError
    return self._toint_helper()
@jit.elidable
def _toint_helper(self):
    """Convert to a signed machine int; assumes the digit count already fits."""
    x = self._touint_helper()
    # Haven't lost any bits so far
    if self.sign >= 0:
        res = intmask(x)
        if res < 0:
            raise OverflowError
    else:
        # Use "-" on the unsigned number, not on the signed number.
        # This is needed to produce valid C code.
        res = intmask(-x)
        if res >= 0:
            raise OverflowError
    return res
@jit.elidable
def tolonglong(self):
    """Convert to r_longlong; raises OverflowError if it does not fit."""
    return _AsLongLong(self)
def tobool(self):
    """Truth value: zero is the only rbigint whose sign is 0."""
    if self.sign == 0:
        return False
    return True
@jit.elidable
def touint(self):
    """Convert to r_uint; raises ValueError for negatives, OverflowError if too big."""
    if self.sign == -1:
        raise ValueError("cannot convert negative integer to unsigned int")
    return self._touint_helper()
@jit.elidable
def _touint_helper(self):
    """Accumulate |self| into an r_uint, most significant digit first."""
    x = r_uint(0)
    i = self.numdigits() - 1
    while i >= 0:
        prev = x
        x = (x << SHIFT) + self.udigit(i)
        # if shifting back does not recover the previous value,
        # bits fell off the top: overflow
        if (x >> SHIFT) != prev:
            raise OverflowError("long int too large to convert to unsigned int")
        i -= 1
    return x
@jit.elidable
def toulonglong(self):
    """Convert to r_ulonglong; raises ValueError for negative values."""
    if self.sign == -1:
        raise ValueError("cannot convert negative integer to unsigned int")
    return _AsULonglong_ignore_sign(self)
@jit.elidable
def uintmask(self):
    """Return r_uint(self), truncating (no overflow check)."""
    return _AsUInt_mask(self)
@jit.elidable
def ulonglongmask(self):
    """Return r_ulonglong(self), truncating."""
    return _AsULonglong_mask(self)
@jit.elidable
def tofloat(self):
    """Convert to a float; raises OverflowError if out of double range."""
    return _AsDouble(self)
@jit.elidable
def format(self, digits, prefix='', suffix=''):
    """Render self in an arbitrary base given by a digit alphabet."""
    # 'digits' is a string whose length is the base to use,
    # and where each character is the corresponding digit.
    return _format(self, digits, prefix, suffix)
@jit.elidable
def repr(self):
    """Python-2-long style repr, always with a trailing 'L'."""
    try:
        # fast path: fits a machine int, let str(int) do the work
        x = self.toint()
    except OverflowError:
        return self.format(BASE10, suffix="L")
    return str(x) + "L"
@jit.elidable
def str(self):
    """Decimal string of self, without any 'L' suffix."""
    try:
        # fast path: fits a machine int, let str(int) do the work
        x = self.toint()
    except OverflowError:
        return self.format(BASE10)
    return str(x)
@jit.elidable
def eq(self, other):
    """Value equality: identical sign, digit count and digits."""
    if self.sign != other.sign:
        return False
    ndigits = self.numdigits()
    if ndigits != other.numdigits():
        return False
    for idx in range(ndigits):
        if self.digit(idx) != other.digit(idx):
            return False
    return True
@jit.elidable
def int_eq(self, other):
    """ eq with int """
    if not int_in_valid_range(other):
        # Fallback to Long.
        return self.eq(rbigint.fromint(other))

    if self.numdigits() > 1:
        return False

    # single-digit value: compare the signed magnitude directly
    return (self.sign * self.digit(0)) == other
def ne(self, other):
    """Inequality: the negation of eq()."""
    if self.eq(other):
        return False
    return True
def int_ne(self, other):
    """Inequality against a machine int: the negation of int_eq()."""
    if self.int_eq(other):
        return False
    return True
@jit.elidable
def lt(self, other):
    """Strict less-than between two rbigints.

    Compares signs first, then digit counts, then digits from most
    significant downward; each comparison is flipped for negatives.
    """
    if self.sign > other.sign:
        return False
    if self.sign < other.sign:
        return True
    ld1 = self.numdigits()
    ld2 = other.numdigits()
    if ld1 > ld2:
        # more digits means larger magnitude; for negatives that is smaller
        if other.sign > 0:
            return False
        else:
            return True
    elif ld1 < ld2:
        if other.sign > 0:
            return True
        else:
            return False
    i = ld1 - 1
    while i >= 0:
        d1 = self.digit(i)
        d2 = other.digit(i)
        if d1 < d2:
            if other.sign > 0:
                return True
            else:
                return False
        elif d1 > d2:
            if other.sign > 0:
                return False
            else:
                return True
        i -= 1
    # all digits equal
    return False
@jit.elidable
def int_lt(self, other):
    """ lt where other is an int """
    if not int_in_valid_range(other):
        # Fallback to Long.
        return self.lt(rbigint.fromint(other))

    osign = 1
    if other == 0:
        osign = 0
    elif other < 0:
        osign = -1

    if self.sign > osign:
        return False
    elif self.sign < osign:
        return True

    digits = self.numdigits()

    if digits > 1:
        # magnitude exceeds any valid single machine int of this sign
        if osign == 1:
            return False
        else:
            return True

    d1 = self.sign * self.digit(0)
    if d1 < other:
        return True
    return False
def le(self, other):
    """self <= other, expressed as: other is not strictly smaller."""
    if other.lt(self):
        return False
    return True
def int_le(self, other):
    """self <= other, for a machine-int other."""
    # Alternative that might be faster: reimplement as a single check
    # against other + 1.  But then we would have to check for overflow,
    # or reduce the valid range.
    if self.int_eq(other):
        return True
    return self.int_lt(other)
def gt(self, other):
    """self > other, delegating to the reversed strict comparison."""
    if other.lt(self):
        return True
    return False
def int_gt(self, other):
    """self > other (machine int): the negation of int_le()."""
    if self.int_le(other):
        return False
    return True
def ge(self, other):
    """self >= other: the negation of strict less-than."""
    if self.lt(other):
        return False
    return True
def int_ge(self, other):
    """self >= other (machine int): the negation of int_lt()."""
    if self.int_lt(other):
        return False
    return True
@jit.elidable
def hash(self):
    """Hash consistent with the hash of the equal machine int, when one exists."""
    return _hash(self)
@jit.elidable
def add(self, other):
    """Return self + other as a new rbigint."""
    if self.sign == 0:
        return other
    if other.sign == 0:
        return self
    if self.sign == other.sign:
        # same sign: add magnitudes, keep the sign
        result = _x_add(self, other)
    else:
        # opposite signs: subtract magnitudes; _x_sub(other, self)
        # yields sign relative to 'other', fixed up below
        result = _x_sub(other, self)
    result.sign *= other.sign
    return result
@jit.elidable
def int_add(self, other):
    """Return self + other, where other is a machine int."""
    if not int_in_valid_range(other):
        # Fallback to long.
        return self.add(rbigint.fromint(other))
    elif self.sign == 0:
        return rbigint.fromint(other)
    elif other == 0:
        return self

    sign = -1 if other < 0 else 1
    if self.sign == sign:
        result = _x_int_add(self, other)
    else:
        result = _x_int_sub(self, other)
        # _x_int_sub computed |self| - |other| with sign relative to self;
        # flip before applying the common sign factor below
        result.sign *= -1
    result.sign *= sign
    return result
@jit.elidable
def sub(self, other):
    """Return self - other as a new rbigint."""
    if other.sign == 0:
        return self
    elif self.sign == 0:
        # 0 - other == -other (fresh copy so the result is independent)
        return rbigint(other._digits[:other.size], -other.sign, other.size)
    elif self.sign == other.sign:
        result = _x_sub(self, other)
    else:
        result = _x_add(self, other)
    result.sign *= self.sign
    return result
@jit.elidable
def int_sub(self, other):
    """Return self - other, where other is a machine int."""
    if not int_in_valid_range(other):
        # Fallback to long.
        return self.sub(rbigint.fromint(other))
    elif other == 0:
        return self
    elif self.sign == 0:
        return rbigint.fromint(-other)
    elif self.sign == (-1 if other < 0 else 1):
        # same sign: subtract magnitudes
        result = _x_int_sub(self, other)
    else:
        # opposite signs: add magnitudes
        result = _x_int_add(self, other)
    result.sign *= self.sign
    return result
@jit.elidable
def mul(self, b):
    """Return self * b, choosing schoolbook or Karatsuba multiplication.

    The smaller operand is moved into 'a' so the single-digit fast
    paths and the Karatsuba cutoff test look at the short side.
    """
    asize = self.numdigits()
    bsize = b.numdigits()

    a = self

    if asize > bsize:
        a, b, asize, bsize = b, a, bsize, asize

    if a.sign == 0 or b.sign == 0:
        return NULLRBIGINT

    if asize == 1:
        if a._digits[0] == ONEDIGIT:
            # multiplying by +/-1: copy b, adjust the sign
            return rbigint(b._digits[:b.size], a.sign * b.sign, b.size)
        elif bsize == 1:
            # 1x1 digit product fits in at most two digits
            res = b.widedigit(0) * a.widedigit(0)
            carry = res >> SHIFT
            if carry:
                return rbigint([_store_digit(res & MASK), _store_digit(carry)], a.sign * b.sign, 2)
            else:
                return rbigint([_store_digit(res & MASK)], a.sign * b.sign, 1)

        result = _x_mul(a, b, a.digit(0))
    elif USE_KARATSUBA:
        if a is b:
            i = KARATSUBA_SQUARE_CUTOFF
        else:
            i = KARATSUBA_CUTOFF

        if asize <= i:
            result = _x_mul(a, b)
            """elif 2 * asize <= bsize:
                result = _k_lopsided_mul(a, b)"""
        else:
            result = _k_mul(a, b)
    else:
        result = _x_mul(a, b)

    result.sign = a.sign * b.sign
    return result
@jit.elidable
def int_mul(self, b):
    """Return self * b, where b is a machine int."""
    if not int_in_valid_range(b):
        # Fallback to long.
        return self.mul(rbigint.fromint(b))

    if self.sign == 0 or b == 0:
        return NULLRBIGINT

    asize = self.numdigits()
    digit = abs(b)

    bsign = -1 if b < 0 else 1

    if digit == 1:
        # multiply by +/-1: copy and fix the sign
        return rbigint(self._digits[:self.size], self.sign * bsign, asize)
    elif asize == 1:
        # 1x1 digit product fits in at most two digits
        res = self.widedigit(0) * digit
        carry = res >> SHIFT
        if carry:
            return rbigint([_store_digit(res & MASK), _store_digit(carry)], self.sign * bsign, 2)
        else:
            return rbigint([_store_digit(res & MASK)], self.sign * bsign, 1)
    elif digit & (digit - 1) == 0:
        # power of two: shift instead of multiplying
        result = self.lqshift(ptwotable[digit])
    else:
        result = _muladd1(self, digit)

    result.sign = self.sign * bsign
    return result
@jit.elidable
def truediv(self, other):
    """Float (true) division of self by other."""
    return _bigint_true_divide(self, other)
@jit.elidable
def floordiv(self, other):
    """Floor division self // other (rounds toward negative infinity)."""
    if self.sign == 1 and other.numdigits() == 1 and other.sign == 1:
        digit = other.digit(0)
        if digit == 1:
            return rbigint(self._digits[:self.size], 1, self.size)
        elif digit and digit & (digit - 1) == 0:
            # positive // power-of-two: a plain right shift
            return self.rshift(ptwotable[digit])

    div, mod = _divrem(self, other)
    if mod.sign * other.sign == -1:
        # _divrem truncates toward zero; correct to floor semantics
        if div.sign == 0:
            return ONENEGATIVERBIGINT
        div = div.int_sub(1)

    return div
def div(self, other):
    """Alias for floor division."""
    quotient = self.floordiv(other)
    return quotient
@jit.elidable
def mod(self, other):
    """Return self % other, with the sign of the result following other."""
    if other.sign == 0:
        raise ZeroDivisionError("long division or modulo by zero")
    if self.sign == 0:
        return NULLRBIGINT

    if other.numdigits() == 1:
        # single-digit modulus: take the much cheaper int path
        otherint = other.digit(0) * other.sign
        assert int_in_valid_range(otherint)
        return self.int_mod(otherint)
    else:
        div, mod = _divrem(self, other)
    if mod.sign * other.sign == -1:
        # _divrem truncates toward zero; correct to floor semantics
        mod = mod.add(other)
    return mod
@jit.elidable
def int_mod(self, other):
    """Return self % other, where other is a machine int."""
    if other == 0:
        raise ZeroDivisionError("long division or modulo by zero")

    if self.sign == 0:
        return NULLRBIGINT

    elif not int_in_valid_range(other):
        # Fallback to long.
        return self.mod(rbigint.fromint(other))

    if 1: # preserve indentation to preserve history
        digit = abs(other)
        if digit == 1:
            return NULLRBIGINT
        elif digit == 2:
            # parity test on the lowest digit suffices
            modm = self.digit(0) & 1
            if modm:
                return ONENEGATIVERBIGINT if other < 0 else ONERBIGINT
            return NULLRBIGINT
        elif digit & (digit - 1) == 0:
            # power of two: mask the low bits
            mod = self.int_and_(digit - 1)
        else:
            # Perform
            # Horner-style remainder, most significant digit first
            size = self.numdigits() - 1
            if size > 0:
                rem = self.widedigit(size)
                size -= 1
                while size >= 0:
                    rem = ((rem << SHIFT) + self.widedigit(size)) % digit
                    size -= 1
            else:
                rem = self.digit(0) % digit

            if rem == 0:
                return NULLRBIGINT
            mod = rbigint([_store_digit(rem)], -1 if self.sign < 0 else 1, 1)

    if mod.sign * (-1 if other < 0 else 1) == -1:
        # truncated remainder -> floored modulo
        mod = mod.int_add(other)
    return mod
@jit.elidable
def divmod(v, w):
    """
    The / and % operators are now defined in terms of divmod().
    The expression a mod b has the value a - b*floor(a/b).
    The _divrem function gives the remainder after division of
    |a| by |b|, with the sign of a.  This is also expressed
    as a - b*trunc(a/b), if trunc truncates towards zero.
    Some examples:
      a   b   a rem b     a mod b
      13  10   3           3
     -13  10  -3           7
      13 -10   3          -7
     -13 -10  -3          -3
    So, to get from rem to mod, we have to add b if a and b
    have different signs.  We then subtract one from the 'div'
    part of the outcome to keep the invariant intact.
    """
    div, mod = _divrem(v, w)
    if mod.sign * w.sign == -1:
        mod = mod.add(w)
        if div.sign == 0:
            return ONENEGATIVERBIGINT, mod
        div = div.int_sub(1)
    return div, mod
@jit.elidable
def pow(a, b, c=None):
    """Return a ** b, or a ** b % c when a modulus c is given.

    Small exponents use left-to-right binary exponentiation; large
    ones use a 5-ary window (only profitable with a modulus).
    """
    negativeOutput = False  # if x<0 return negative output

    # 5-ary values.  If the exponent is large enough, table is
    # precomputed so that table[i] == a**i % c for i in range(32).
    # python translation: the table is computed when needed.

    if b.sign < 0:  # if exponent is negative
        if c is not None:
            raise TypeError(
                "pow() 2nd argument "
                "cannot be negative when 3rd argument specified")
        # XXX failed to implement
        raise ValueError("bigint pow() too negative")

    size_b = b.numdigits()

    if c is not None:
        if c.sign == 0:
            raise ValueError("pow() 3rd argument cannot be 0")

        # if modulus < 0:
        #     negativeOutput = True
        #     modulus = -modulus
        if c.sign < 0:
            negativeOutput = True
            c = c.neg()

        # if modulus == 1:
        #     return 0
        if c.numdigits() == 1 and c._digits[0] == ONEDIGIT:
            return NULLRBIGINT

        # Reduce base by modulus in some cases:
        # 1. If base < 0.  Forcing the base non-neg makes things easier.
        # 2. If base is obviously larger than the modulus.  The "small
        #    exponent" case later can multiply directly by base repeatedly,
        #    while the "large exponent" case multiplies directly by base 31
        #    times.  It can be unboundedly faster to multiply by
        #    base % modulus instead.
        # We could _always_ do this reduction, but mod() isn't cheap,
        # so we only do it when it buys something.
        if a.sign < 0 or a.numdigits() > c.numdigits():
            a = a.mod(c)
    elif b.sign == 0:
        return ONERBIGINT
    elif a.sign == 0:
        return NULLRBIGINT
    elif size_b == 1:
        if b._digits[0] == NULLDIGIT:
            return ONERBIGINT if a.sign == 1 else ONENEGATIVERBIGINT
        elif b._digits[0] == ONEDIGIT:
            return a
        elif a.numdigits() == 1:
            adigit = a.digit(0)
            digit = b.digit(0)
            if adigit == 1:
                if a.sign == -1 and digit % 2:
                    return ONENEGATIVERBIGINT
                return ONERBIGINT
            elif adigit & (adigit - 1) == 0:
                # base is a power of two: exponentiate by shifting
                ret = a.lshift(((digit-1)*(ptwotable[adigit]-1)) + digit-1)
                if a.sign == -1 and not digit % 2:
                    ret.sign = 1
                return ret

    # At this point a, b, and c are guaranteed non-negative UNLESS
    # c is NULL, in which case a may be negative. */

    z = rbigint([ONEDIGIT], 1, 1)

    # python adaptation: moved macros REDUCE(X) and MULT(X, Y, result)
    # into helper function result = _help_mult(x, y, c)
    if size_b <= FIVEARY_CUTOFF:
        # Left-to-right binary exponentiation (HAC Algorithm 14.79)
        # http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
        size_b -= 1
        while size_b >= 0:
            bi = b.digit(size_b)
            j = 1 << (SHIFT-1)
            while j != 0:
                z = _help_mult(z, z, c)
                if bi & j:
                    z = _help_mult(z, a, c)
                j >>= 1
            size_b -= 1
    else:
        # Left-to-right 5-ary exponentiation (HAC Algorithm 14.82)
        # This is only useful in the case where c != None.
        # z still holds 1L
        table = [z] * 32
        table[0] = z
        for i in range(1, 32):
            table[i] = _help_mult(table[i-1], a, c)

        # Note that here SHIFT is not a multiple of 5.  The difficulty
        # is to extract 5 bits at a time from 'b', starting from the
        # most significant digits, so that at the end of the algorithm
        # it falls exactly to zero.
        # m  = max number of bits = i * SHIFT
        # m+ = m rounded up to the next multiple of 5
        # j  = (m+) % SHIFT = (m+) - (i * SHIFT)
        # (computed without doing "i * SHIFT", which might overflow)
        j = size_b % 5
        j = _jmapping[j]
        if not we_are_translated():
            assert j == (size_b*SHIFT+4)//5*5 - size_b*SHIFT
        #
        accum = r_uint(0)
        while True:
            j -= 5
            if j >= 0:
                index = (accum >> j) & 0x1f
            else:
                # 'accum' does not have enough digit.
                # must get the next digit from 'b' in order to complete
                if size_b == 0:
                    break # Done

                size_b -= 1
                assert size_b >= 0
                bi = b.udigit(size_b)
                index = ((accum << (-j)) | (bi >> (j+SHIFT))) & 0x1f
                accum = bi
                j += SHIFT
            #
            for k in range(5):
                z = _help_mult(z, z, c)
            if index:
                z = _help_mult(z, table[index], c)
        #
        assert j == -5

    if negativeOutput and z.sign != 0:
        z = z.sub(c)
    return z
@jit.elidable
def neg(self):
    """Return -self.  The digit list is shared: rbigints are immutable."""
    return rbigint(self._digits, -self.sign, self.size)
@jit.elidable
def abs(self):
    """Return |self|; self itself when already non-negative."""
    if self.sign != -1:
        return self
    # shares the digit list: rbigints are immutable
    return rbigint(self._digits, 1, self.size)
@jit.elidable
def invert(self): #Implement ~x as -(x + 1)
    if self.sign == 0:
        return ONENEGATIVERBIGINT

    ret = self.int_add(1)
    ret.sign = -ret.sign
    return ret
@jit.elidable
def lshift(self, int_other):
    """Return self << int_other; raises ValueError on negative counts."""
    if int_other < 0:
        raise ValueError("negative shift count")
    elif int_other == 0:
        return self

    # wordshift, remshift = divmod(int_other, SHIFT)
    wordshift = int_other // SHIFT
    remshift = int_other - wordshift * SHIFT

    if not remshift:
        # So we can avoid problems with eq, AND avoid the need for normalize.
        if self.sign == 0:
            return self
        # whole-digit shift: just prepend zero digits
        return rbigint([NULLDIGIT] * wordshift + self._digits, self.sign, self.size + wordshift)

    oldsize = self.numdigits()
    newsize = oldsize + wordshift + 1
    z = rbigint([NULLDIGIT] * newsize, self.sign, newsize)
    accum = _widen_digit(0)
    j = 0
    while j < oldsize:
        accum += self.widedigit(j) << remshift
        z.setdigit(wordshift, accum)
        accum >>= SHIFT
        wordshift += 1
        j += 1

    newsize -= 1
    assert newsize >= 0
    z.setdigit(newsize, accum)

    z._normalize()
    return z
lshift._always_inline_ = True # It's so fast that it's always beneficial.
@jit.elidable
def lqshift(self, int_other):
    " A quicker one with much less checks, int_other is valid and for the most part constant."
    # precondition (unchecked beyond the assert): 0 < int_other < SHIFT,
    # so the shift never spills past one extra digit
    assert int_other > 0

    oldsize = self.numdigits()

    z = rbigint([NULLDIGIT] * (oldsize + 1), self.sign, (oldsize + 1))
    accum = _widen_digit(0)
    i = 0
    while i < oldsize:
        accum += self.widedigit(i) << int_other
        z.setdigit(i, accum)
        accum >>= SHIFT
        i += 1
    z.setdigit(oldsize, accum)
    z._normalize()
    return z
lqshift._always_inline_ = True # It's so fast that it's always beneficial.
@jit.elidable
def rshift(self, int_other, dont_invert=False):
    """Arithmetic right shift (floor semantics for negatives).

    Negative values are handled via ~(~x >> n) unless dont_invert,
    which callers use when they know truncating is what they want.
    """
    if int_other < 0:
        raise ValueError("negative shift count")
    elif int_other == 0:
        return self
    if self.sign == -1 and not dont_invert:
        a = self.invert().rshift(int_other)
        return a.invert()

    wordshift = int_other / SHIFT   # Python-2 floor division on ints
    newsize = self.numdigits() - wordshift
    if newsize <= 0:
        return NULLRBIGINT

    loshift = int_other % SHIFT
    hishift = SHIFT - loshift
    z = rbigint([NULLDIGIT] * newsize, self.sign, newsize)
    i = 0
    while i < newsize:
        # stitch each result digit from two adjacent source digits
        newdigit = (self.digit(wordshift) >> loshift)
        if i+1 < newsize:
            newdigit |= (self.digit(wordshift+1) << hishift)
        z.setdigit(i, newdigit)
        i += 1
        wordshift += 1
    z._normalize()
    return z
rshift._always_inline_ = 'try' # It's so fast that it's always beneficial.
@jit.elidable
def abs_rshift_and_mask(self, bigshiftcount, mask):
    """Return (|self| >> bigshiftcount) & mask as a machine int.

    bigshiftcount is an r_ulonglong so callers can shift by amounts
    that overflow a plain int; mask must be non-negative.
    """
    assert isinstance(bigshiftcount, r_ulonglong)
    assert mask >= 0

    wordshift = bigshiftcount / SHIFT   # Python-2 floor division
    numdigits = self.numdigits()
    if wordshift >= numdigits:
        return 0

    wordshift = intmask(wordshift)
    loshift = intmask(intmask(bigshiftcount) - intmask(wordshift * SHIFT))
    lastdigit = self.digit(wordshift) >> loshift
    if mask > (MASK >> loshift) and wordshift + 1 < numdigits:
        # the mask reaches into the next digit up
        hishift = SHIFT - loshift
        lastdigit |= self.digit(wordshift+1) << hishift
    return lastdigit & mask
@staticmethod
def from_list_n_bits(list, nbits):
    """Build an rbigint from a list of n-bit chunks, least significant first.

    Requires 1 <= nbits <= SHIFT; with nbits == SHIFT the list is used
    as the digit array directly.
    """
    if len(list) == 0:
        return NULLRBIGINT

    if nbits == SHIFT:
        z = rbigint(list, 1)
    else:
        if not (1 <= nbits < SHIFT):
            raise ValueError

        # compute the digit count in 64-bit space to dodge overflow
        lllength = (r_ulonglong(len(list)) * nbits) // SHIFT
        length = intmask(lllength) + 1
        z = rbigint([NULLDIGIT] * length, 1)

        out = 0
        i = 0
        accum = 0
        for input in list:
            accum |= (input << i)
            original_i = i
            i += nbits
            if i > SHIFT:
                # digit full: flush and carry the spill-over bits
                z.setdigit(out, accum)
                out += 1
                accum = input >> (SHIFT - original_i)
                i -= SHIFT
        assert out < length
        z.setdigit(out, accum)

    z._normalize()
    return z
@jit.elidable
def and_(self, other):
    """Bitwise AND of two rbigints (two's-complement semantics)."""
    return _bitwise(self, '&', other)
@jit.elidable
def int_and_(self, other):
    """Bitwise AND with a machine int."""
    return _int_bitwise(self, '&', other)
@jit.elidable
def xor(self, other):
    """Bitwise XOR of two rbigints (two's-complement semantics)."""
    return _bitwise(self, '^', other)
@jit.elidable
def int_xor(self, other):
    """Bitwise XOR with a machine int."""
    return _int_bitwise(self, '^', other)
@jit.elidable
def or_(self, other):
    """Bitwise OR of two rbigints (two's-complement semantics)."""
    return _bitwise(self, '|', other)
@jit.elidable
def int_or_(self, other):
    """Bitwise OR with a machine int."""
    return _int_bitwise(self, '|', other)
@jit.elidable
def oct(self):
    """Python-2-style octal string, e.g. '013L'."""
    if self.sign == 0:
        return '0L'
    else:
        return _format(self, BASE8, '0', 'L')
@jit.elidable
def hex(self):
    """Python-2-style hexadecimal string, e.g. '0x1fL'."""
    return _format(self, BASE16, '0x', 'L')
@jit.elidable
def log(self, base):
    """Logarithm of self in the given base (base 0.0 means natural log)."""
    # base is supposed to be positive or 0.0, which means we use e
    if base == 10.0:
        return _loghelper(math.log10, self)
    if base == 2.0:
        from rpython.rlib import rfloat
        return _loghelper(rfloat.log2, self)
    ret = _loghelper(math.log, self)
    if base != 0.0:
        # change of base: log_b(x) = ln(x) / ln(b)
        ret /= math.log(base)
    return ret
@not_rpython
def tolong(self):
    """Convert to a CPython long (untranslated/test use only)."""
    l = 0L
    digits = list(self._digits)
    digits.reverse()
    for d in digits:
        # rebuild the value most significant digit first
        l = l << SHIFT
        l += intmask(d)
    return l * self.sign
def _normalize(self):
    """Restore invariants in place: drop leading zero digits and make
    sure the value zero is represented with sign 0 and one NULLDIGIT."""
    i = self.numdigits()

    while i > 1 and self._digits[i - 1] == NULLDIGIT:
        i -= 1
    assert i > 0

    if i != self.numdigits():
        self.size = i
    if self.numdigits() == 1 and self._digits[0] == NULLDIGIT:
        self.sign = 0
        self._digits = [NULLDIGIT]

_normalize._always_inline_ = True
@jit.elidable
def bit_length(self):
    """Number of bits needed to represent |self|; 0 for zero."""
    i = self.numdigits()
    if i == 1 and self._digits[0] == NULLDIGIT:
        return 0
    msd = self.digit(i - 1)
    msd_bits = 0
    # count bits of the most significant digit, 6 at a time,
    # finishing with a 32-entry lookup table
    while msd >= 32:
        msd_bits += 6
        msd >>= 6
    msd_bits += [
        0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
        5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
    ][msd]
    # yes, this can overflow: a huge number which fits 3 gigabytes of
    # memory has around 24 gigabits!
    bits = ovfcheck((i-1) * SHIFT) + msd_bits
    return bits
def __repr__(self):
    """Debug representation showing the raw digits, sign and sizes."""
    return "<rbigint digits=%s, sign=%s, size=%d, len=%d, %s>" % (self._digits,
                                                                  self.sign, self.size, len(self._digits),
                                                                  self.str())
# Shared singletons for the most common values; many fast paths above
# return these instead of allocating a fresh rbigint.
ONERBIGINT = rbigint([ONEDIGIT], 1, 1)
ONENEGATIVERBIGINT = rbigint([ONEDIGIT], -1, 1)
NULLRBIGINT = rbigint()

# Lookup table for pow()'s 5-ary window: maps size_b % 5 to the initial
# bit offset j, chosen so the 5-bit extraction ends exactly at bit zero.
_jmapping = [(5 * SHIFT) % 5,
             (4 * SHIFT) % 5,
             (3 * SHIFT) % 5,
             (2 * SHIFT) % 5,
             (1 * SHIFT) % 5]

# if the bigint has more digits than this, it cannot fit into an int
MAX_DIGITS_THAT_CAN_FIT_IN_INT = rbigint.fromint(-sys.maxint - 1).numdigits()
#_________________________________________________________________
# Helper Functions
def _help_mult(x, y, c):
    """
    Multiply two values, then reduce the result:
    result = X*Y % c.  If c is None, skip the mod.
    """
    res = x.mul(y)
    # Perform a modular reduction, X = X % c, but leave X alone if c
    # is NULL.
    if c is not None:
        res = res.mod(c)
    return res
@specialize.argtype(0)
def digits_from_nonneg_long(l):
    """Split a non-negative (long) integer into a digit list, low digit first."""
    digits = []
    while True:
        digits.append(_store_digit(_mask_digit(l & MASK)))
        l = l >> SHIFT
        if not l:
            return digits[:] # to make it non-resizable
@specialize.argtype(0)
def digits_for_most_neg_long(l):
    # This helper only works if 'l' is the most negative integer of its
    # type, which in base 2 looks like: 1000000..0000
    digits = []
    while _mask_digit(l) == 0:
        digits.append(NULLDIGIT)
        l = l >> SHIFT
    # now 'l' looks like: ...111100000
    # turn it into:       ...000100000
    # to drop the extra unwanted 1's introduced by the signed right shift
    l = -intmask(l)
    assert l & MASK == l
    digits.append(_store_digit(l))
    return digits[:] # to make it non-resizable
@specialize.argtype(0)
def args_from_rarith_int1(x):
    """Return (digits, sign) for any rarithmetic integer value."""
    if x > 0:
        return digits_from_nonneg_long(x), 1
    elif x == 0:
        return [NULLDIGIT], 0
    elif x != most_neg_value_of_same_type(x):
        # normal case
        return digits_from_nonneg_long(-x), -1
    else:
        # the most negative integer! hacks needed...
        # (-x would overflow, so a dedicated helper handles it)
        return digits_for_most_neg_long(x), -1
@specialize.argtype(0)
def args_from_rarith_int(x):
    """Widen x to its canonical representation, then split into (digits, sign)."""
    return args_from_rarith_int1(widen(x))
# ^^^ specialized by the precise type of 'x', which is typically a r_xxx
#     instance from rlib.rarithmetic
@not_rpython
def args_from_long(x):
    """Return (digits, sign) for a CPython long (untranslated/test use only)."""
    if x >= 0:
        if x == 0:
            return [NULLDIGIT], 0
        else:
            return digits_from_nonneg_long(x), 1
    else:
        return digits_from_nonneg_long(-x), -1
def _x_add(a, b):
    """ Add the absolute values of two bigint integers.

    The result always carries sign 1; callers fix the sign afterwards.
    """
    size_a = a.numdigits()
    size_b = b.numdigits()

    # Ensure a is the larger of the two:
    if size_a < size_b:
        a, b = b, a
        size_a, size_b = size_b, size_a
    z = rbigint([NULLDIGIT] * (size_a + 1), 1)
    i = UDIGIT_TYPE(0)
    carry = UDIGIT_TYPE(0)
    while i < size_b:
        carry += a.udigit(i) + b.udigit(i)
        z.setdigit(i, carry)
        carry >>= SHIFT
        i += 1
    while i < size_a:
        # only a's digits remain; keep propagating the carry
        carry += a.udigit(i)
        z.setdigit(i, carry)
        carry >>= SHIFT
        i += 1
    z.setdigit(i, carry)
    z._normalize()
    return z
def _x_int_add(a, b):
    """ Add the absolute values of one bigint and one integer.

    The result always carries sign 1; callers fix the sign afterwards.
    """
    size_a = a.numdigits()

    z = rbigint([NULLDIGIT] * (size_a + 1), 1)
    i = UDIGIT_TYPE(1)
    # |b| fits in one digit plus carry since b passed int_in_valid_range
    carry = a.udigit(0) + abs(b)
    z.setdigit(0, carry)
    carry >>= SHIFT

    while i < size_a:
        carry += a.udigit(i)
        z.setdigit(i, carry)
        carry >>= SHIFT
        i += 1
    z.setdigit(i, carry)
    z._normalize()
    return z
def _x_sub(a, b):
    """ Subtract the absolute values of two integers.

    Returns |a| - |b| with sign 1, or |b| - |a| with sign -1 when
    |b| > |a|; callers combine this with the operands' real signs.
    """
    size_a = a.numdigits()
    size_b = b.numdigits()
    sign = 1

    # Ensure a is the larger of the two:
    if size_a < size_b:
        sign = -1
        a, b = b, a
        size_a, size_b = size_b, size_a
    elif size_a == size_b:
        # Find highest digit where a and b differ:
        i = size_a - 1
        while i >= 0 and a.digit(i) == b.digit(i):
            i -= 1
        if i < 0:
            return NULLRBIGINT
        if a.digit(i) < b.digit(i):
            sign = -1
            a, b = b, a
        size_a = size_b = i+1

    z = rbigint([NULLDIGIT] * size_a, sign, size_a)
    borrow = UDIGIT_TYPE(0)
    i = _load_unsigned_digit(0)
    while i < size_b:
        # The following assumes unsigned arithmetic
        # works modulo 2**N for some N>SHIFT.
        borrow = a.udigit(i) - b.udigit(i) - borrow
        z.setdigit(i, borrow)
        borrow >>= SHIFT
        #borrow &= 1 # Keep only one sign bit
        i += 1
    while i < size_a:
        # only a's digits remain; keep propagating the borrow
        borrow = a.udigit(i) - borrow
        z.setdigit(i, borrow)
        borrow >>= SHIFT
        #borrow &= 1
        i += 1

    assert borrow == 0
    z._normalize()
    return z
def _x_int_sub(a, b):
    """ Subtract the absolute values of two integers.

    Computes |a| - |b| for a machine-int b.  For multi-digit a the
    result sign is 1 because |a| >= |b| is then guaranteed; the
    single-digit case delegates to fromint, which sets the right sign.
    """
    size_a = a.numdigits()

    bdigit = abs(b)

    if size_a == 1:
        # Find highest digit where a and b differ:
        adigit = a.digit(0)

        if adigit == bdigit:
            return NULLRBIGINT

        return rbigint.fromint(adigit - bdigit)

    z = rbigint([NULLDIGIT] * size_a, 1, size_a)
    i = _load_unsigned_digit(1)
    # The following assumes unsigned arithmetic
    # works modulo 2**N for some N>SHIFT.
    borrow = a.udigit(0) - bdigit
    z.setdigit(0, borrow)
    borrow >>= SHIFT
    #borrow &= 1 # Keep only one sign bit

    while i < size_a:
        borrow = a.udigit(i) - borrow
        z.setdigit(i, borrow)
        borrow >>= SHIFT
        #borrow &= 1
        i += 1

    assert borrow == 0
    z._normalize()
    return z
# A neat little table of power of twos.
# Maps 2**n (and -2**n) as r_longlong to n, for n in 1..SHIFT-1;
# used to turn multiplication/division by a power of two into a shift.
ptwotable = {}
for x in range(SHIFT-1):
    ptwotable[r_longlong(2 << x)] = x+1
    ptwotable[r_longlong(-2 << x)] = x+1
def _x_mul(a, b, digit=0):
    """
    Grade school multiplication, ignoring the signs.
    Returns the absolute value of the product, or None if error.
    """

    size_a = a.numdigits()
    size_b = b.numdigits()

    if a is b:
        # Efficient squaring per HAC, Algorithm 14.16:
        # http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
        # Gives slightly less than a 2x speedup when a == b,
        # via exploiting that each entry in the multiplication
        # pyramid appears twice (except for the size_a squares).
        z = rbigint([NULLDIGIT] * (size_a + size_b), 1)
        i = UDIGIT_TYPE(0)
        while i < size_a:
            f = a.widedigit(i)
            pz = i << 1
            pa = i + 1

            carry = z.widedigit(pz) + f * f
            z.setdigit(pz, carry)
            pz += 1
            carry >>= SHIFT
            assert carry <= MASK

            # Now f is added in twice in each column of the
            # pyramid it appears.  Same as adding f<<1 once.
            f <<= 1
            while pa < size_a:
                carry += z.widedigit(pz) + a.widedigit(pa) * f
                pa += 1
                z.setdigit(pz, carry)
                pz += 1
                carry >>= SHIFT
            if carry:
                carry += z.widedigit(pz)
                z.setdigit(pz, carry)
                pz += 1
                carry >>= SHIFT
            if carry:
                z.setdigit(pz, z.widedigit(pz) + carry)
            assert (carry >> SHIFT) == 0
            i += 1
        z._normalize()
        return z

    elif digit:
        # caller passed a's single digit: multiply-by-digit fast path
        if digit & (digit - 1) == 0:
            return b.lqshift(ptwotable[digit])

        # Even if it's not power of two it can still be useful.
        return _muladd1(b, digit)

    # a is not b
    # use the following identity to reduce the number of operations
    # a * b = a_0*b_0 + sum_{i=1}^n(a_0*b_i + a_1*b_{i-1}) + a_1*b_n
    # (digits of a are consumed two at a time)
    z = rbigint([NULLDIGIT] * (size_a + size_b), 1)
    i = UDIGIT_TYPE(0)
    size_a1 = UDIGIT_TYPE(size_a - 1)
    size_b1 = UDIGIT_TYPE(size_b - 1)
    while i < size_a1:
        f0 = a.widedigit(i)
        f1 = a.widedigit(i + 1)
        pz = i
        carry = z.widedigit(pz) + b.widedigit(0) * f0
        z.setdigit(pz, carry)
        pz += 1
        carry >>= SHIFT
        j = UDIGIT_TYPE(0)
        while j < size_b1:
            # this operation does not overflow using
            # SHIFT = (LONG_BIT // 2) - 1 = B - 1; in fact before it
            # carry and z.widedigit(pz) are less than 2**(B - 1);
            # b.widedigit(j + 1) * f0 < (2**(B-1) - 1)**2; so
            # carry + z.widedigit(pz) + b.widedigit(j + 1) * f0 +
            # b.widedigit(j) * f1 < 2**(2*B - 1) - 2**B < 2**LONG)BIT - 1
            carry += z.widedigit(pz) + b.widedigit(j + 1) * f0 + \
                     b.widedigit(j) * f1
            z.setdigit(pz, carry)
            pz += 1
            carry >>= SHIFT
            j += 1
        # carry < 2**(B + 1) - 2
        carry += z.widedigit(pz) + b.widedigit(size_b1) * f1
        z.setdigit(pz, carry)
        pz += 1
        carry >>= SHIFT
        # carry < 4
        if carry:
            z.setdigit(pz, carry)
        assert (carry >> SHIFT) == 0
        i += 2
    if size_a & 1:
        # odd digit count: one trailing digit of a is still unprocessed
        pz = size_a1
        f = a.widedigit(pz)
        pb = 0
        carry = _widen_digit(0)
        while pb < size_b:
            carry += z.widedigit(pz) + b.widedigit(pb) * f
            pb += 1
            z.setdigit(pz, carry)
            pz += 1
            carry >>= SHIFT
        if carry:
            z.setdigit(pz, z.widedigit(pz) + carry)
    z._normalize()
    return z
def _kmul_split(n, size):
    """
    A helper for Karatsuba multiplication (k_mul).
    Takes a bigint "n" and an integer "size" representing the place to
    split, and sets low and high such that abs(n) == (high << size) + low,
    viewing the shift as being by digits.  The sign bit is ignored, and
    the return values are >= 0.
    """
    size_n = n.numdigits()
    size_lo = min(size_n, size)

    # We use "or" here to avoid having a check where list can be empty in _normalize.
    lo = rbigint(n._digits[:size_lo] or [NULLDIGIT], 1)
    hi = rbigint(n._digits[size_lo:n.size] or [NULLDIGIT], 1)
    lo._normalize()
    hi._normalize()
    return hi, lo
def _k_mul(a, b):
    """
    Karatsuba multiplication. Ignores the input signs, and returns the
    absolute value of the product (or raises if error).
    See Knuth Vol. 2 Chapter 4.3.3 (Pp. 294-295).
    """
    asize = a.numdigits()
    bsize = b.numdigits()
    # (ah*X+al)(bh*X+bl) = ah*bh*X*X + (ah*bl + al*bh)*X + al*bl
    # Let k = (ah+al)*(bh+bl) = ah*bl + al*bh + ah*bh + al*bl
    # Then the original product is
    # ah*bh*X*X + (k - ah*bh - al*bl)*X + al*bl
    # By picking X to be a power of 2, "*X" is just shifting, and it's
    # been reduced to 3 multiplies on numbers half the size.
    # Split a & b into hi & lo pieces, around half the size of the
    # *larger* operand b.
    shift = bsize >> 1
    ah, al = _kmul_split(a, shift)
    if ah.sign == 0:
        # This may happen now that _k_lopsided_mul ain't catching it.
        # a fits entirely into the low half: fall back to schoolbook.
        return _x_mul(a, b)
    #assert ah.sign == 1 # the split isn't degenerate
    if a is b:
        # Squaring: reuse the split of a for b.
        bh = ah
        bl = al
    else:
        bh, bl = _kmul_split(b, shift)
    # The plan:
    # 1. Allocate result space (asize + bsize digits: that's always
    # enough).
    # 2. Compute ah*bh, and copy into result at 2*shift.
    # 3. Compute al*bl, and copy into result at 0. Note that this
    # can't overlap with #2.
    # 4. Subtract al*bl from the result, starting at shift. This may
    # underflow (borrow out of the high digit), but we don't care:
    # we're effectively doing unsigned arithmetic mod
    # BASE**(sizea + sizeb), and so long as the *final* result fits,
    # borrows and carries out of the high digit can be ignored.
    # 5. Subtract ah*bh from the result, starting at shift.
    # 6. Compute (ah+al)*(bh+bl), and add it into the result starting
    # at shift.
    # 1. Allocate result space.
    ret = rbigint([NULLDIGIT] * (asize + bsize), 1)
    # 2. t1 <- ah*bh, and copy into high digits of result.
    t1 = ah.mul(bh)
    assert t1.sign >= 0
    assert 2*shift + t1.numdigits() <= ret.numdigits()
    for i in range(t1.numdigits()):
        ret._digits[2*shift + i] = t1._digits[i]
    # Zero-out the digits higher than the ah*bh copy. */
    ## ignored, assuming that we initialize to zero
    ##i = ret->ob_size - 2*shift - t1->ob_size;
    ##if (i)
    ## memset(ret->ob_digit + 2*shift + t1->ob_size, 0,
    ## i * sizeof(digit));
    # 3. t2 <- al*bl, and copy into the low digits.
    t2 = al.mul(bl)
    assert t2.sign >= 0
    assert t2.numdigits() <= 2*shift # no overlap with high digits
    for i in range(t2.numdigits()):
        ret._digits[i] = t2._digits[i]
    # Zero out remaining digits.
    ## ignored, assuming that we initialize to zero
    ##i = 2*shift - t2->ob_size; /* number of uninitialized digits */
    ##if (i)
    ## memset(ret->ob_digit + t2->ob_size, 0, i * sizeof(digit));
    # 4 & 5. Subtract ah*bh (t1) and al*bl (t2). We do al*bl first
    # because it's fresher in cache.
    i = ret.numdigits() - shift # # digits after shift
    _v_isub(ret, shift, i, t2, t2.numdigits())
    _v_isub(ret, shift, i, t1, t1.numdigits())
    # 6. t3 <- (ah+al)(bh+bl), and add into result.
    t1 = _x_add(ah, al)
    if a is b:
        t2 = t1
    else:
        t2 = _x_add(bh, bl)
    t3 = t1.mul(t2)
    assert t3.sign >= 0
    # Add t3. It's not obvious why we can't run out of room here.
    # See the (*) comment after this function.
    _v_iadd(ret, shift, i, t3, t3.numdigits())
    ret._normalize()
    return ret
""" (*) Why adding t3 can't "run out of room" above.
Let f(x) mean the floor of x and c(x) mean the ceiling of x. Some facts
to start with:
1. For any integer i, i = c(i/2) + f(i/2). In particular,
bsize = c(bsize/2) + f(bsize/2).
2. shift = f(bsize/2)
3. asize <= bsize
4. Since we call k_lopsided_mul if asize*2 <= bsize, asize*2 > bsize in this
routine, so asize > bsize/2 >= f(bsize/2) in this routine.
We allocated asize + bsize result digits, and add t3 into them at an offset
of shift. This leaves asize+bsize-shift allocated digit positions for t3
to fit into, = (by #1 and #2) asize + f(bsize/2) + c(bsize/2) - f(bsize/2) =
asize + c(bsize/2) available digit positions.
bh has c(bsize/2) digits, and bl at most f(bsize/2) digits. So bh+bl has
at most c(bsize/2) digits + 1 bit.
If asize == bsize, ah has c(bsize/2) digits, else ah has at most f(bsize/2)
digits, and al has at most f(bsize/2) digits in any case. So ah+al has at
most (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 1 bit.
The product (ah+al)*(bh+bl) therefore has at most
c(bsize/2) + (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits
and we have asize + c(bsize/2) available digit positions. We need to show
this is always enough. An instance of c(bsize/2) cancels out in both, so
the question reduces to whether asize digits is enough to hold
(asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits. If asize < bsize,
then we're asking whether asize digits >= f(bsize/2) digits + 2 bits. By #4,
asize is at least f(bsize/2)+1 digits, so this in turn reduces to whether 1
digit is enough to hold 2 bits. This is so since SHIFT=15 >= 2. If
asize == bsize, then we're asking whether bsize digits is enough to hold
c(bsize/2) digits + 2 bits, or equivalently (by #1) whether f(bsize/2) digits
is enough to hold 2 bits. This is so if bsize >= 2, which holds because
bsize >= KARATSUBA_CUTOFF >= 2.
Note that since there's always enough room for (ah+al)*(bh+bl), and that's
clearly >= each of ah*bh and al*bl, there's always enough room to subtract
ah*bh and al*bl too.
"""
def _k_lopsided_mul(a, b):
    # Not in use anymore, only account for like 1% performance. Perhaps if we
    # got rid of the extra list allocation this would be more effective.
    """
    b has at least twice the digits of a, and a is big enough that Karatsuba
    would pay off *if* the inputs had balanced sizes. View b as a sequence
    of slices, each with a->ob_size digits, and multiply the slices by a,
    one at a time. This gives k_mul balanced inputs to work with, and is
    also cache-friendly (we compute one double-width slice of the result
    at a time, then move on, never backtracking except for the helpful
    single-width slice overlap between successive partial sums).
    """
    asize = a.numdigits()
    bsize = b.numdigits()
    # nbdone is # of b digits already multiplied
    assert asize > KARATSUBA_CUTOFF
    assert 2 * asize <= bsize
    # Allocate result space, and zero it out.
    ret = rbigint([NULLDIGIT] * (asize + bsize), 1)
    # Successive slices of b are copied into bslice.
    #bslice = rbigint([0] * asize, 1)
    # XXX we cannot pre-allocate, see comments below!
    # XXX prevent one list from being created.
    bslice = rbigint(sign=1)
    nbdone = 0
    while bsize > 0:
        nbtouse = min(bsize, asize)
        # Multiply the next slice of b by a.
        #bslice.digits[:nbtouse] = b.digits[nbdone : nbdone + nbtouse]
        # XXX: this would be more efficient if we adopted CPython's
        # way to store the size, instead of resizing the list!
        # XXX change the implementation, encoding length via the sign.
        # NOTE: bslice's internal fields are poked directly here; the same
        # rbigint object is reused for every slice.
        bslice._digits = b._digits[nbdone : nbdone + nbtouse]
        bslice.size = nbtouse
        product = _k_mul(a, bslice)
        # Add into result.
        _v_iadd(ret, nbdone, ret.numdigits() - nbdone,
                product, product.numdigits())
        bsize -= nbtouse
        nbdone += nbtouse
    ret._normalize()
    return ret
def _inplace_divrem1(pout, pin, n):
    """
    Divide bigint pin by non-zero digit n, storing quotient
    in pout, and returning the remainder. It's OK for pin == pout on entry.
    """
    rem = _widen_digit(0)
    assert n > 0 and n <= MASK
    size = pin.numdigits() - 1
    # Short division: walk from the most significant digit down, carrying
    # the running remainder into the next (lower) digit.
    while size >= 0:
        rem = (rem << SHIFT) | pin.widedigit(size)
        hi = rem // n
        pout.setdigit(size, hi)
        rem -= hi * n
        size -= 1
    # rem < n <= MASK, so it fits a signed machine word.
    return rffi.cast(lltype.Signed, rem)
def _divrem1(a, n):
    """
    Divide the bigint a by the single digit n, ignoring a's sign;
    n must satisfy 0 < n <= MASK.

    Returns a (quotient, remainder) pair: the quotient is a fresh,
    normalized rbigint and the remainder is a machine integer.
    """
    assert n > 0 and n <= MASK
    ndigits = a.numdigits()
    quotient = rbigint([NULLDIGIT] * ndigits, 1, ndigits)
    remainder = _inplace_divrem1(quotient, a, n)
    quotient._normalize()
    return quotient, remainder
def _v_iadd(x, xofs, m, y, n):
    """
    x and y are rbigints, m >= n required. x.digits[0:m] is modified in place,
    by adding y.digits[0:n] to it. Carries are propagated as far as
    x[m-1], and the remaining carry (0 or 1) is returned.
    Python adaptation: x is addressed relative to xofs!
    """
    carry = UDIGIT_TYPE(0)
    assert m >= n
    i = _load_unsigned_digit(xofs)
    iend = xofs + n
    # First pass: add all digits of y.
    while i < iend:
        carry += x.udigit(i) + y.udigit(i-xofs)
        x.setdigit(i, carry)
        carry >>= SHIFT
        i += 1
    iend = xofs + m
    # Second pass: propagate the carry through x, stopping once it is 0.
    while carry and i < iend:
        carry += x.udigit(i)
        x.setdigit(i, carry)
        carry >>= SHIFT
        i += 1
    return carry
def _v_isub(x, xofs, m, y, n):
    """
    x and y are rbigints, m >= n required. x.digits[0:m] is modified in place,
    by subtracting y.digits[0:n] from it. Borrows are propagated as
    far as x[m-1], and the remaining borrow (0 or 1) is returned.
    Python adaptation: x is addressed relative to xofs!
    """
    borrow = UDIGIT_TYPE(0)
    assert m >= n
    i = _load_unsigned_digit(xofs)
    iend = xofs + n
    # First pass: subtract all digits of y.
    while i < iend:
        borrow = x.udigit(i) - y.udigit(i-xofs) - borrow
        x.setdigit(i, borrow)
        borrow >>= SHIFT
        borrow &= 1 # keep only 1 sign bit
        i += 1
    iend = xofs + m
    # Second pass: propagate the borrow through x, stopping once it is 0.
    while borrow and i < iend:
        borrow = x.udigit(i) - borrow
        x.setdigit(i, borrow)
        borrow >>= SHIFT
        borrow &= 1
        i += 1
    return borrow
@specialize.argtype(2)
def _muladd1(a, n, extra=0):
    """Multiply by a single digit and add a single digit, ignoring the sign.
    Returns a new normalized rbigint with value abs(a) * n + extra.
    """
    size_a = a.numdigits()
    z = rbigint([NULLDIGIT] * (size_a+1), 1)
    assert extra & MASK == extra
    carry = _widen_digit(extra)
    i = 0
    while i < size_a:
        carry += a.widedigit(i) * n
        z.setdigit(i, carry)
        carry >>= SHIFT
        i += 1
    # Store the final carry as the (possibly zero) top digit.
    z.setdigit(i, carry)
    z._normalize()
    return z
def _v_lshift(z, a, m, d):
    """ Shift digit vector a[0:m] d bits left, with 0 <= d < SHIFT. Put
    * result in z[0:m], and return the d bits shifted out of the top.
    """
    carry = 0
    assert 0 <= d and d < SHIFT
    i = 0
    # Each digit passes its top d bits as carry into the next digit.
    while i < m:
        acc = a.widedigit(i) << d | carry
        z.setdigit(i, acc)
        carry = acc >> SHIFT
        i += 1
    return carry
def _v_rshift(z, a, m, d):
    """ Shift digit vector a[0:m] d bits right, with 0 <= d < SHIFT. Put
    * result in z[0:m], and return the d bits shifted out of the bottom.
    """
    carry = _widen_digit(0)
    acc = _widen_digit(0)
    mask = (1 << d) - 1
    assert 0 <= d and d < SHIFT
    # Walk from the most significant digit down; each digit passes its
    # low d bits as carry into the next (lower) digit.
    i = m-1
    while i >= 0:
        acc = (carry << SHIFT) | a.widedigit(i)
        carry = acc & mask
        z.setdigit(i, acc >> d)
        i -= 1
    return carry
def _x_divrem(v1, w1):
    """ Unsigned bigint division with remainder -- the algorithm.
    Knuth Vol. 2 Algorithm D. Signs of v1 and w1 are ignored; returns
    (quotient, remainder) as fresh, normalized rbigints.
    """
    size_v = v1.numdigits()
    size_w = w1.numdigits()
    assert size_v >= size_w and size_w > 1
    v = rbigint([NULLDIGIT] * (size_v + 1), 1, size_v + 1)
    w = rbigint([NULLDIGIT] * size_w, 1, size_w)
    """ normalize: shift w1 left so that its top digit is >= PyLong_BASE/2.
    shift v1 left by the same amount. Results go into w and v. """
    d = SHIFT - bits_in_digit(w1.digit(abs(size_w-1)))
    carry = _v_lshift(w, w1, size_w, d)
    assert carry == 0
    carry = _v_lshift(v, v1, size_v, d)
    if carry != 0 or v.digit(abs(size_v-1)) >= w.digit(abs(size_w-1)):
        v.setdigit(size_v, carry)
        size_v += 1
    """ Now v->ob_digit[size_v-1] < w->ob_digit[size_w-1], so quotient has
    at most (and usually exactly) k = size_v - size_w digits. """
    k = size_v - size_w
    if k == 0:
        # We can't use v1, nor NULLRBIGINT here as some function modify the result.
        assert _v_rshift(w, v, size_w, d) == 0
        w._normalize()
        return rbigint([NULLDIGIT]), w
    assert k > 0
    a = rbigint([NULLDIGIT] * k, 1, k)
    wm1 = w.widedigit(abs(size_w-1))
    wm2 = w.widedigit(abs(size_w-2))
    j = size_v - 1
    k -= 1
    while k >= 0:
        assert j >= 0
        """ inner loop: divide vk[0:size_w+1] by w0[0:size_w], giving
        single-digit quotient q, remainder in vk[0:size_w]. """
        # estimate quotient digit q; may overestimate by 1 (rare)
        if j >= size_v:
            vtop = 0
        else:
            vtop = v.widedigit(j)
        assert vtop <= wm1
        vv = (vtop << SHIFT) | v.widedigit(abs(j-1))
        # NOTE: RPython/Python-2 code, so '/' on these integer wide-digit
        # values is floor division.
        q = vv / wm1
        r = vv - wm1 * q
        while wm2 * q > ((r << SHIFT) | v.widedigit(abs(j-2))):
            q -= 1
            r += wm1
        #assert q <= MASK+1, We need to compare to BASE <=, but ehm, it gives a buildin long error. So we ignore this.
        # subtract q*w0[0:size_w] from vk[0:size_w+1]
        zhi = 0
        i = 0
        while i < size_w:
            z = v.widedigit(k+i) + zhi - q * w.widedigit(i)
            v.setdigit(k+i, z)
            zhi = z >> SHIFT
            i += 1
        # add w back if q was too large (this branch taken rarely)
        if vtop + zhi < 0:
            carry = UDIGIT_TYPE(0)
            i = 0
            while i < size_w:
                carry += v.udigit(k+i) + w.udigit(i)
                v.setdigit(k+i, carry)
                carry >>= SHIFT
                i += 1
            q -= 1
        # store quotient digit
        a.setdigit(k, q)
        k -= 1
        j -= 1
    # unshift remainder (undo the normalization shift); result goes into w
    carry = _v_rshift(w, v, size_w, d)
    assert carry == 0
    a._normalize()
    w._normalize()
    return a, w
def _divrem(a, b):
    """ Long division with remainder, top-level routine.
    Returns (quotient, remainder) with CPython's sign convention:
    the quotient has the sign of a*b, the remainder the sign of a,
    so that a == b*quotient + remainder.
    """
    size_a = a.numdigits()
    size_b = b.numdigits()
    if b.sign == 0:
        raise ZeroDivisionError("long division or modulo by zero")
    if (size_a < size_b or
        (size_a == size_b and
         a.digit(abs(size_a-1)) < b.digit(abs(size_b-1)))):
        # |a| < |b|
        return NULLRBIGINT, a  # result is 0
    if size_b == 1:
        # Single-digit divisor: short division fast path.
        z, urem = _divrem1(a, b.digit(0))
        rem = rbigint([_store_digit(urem)], int(urem != 0), 1)
    else:
        z, rem = _x_divrem(a, b)
    # Set the signs.
    # The quotient z has the sign of a*b;
    # the remainder r has the sign of a,
    # so a = b*z + r.
    if a.sign != b.sign:
        z.sign = - z.sign
    if a.sign < 0 and rem.sign != 0:
        rem.sign = - rem.sign
    return z, rem
# ______________ conversions to double _______________
def _AsScaledDouble(v):
    """
    Convert v to a pair (x, exponent) of a float and an int such that the
    value of v is ~= x * 2**(exponent*SHIFT).

    NBITS_WANTED should be > the number of bits in a double's precision,
    but small enough so that 2**NBITS_WANTED is within the normal double
    range. nbitsneeded is set to 1 less than that because the most-significant
    Python digit contains at least 1 significant bit, but we don't want to
    bother counting them (catering to the worst case cheaply).
    57 is one more than VAX-D double precision; I (Tim) don't know of a double
    format with more precision than that; it's 1 larger so that we add in at
    least one round bit to stand in for the ignored least-significant bits.
    """
    NBITS_WANTED = 57
    if v.sign == 0:
        return 0.0, 0
    i = v.numdigits() - 1
    sign = v.sign
    x = float(v.digit(i))
    nbitsneeded = NBITS_WANTED - 1
    # Invariant: i Python digits remain unaccounted for.
    while i > 0 and nbitsneeded > 0:
        i -= 1
        x = x * FLOAT_MULTIPLIER + float(v.digit(i))
        nbitsneeded -= SHIFT
    # There are i digits we didn't shift in. Pretending they're all
    # zeroes, the true value is x * 2**(i*SHIFT).
    exponent = i
    assert x > 0.0
    return x * sign, exponent
##def ldexp(x, exp):
## assert type(x) is float
## lb1 = LONG_BIT - 1
## multiplier = float(1 << lb1)
## while exp >= lb1:
## x *= multiplier
## exp -= lb1
## if exp:
## x *= float(1 << exp)
## return x
# note that math.ldexp checks for overflows,
# while the C ldexp is not guaranteed to do.
# XXX make sure that we don't ignore this!
# YYY no, we decided to do ignore this!
@jit.dont_look_inside
def _AsDouble(n):
    """ Get a C double from a bigint object, correctly rounded
    (round-half-to-even). Raises OverflowError if n is too large. """
    # This is a "correctly-rounded" version from Python 2.7.
    #
    from rpython.rlib import rfloat
    DBL_MANT_DIG = rfloat.DBL_MANT_DIG # 53 for IEEE 754 binary64
    DBL_MAX_EXP = rfloat.DBL_MAX_EXP # 1024 for IEEE 754 binary64
    assert DBL_MANT_DIG < r_ulonglong.BITS
    # Reduce to case n positive.
    sign = n.sign
    if sign == 0:
        return 0.0
    elif sign < 0:
        n = n.neg()
    # Find exponent: 2**(exp - 1) <= n < 2**exp
    exp = n.bit_length()
    # Get top DBL_MANT_DIG + 2 significant bits of n, with a 'sticky'
    # last bit: that is, the least significant bit of the result is 1
    # iff any of the shifted-out bits is set.
    shift = DBL_MANT_DIG + 2 - exp
    if shift >= 0:
        q = _AsULonglong_mask(n) << shift
        if not we_are_translated():
            assert q == n.tolong() << shift # no masking actually done
    else:
        shift = -shift
        n2 = n.rshift(shift)
        q = _AsULonglong_mask(n2)
        if not we_are_translated():
            assert q == n2.tolong() # no masking actually done
        if not n.eq(n2.lshift(shift)):
            # Some shifted-out bit was nonzero: set the sticky bit.
            q |= 1
    # Now remove the excess 2 bits, rounding to nearest integer (with
    # ties rounded to even): round up iff bit 1 is set and at least one
    # of bits 0 and 2 is set (q & 5 tests exactly those two bits).
    q = (q >> 2) + r_uint((bool(q & 2) and bool(q & 5)))
    if exp > DBL_MAX_EXP or (exp == DBL_MAX_EXP and
                             q == r_ulonglong(1) << DBL_MANT_DIG):
        raise OverflowError("integer too large to convert to float")
    ad = math.ldexp(float(q), exp - DBL_MANT_DIG)
    if sign < 0:
        ad = -ad
    return ad
@specialize.arg(0)
def _loghelper(func, arg):
    """
    A decent logarithm is easy to compute even for huge bigints, but libm can't
    do that by itself -- loghelper can. func is log or log10.
    Note that overflow isn't possible: a bigint can contain
    no more than INT_MAX * SHIFT bits, so has value certainly less than
    2**(2**64 * 2**16) == 2**2**80, and log2 of that is 2**80, which is
    small enough to fit in an IEEE single. log and log10 are even smaller.
    Raises ValueError for arg <= 0.
    """
    x, e = _AsScaledDouble(arg)
    if x <= 0.0:
        raise ValueError
    # Value is ~= x * 2**(e*SHIFT), so the log ~=
    # log(x) + log(2) * e * SHIFT.
    # CAUTION: e*SHIFT may overflow using int arithmetic,
    # so force use of double. */
    return func(x) + (e * float(SHIFT) * func(2.0))
# ____________________________________________________________
BASE_AS_FLOAT = float(1 << SHIFT) # note that it may not fit an int
# BitLengthTable[i] is the bit length of i, for 0 <= i < 32; stored as a
# string of chr() values to get a prebuilt, immutable lookup table.
BitLengthTable = ''.join(map(chr, [
    0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]))
def bits_in_digit(d):
    """Return the bit length of the digit d: the unique k such that
    2**(k-1) <= d < 2**k when d is nonzero, and 0 when d == 0."""
    nbits = 0
    # Strip six bits at a time until d indexes the 32-entry table.
    while d >= 32:
        nbits += 6
        d >>= 6
    return nbits + ord(BitLengthTable[d])
def _truediv_result(result, negate):
if negate:
result = -result
return result
def _truediv_overflow():
raise OverflowError("integer division result too large for a float")
def _bigint_true_divide(a, b):
    """Return the correctly-rounded float value of a / b for rbigints.
    Raises ZeroDivisionError if b is zero and OverflowError if the true
    quotient is too large for a double."""
    # A longish method to obtain the floating-point result with as much
    # precision as theoretically possible. The code is almost directly
    # copied from CPython. See there (Objects/longobject.c,
    # long_true_divide) for detailed comments. Method in a nutshell:
    #
    # 0. reduce to case a, b > 0; filter out obvious underflow/overflow
    # 1. choose a suitable integer 'shift'
    # 2. use integer arithmetic to compute x = floor(2**-shift*a/b)
    # 3. adjust x for correct rounding
    # 4. convert x to a double dx with the same value
    # 5. return ldexp(dx, shift).
    from rpython.rlib import rfloat
    DBL_MANT_DIG = rfloat.DBL_MANT_DIG # 53 for IEEE 754 binary64
    DBL_MAX_EXP = rfloat.DBL_MAX_EXP # 1024 for IEEE 754 binary64
    DBL_MIN_EXP = rfloat.DBL_MIN_EXP
    MANT_DIG_DIGITS = DBL_MANT_DIG // SHIFT
    MANT_DIG_BITS = DBL_MANT_DIG % SHIFT
    # Reduce to case where a and b are both positive.
    negate = (a.sign < 0) ^ (b.sign < 0)
    if not b.tobool():
        raise ZeroDivisionError("long division or modulo by zero")
    if not a.tobool():
        return _truediv_result(0.0, negate)
    a_size = a.numdigits()
    b_size = b.numdigits()
    # Fast path for a and b small (exactly representable in a double).
    # Relies on floating-point division being correctly rounded; results
    # may be subject to double rounding on x86 machines that operate with
    # the x87 FPU set to 64-bit precision.
    a_is_small = (a_size <= MANT_DIG_DIGITS or
                  (a_size == MANT_DIG_DIGITS+1 and
                   a.digit(MANT_DIG_DIGITS) >> MANT_DIG_BITS == 0))
    b_is_small = (b_size <= MANT_DIG_DIGITS or
                  (b_size == MANT_DIG_DIGITS+1 and
                   b.digit(MANT_DIG_DIGITS) >> MANT_DIG_BITS == 0))
    if a_is_small and b_is_small:
        a_size -= 1
        da = float(a.digit(a_size))
        while True:
            a_size -= 1
            if a_size < 0:
                break
            da = da * BASE_AS_FLOAT + a.digit(a_size)
        b_size -= 1
        db = float(b.digit(b_size))
        while True:
            b_size -= 1
            if b_size < 0:
                break
            db = db * BASE_AS_FLOAT + b.digit(b_size)
        return _truediv_result(da / db, negate)
    # Catch obvious cases of underflow and overflow
    diff = a_size - b_size
    if diff > sys.maxint/SHIFT - 1:
        return _truediv_overflow() # Extreme overflow
    elif diff < 1 - sys.maxint/SHIFT:
        return _truediv_result(0.0, negate) # Extreme underflow
    # Next line is now safe from overflowing integers
    diff = (diff * SHIFT + bits_in_digit(a.digit(a_size - 1)) -
            bits_in_digit(b.digit(b_size - 1)))
    # Now diff = a_bits - b_bits.
    if diff > DBL_MAX_EXP:
        return _truediv_overflow()
    elif diff < DBL_MIN_EXP - DBL_MANT_DIG - 1:
        return _truediv_result(0.0, negate)
    # Choose value for shift; see comments for step 1 in CPython.
    shift = max(diff, DBL_MIN_EXP) - DBL_MANT_DIG - 2
    inexact = False
    # x = abs(a * 2**-shift)
    if shift <= 0:
        x = a.lshift(-shift)
    else:
        x = a.rshift(shift, dont_invert=True)
        # set inexact if any of the bits shifted out is nonzero
        if not a.eq(x.lshift(shift)):
            inexact = True
    # x //= b. If the remainder is nonzero, set inexact.
    x, rem = _divrem(x, b)
    if rem.tobool():
        inexact = True
    assert x.tobool() # result of division is never zero
    x_size = x.numdigits()
    x_bits = (x_size-1)*SHIFT + bits_in_digit(x.digit(x_size-1))
    # The number of extra bits that have to be rounded away.
    extra_bits = max(x_bits, DBL_MIN_EXP - shift) - DBL_MANT_DIG
    assert extra_bits == 2 or extra_bits == 3
    # Round by remembering a modified copy of the low digit of x;
    # 'inexact' acts as a sticky bit for round-half-to-even.
    mask = r_uint(1 << (extra_bits - 1))
    low = x.udigit(0) | r_uint(inexact)
    if (low & mask) != 0 and (low & (3*mask-1)) != 0:
        low += mask
    x_digit_0 = low & ~(mask-1)
    # Convert x to a double dx; the conversion is exact.
    x_size -= 1
    dx = 0.0
    while x_size > 0:
        dx += x.digit(x_size)
        dx *= BASE_AS_FLOAT
        x_size -= 1
    dx += x_digit_0
    # Check whether ldexp result will overflow a double.
    if (shift + x_bits >= DBL_MAX_EXP and
        (shift + x_bits > DBL_MAX_EXP or dx == math.ldexp(1.0, x_bits))):
        return _truediv_overflow()
    return _truediv_result(math.ldexp(dx, shift), negate)
# ____________________________________________________________
# Digit alphabets used by the string formatters below.
BASE8 = '01234567'
BASE10 = '0123456789'
BASE16 = '0123456789abcdef'
def _format_base2_notzero(a, digits, prefix='', suffix=''):
    """Format the nonzero rbigint a in the power-of-two base len(digits).
    Returns prefix + digit-string + suffix, preceded by '-' when a is
    negative. Runs in time linear in the number of output characters."""
    base = len(digits)
    # JRH: special case for power-of-2 bases
    accum = 0
    accumbits = 0 # # of bits in accum
    basebits = 0  # == log2(base)
    i = base
    while i > 1:
        basebits += 1
        i >>= 1
    # Compute a rough upper bound for the length of the string
    size_a = a.numdigits()
    i = 5 + len(prefix) + len(suffix) + (size_a*SHIFT + basebits-1) // basebits
    result = [chr(0)] * i
    next_char_index = i
    # The buffer is filled from the end backwards, starting with the suffix.
    j = len(suffix)
    while j > 0:
        next_char_index -= 1
        j -= 1
        result[next_char_index] = suffix[j]
    i = 0
    while i < size_a:
        accum |= a.widedigit(i) << accumbits
        accumbits += SHIFT
        assert accumbits >= basebits
        # Drain whole output digits out of the bit accumulator.
        while 1:
            cdigit = intmask(accum & (base - 1))
            next_char_index -= 1
            assert next_char_index >= 0
            result[next_char_index] = digits[cdigit]
            accumbits -= basebits
            accum >>= basebits
            if i < size_a - 1:
                if accumbits < basebits:
                    break
            else:
                # Last bigint digit: stop once the accumulator is empty.
                if accum <= 0:
                    break
        i += 1
    j = len(prefix)
    while j > 0:
        next_char_index -= 1
        j -= 1
        result[next_char_index] = prefix[j]
    if a.sign < 0:
        next_char_index -= 1
        result[next_char_index] = '-'
    assert next_char_index >= 0 # otherwise, buffer overflow (this is also a
    # hint for the annotator for the slice below)
    return ''.join(result[next_char_index:])
class _PartsCache(object):
    """Cache of powers base ** (mindigits * 2**i), used by _format to
    split a bigint recursively into machine-int-sized printable chunks."""

    def __init__(self):
        # Bases 0 and 1 make no sense, and power-of-two bases (including 2)
        # are handled by a dedicated routine, so slots cover bases 3..36.
        self.parts_cache = [None] * 34
        self.mindigits = [0] * 34
        for slot in range(34):
            base = slot + 3
            # Largest exponent e such that base ** e < sys.maxint.
            e = 1
            while base ** e < sys.maxint:
                e += 1
            self.mindigits[slot] = e - 1

    def get_cached_parts(self, base):
        index = base - 3
        res = self.parts_cache[index]
        if res is None:
            # Lazily seed the list with base ** mindigits as an rbigint.
            rbase = rbigint.fromint(base)
            part = rbase.pow(rbigint.fromint(self.mindigits[index]))
            res = [part]
            self.parts_cache[index] = res
        return res

    def get_mindigits(self, base):
        return self.mindigits[base - 3]

_parts_cache = _PartsCache()
def _format_int_general(val, digits):
base = len(digits)
out = []
while val:
out.append(digits[val % base])
val //= base
out.reverse()
return "".join(out)
def _format_int10(val, digits):
return str(val)
@specialize.arg(7)
def _format_recursive(x, i, output, pts, digits, size_prefix, mindigits, _format_int):
    """Recursively split x along pts[i] == base ** (mindigits * 2**i) and
    append its digits to `output`. Leaf pieces (i < 0) fit in a machine
    int and are rendered via _format_int; every leaf except the leading
    one is zero-padded to exactly `mindigits` characters."""
    # bottomed out with min_digit sized pieces
    # use str of ints
    if i < 0:
        # this checks whether any digit has been appended yet
        if output.getlength() == size_prefix:
            # Leading piece: no padding, and skip it entirely when zero.
            if x.sign != 0:
                s = _format_int(x.toint(), digits)
                output.append(s)
        else:
            s = _format_int(x.toint(), digits)
            output.append_multiple_char(digits[0], mindigits - len(s))
            output.append(s)
    else:
        top, bot = x.divmod(pts[i]) # split the number
        _format_recursive(top, i-1, output, pts, digits, size_prefix, mindigits, _format_int)
        _format_recursive(bot, i-1, output, pts, digits, size_prefix, mindigits, _format_int)
def _format(x, digits, prefix='', suffix=''):
    """Convert the rbigint x to a string in base len(digits) (2..36),
    using the given digit alphabet and optional prefix/suffix (e.g. '0x'/'L').
    A '-' is prepended (before the prefix) for negative x."""
    if x.sign == 0:
        return prefix + "0" + suffix
    base = len(digits)
    assert base >= 2 and base <= 36
    if (base & (base - 1)) == 0:
        # Power-of-two bases have a dedicated linear-time routine.
        return _format_base2_notzero(x, digits, prefix, suffix)
    negative = x.sign < 0
    if negative:
        x = x.neg()
    # NOTE: the unused local 'rbase = rbigint.fromint(base)' was removed;
    # the cached parts below already encode the base.
    two = rbigint.fromint(2)
    pts = _parts_cache.get_cached_parts(base)
    mindigits = _parts_cache.get_mindigits(base)
    stringsize = mindigits
    startindex = 0
    # Find the first cached power pts[i] that exceeds x.
    for startindex, part in enumerate(pts):
        if not part.lt(x):
            break
        stringsize *= 2 # XXX can this overflow on 32 bit?
    else:
        # not enough parts computed yet
        while pts[-1].lt(x):
            pts.append(pts[-1].pow(two))
            stringsize *= 2
        startindex = len(pts) - 1
    # remove first base**2**i greater than x
    startindex -= 1
    output = StringBuilder(stringsize)
    if negative:
        output.append('-')
    output.append(prefix)
    if digits == BASE10:
        # Decimal gets a str()-based fast path for the leaf pieces.
        _format_recursive(
            x, startindex, output, pts, digits, output.getlength(), mindigits,
            _format_int10)
    else:
        _format_recursive(
            x, startindex, output, pts, digits, output.getlength(), mindigits,
            _format_int_general)
    output.append(suffix)
    return output.build()
@specialize.arg(1)
def _bitwise(a, op, b): # '&', '|', '^'
    """ Bitwise and/or/xor operations on two rbigints.
    Negative operands are handled via the identity ~x == -(x+1): they are
    inverted up front and combined with an all-ones mask per digit; when
    the mathematical result is negative (negz set), the digit-wise result
    is inverted back at the end.
    """
    if a.sign < 0:
        a = a.invert()
        maska = MASK
    else:
        maska = 0
    if b.sign < 0:
        b = b.invert()
        maskb = MASK
    else:
        maskb = 0
    negz = 0
    if op == '^':
        if maska != maskb:
            maska ^= MASK
            negz = -1
    elif op == '&':
        if maska and maskb:
            op = '|'
            maska ^= MASK
            maskb ^= MASK
            negz = -1
    elif op == '|':
        if maska or maskb:
            op = '&'
            maska ^= MASK
            maskb ^= MASK
            negz = -1
    else:
        assert 0, "unreachable"
    # JRH: The original logic here was to allocate the result value (z)
    # as the longer of the two operands. However, there are some cases
    # where the result is guaranteed to be shorter than that: AND of two
    # positives, OR of two negatives: use the shorter number. AND with
    # mixed signs: use the positive number. OR with mixed signs: use the
    # negative number. After the transformations above, op will be '&'
    # iff one of these cases applies, and mask will be non-0 for operands
    # whose length should be ignored.
    size_a = a.numdigits()
    size_b = b.numdigits()
    if op == '&':
        if maska:
            size_z = size_b
        else:
            if maskb:
                size_z = size_a
            else:
                size_z = min(size_a, size_b)
    else:
        size_z = max(size_a, size_b)
    z = rbigint([NULLDIGIT] * size_z, 1, size_z)
    i = 0
    while i < size_z:
        # Digits beyond an operand's length behave as its sign-extension mask.
        if i < size_a:
            diga = a.digit(i) ^ maska
        else:
            diga = maska
        if i < size_b:
            digb = b.digit(i) ^ maskb
        else:
            digb = maskb
        if op == '&':
            z.setdigit(i, diga & digb)
        elif op == '|':
            z.setdigit(i, diga | digb)
        elif op == '^':
            z.setdigit(i, diga ^ digb)
        i += 1
    z._normalize()
    if negz == 0:
        return z
    # Complement the result when it should be negative.
    return z.invert()
@specialize.arg(1)
def _int_bitwise(a, op, b): # '&', '|', '^'
    """ Bitwise and/or/xor of an rbigint a with a machine int b.
    Falls back to _bitwise when b does not fit in a single digit.
    Same negative-number masking scheme as _bitwise; b contributes its
    (possibly inverted) value only at digit position 0, and its
    sign-extension mask everywhere above.
    """
    if not int_in_valid_range(b):
        # Fallback to long.
        return _bitwise(a, op, rbigint.fromint(b))
    if a.sign < 0:
        a = a.invert()
        maska = MASK
    else:
        maska = 0
    if b < 0:
        b = ~b
        maskb = MASK
    else:
        maskb = 0
    negz = 0
    if op == '^':
        if maska != maskb:
            maska ^= MASK
            negz = -1
    elif op == '&':
        if maska and maskb:
            op = '|'
            maska ^= MASK
            maskb ^= MASK
            negz = -1
    elif op == '|':
        if maska or maskb:
            op = '&'
            maska ^= MASK
            maskb ^= MASK
            negz = -1
    # JRH: The original logic here was to allocate the result value (z)
    # as the longer of the two operands. However, there are some cases
    # where the result is guaranteed to be shorter than that: AND of two
    # positives, OR of two negatives: use the shorter number. AND with
    # mixed signs: use the positive number. OR with mixed signs: use the
    # negative number. After the transformations above, op will be '&'
    # iff one of these cases applies, and mask will be non-0 for operands
    # whose length should be ignored.
    size_a = a.numdigits()
    if op == '&':
        if maska:
            size_z = 1
        else:
            if maskb:
                size_z = size_a
            else:
                size_z = 1
    else:
        size_z = size_a
    z = rbigint([NULLDIGIT] * size_z, 1, size_z)
    i = 0
    while i < size_z:
        if i < size_a:
            diga = a.digit(i) ^ maska
        else:
            diga = maska
        # b is a single digit: only position 0 carries its value.
        if i == 0:
            digb = b ^ maskb
        else:
            digb = maskb
        if op == '&':
            z.setdigit(i, diga & digb)
        elif op == '|':
            z.setdigit(i, diga | digb)
        elif op == '^':
            z.setdigit(i, diga ^ digb)
        i += 1
    z._normalize()
    if negz == 0:
        return z
    return z.invert()
# Bounds used when converting a bigint to a signed 64-bit integer.
ULONGLONG_BOUND = r_ulonglong(1L << (r_longlong.BITS-1))
LONGLONG_MIN = r_longlong(-(1L << (r_longlong.BITS-1)))
def _AsLongLong(v):
    """
    Get a r_longlong integer from a bigint object.
    Raises OverflowError if overflow occurs.
    """
    x = _AsULonglong_ignore_sign(v)
    # grr grr grr
    if x >= ULONGLONG_BOUND:
        if x == ULONGLONG_BOUND and v.sign < 0:
            # abs(LONGLONG_MIN) fits unsigned but not signed.
            x = LONGLONG_MIN
        else:
            raise OverflowError
    else:
        x = r_longlong(x)
        if v.sign < 0:
            x = -x
    return x
def _AsULonglong_ignore_sign(v):
    """Convert abs(v) to r_ulonglong; raises OverflowError when it does
    not fit in an unsigned long long."""
    x = r_ulonglong(0)
    i = v.numdigits() - 1
    while i >= 0:
        prev = x
        x = (x << SHIFT) + r_ulonglong(v.widedigit(i))
        # If shifting back down does not recover the previous value,
        # the left shift lost bits: overflow.
        if (x >> SHIFT) != prev:
            raise OverflowError(
                "long int too large to convert to unsigned long long int")
        i -= 1
    return x
def make_unsigned_mask_conversion(T):
    """Build a converter that reduces a bigint modulo 2**(bits of T) and
    returns it as the unsigned type T -- a C-style masking cast."""
    def _As_unsigned_mask(v):
        x = T(0)
        i = v.numdigits() - 1
        while i >= 0:
            x = (x << SHIFT) + T(v.digit(i))
            i -= 1
        if v.sign < 0:
            # Unsigned negation implements the two's-complement wraparound.
            x = -x
        return x
    return _As_unsigned_mask
# Masking conversions to the two native unsigned integer widths.
_AsULonglong_mask = make_unsigned_mask_conversion(r_ulonglong)
_AsUInt_mask = make_unsigned_mask_conversion(r_uint)
def _hash(v):
    """Hash of a bigint; equal int and long values hash equally (see
    the comments below for the exact invariants)."""
    # This is designed so that Python ints and longs with the
    # same value hash to the same value, otherwise comparisons
    # of mapping keys will turn out weird. Moreover, purely
    # to please decimal.py, we return a hash that satisfies
    # hash(x) == hash(x % ULONG_MAX). In particular, this
    # implies that hash(x) == hash(x % (2**64-1)).
    i = v.numdigits() - 1
    sign = v.sign
    x = r_uint(0)
    LONG_BIT_SHIFT = LONG_BIT - SHIFT
    while i >= 0:
        # Force a native long #-bits (32 or 64) circular shift
        x = (x << SHIFT) | (x >> LONG_BIT_SHIFT)
        x += v.udigit(i)
        # If the addition above overflowed we compensate by
        # incrementing. This preserves the value modulo
        # ULONG_MAX.
        if x < v.udigit(i):
            x += 1
        i -= 1
    # Apply the sign; intmask() converts back to a signed machine int.
    res = intmask(intmask(x) * sign)
    return res
#_________________________________________________________________
# a few internal helpers
def digits_max_for_base(base):
    """Return base ** k for the largest k with base ** k < MASK, i.e. the
    biggest power of `base` whose value still fits in one bigint digit."""
    power = 1
    while base ** power < MASK:
        power += 1
    return base ** (power - 1)
# BASE_MAX[base] is the largest power of `base` fitting in one bigint digit;
# entries 0 and 1 are placeholders (those bases make no sense).
BASE_MAX = [0, 1] + [digits_max_for_base(_base) for _base in range(2, 37)]
DEC_MAX = digits_max_for_base(10)
assert DEC_MAX == BASE_MAX[10]
def _decimalstr_to_bigint(s):
    # a string that has been already parsed to be decimal and valid,
    # is turned into a bigint
    p = 0
    lim = len(s)
    sign = False
    if s[p] == '-':
        sign = True
        p += 1
    elif s[p] == '+':
        p += 1
    a = rbigint()
    # Batch decimal digits into the machine int `dig` (scale factor `tens`),
    # folding each full batch into the bigint with one _muladd1 call.
    tens = 1
    dig = 0
    ord0 = ord('0')
    while p < lim:
        dig = dig * 10 + ord(s[p]) - ord0
        p += 1
        tens *= 10
        if tens == DEC_MAX or p == lim:
            a = _muladd1(a, tens, dig)
            tens = 1
            dig = 0
    if sign and a.sign == 1:
        a.sign = -1
    return a
def parse_digit_string(parser):
    # helper for fromstr
    base = parser.base
    if (base & (base - 1)) == 0 and base >= 2:
        # Power-of-two bases take time linear in the string length.
        return parse_string_from_binary_base(parser)
    a = rbigint()
    # Batch digits into the machine int `dig` (scale factor `tens`) and
    # fold each full batch into the bigint with a single _muladd1 call.
    digitmax = BASE_MAX[base]
    tens, dig = 1, 0
    while True:
        digit = parser.next_digit()
        if tens == digitmax or digit < 0:
            a = _muladd1(a, tens, dig)
            if digit < 0:
                break
            dig = digit
            tens = base
        else:
            dig = dig * base + digit
            tens *= base
    a.sign *= parser.sign
    return a
def parse_string_from_binary_base(parser):
    # The point to this routine is that it takes time linear in the number of
    # string characters.
    from rpython.rlib.rstring import ParseStringError
    base = parser.base
    if base == 2: bits_per_char = 1
    elif base == 4: bits_per_char = 2
    elif base == 8: bits_per_char = 3
    elif base == 16: bits_per_char = 4
    elif base == 32: bits_per_char = 5
    else:
        raise AssertionError
    # n <- total number of bits needed, while moving 'parser' to the end
    n = 0
    while parser.next_digit() >= 0:
        n += 1
    # b <- number of Python digits needed, = ceiling(n/SHIFT). */
    try:
        b = ovfcheck(n * bits_per_char)
        b = ovfcheck(b + (SHIFT - 1))
    except OverflowError:
        raise ParseStringError("long string too large to convert")
    b = (b // SHIFT) or 1
    z = rbigint([NULLDIGIT] * b, sign=parser.sign)
    # Read string from right, and fill in long from left; i.e.,
    # from least to most significant in both.
    accum = _widen_digit(0)
    bits_in_accum = 0
    pdigit = 0
    for _ in range(n):
        k = parser.prev_digit()
        accum |= _widen_digit(k) << bits_in_accum
        bits_in_accum += bits_per_char
        if bits_in_accum >= SHIFT:
            # A full bigint digit is ready: flush it.
            z.setdigit(pdigit, accum)
            pdigit += 1
            assert pdigit <= b
            accum >>= SHIFT
            bits_in_accum -= SHIFT
    if bits_in_accum:
        # Flush the leftover partial digit.
        z.setdigit(pdigit, accum)
    z._normalize()
    return z
| 30.706975 | 119 | 0.550013 | from rpython.rlib.rarithmetic import LONG_BIT, intmask, longlongmask, r_uint, r_ulonglong
from rpython.rlib.rarithmetic import ovfcheck, r_longlong, widen
from rpython.rlib.rarithmetic import most_neg_value_of_same_type
from rpython.rlib.rarithmetic import check_support_int128
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.debug import make_sure_not_resized, check_regular_int
from rpython.rlib.objectmodel import we_are_translated, specialize, not_rpython
from rpython.rlib import jit
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper import extregistry
import math, sys
SUPPORT_INT128 = check_support_int128()
BYTEORDER = sys.byteorder
if SUPPORT_INT128:
SHIFT = 63
UDIGIT_TYPE = r_ulonglong
if LONG_BIT >= 64:
UDIGIT_MASK = intmask
else:
UDIGIT_MASK = longlongmask
LONG_TYPE = rffi.__INT128_T
if LONG_BIT > SHIFT:
STORE_TYPE = lltype.Signed
UNSIGNED_TYPE = lltype.Unsigned
else:
STORE_TYPE = rffi.LONGLONG
UNSIGNED_TYPE = rffi.ULONGLONG
else:
SHIFT = 31
UDIGIT_TYPE = r_uint
UDIGIT_MASK = intmask
STORE_TYPE = lltype.Signed
UNSIGNED_TYPE = lltype.Unsigned
LONG_TYPE = rffi.LONGLONG
MASK = int((1 << SHIFT) - 1)
FLOAT_MULTIPLIER = float(1 << SHIFT)
if SHIFT == LONG_BIT - 1:
MIN_INT_VALUE = -1 << SHIFT
def int_in_valid_range(x):
if x == MIN_INT_VALUE:
return False
return True
else:
def int_in_valid_range(x):
if x > MASK or x < -MASK:
return False
return True
int_in_valid_range._always_inline_ = True
# Debugging digit array access.
#
# False == no checking at all
# True == check 0 <= value <= MASK
# For long multiplication, use the O(N**2) school algorithm unless
# both operands contain more than KARATSUBA_CUTOFF digits (this
# being an internal Python long digit, in base BASE).
# Karatsuba is O(N**1.585)
USE_KARATSUBA = True # set to False for comparison
if SHIFT > 31:
KARATSUBA_CUTOFF = 19
else:
KARATSUBA_CUTOFF = 38
KARATSUBA_SQUARE_CUTOFF = 2 * KARATSUBA_CUTOFF
# For exponentiation, use the binary left-to-right algorithm
# unless the exponent contains more than FIVEARY_CUTOFF digits.
# In that case, do 5 bits at a time. The potential drawback is that
# a table of 2**5 intermediate results is computed.
FIVEARY_CUTOFF = 8
@specialize.argtype(0)
def _mask_digit(x):
return UDIGIT_MASK(x & MASK)
def _widen_digit(x):
return rffi.cast(LONG_TYPE, x)
@specialize.argtype(0)
def _store_digit(x):
return rffi.cast(STORE_TYPE, x)
def _load_unsigned_digit(x):
return rffi.cast(UNSIGNED_TYPE, x)
_load_unsigned_digit._always_inline_ = True
NULLDIGIT = _store_digit(0)
ONEDIGIT = _store_digit(1)
def _check_digits(l):
for x in l:
assert type(x) is type(NULLDIGIT)
assert UDIGIT_MASK(x) & MASK == UDIGIT_MASK(x)
class InvalidEndiannessError(Exception):
pass
class InvalidSignednessError(Exception):
pass
class Entry(extregistry.ExtRegistryEntry):
_about_ = _check_digits
def compute_result_annotation(self, s_list):
from rpython.annotator import model as annmodel
assert isinstance(s_list, annmodel.SomeList)
s_DIGIT = self.bookkeeper.valueoftype(type(NULLDIGIT))
assert s_DIGIT.contains(s_list.listdef.listitem.s_value)
def specialize_call(self, hop):
hop.exception_cannot_occur()
class rbigint(object):
"""This is a reimplementation of longs using a list of digits."""
_immutable_ = True
_immutable_fields_ = ["_digits"]
def __init__(self, digits=[NULLDIGIT], sign=0, size=0):
if not we_are_translated():
_check_digits(digits)
make_sure_not_resized(digits)
self._digits = digits
assert size >= 0
self.size = size or len(digits)
self.sign = sign
# __eq__ and __ne__ method exist for testingl only, they are not RPython!
@not_rpython
def __eq__(self, other):
if not isinstance(other, rbigint):
return NotImplemented
return self.eq(other)
@not_rpython
def __ne__(self, other):
return not (self == other)
def digit(self, x):
"""Return the x'th digit, as an int."""
return self._digits[x]
digit._always_inline_ = True
def widedigit(self, x):
"""Return the x'th digit, as a long long int if needed
to have enough room to contain two digits."""
return _widen_digit(self._digits[x])
widedigit._always_inline_ = True
def udigit(self, x):
"""Return the x'th digit, as an unsigned int."""
return _load_unsigned_digit(self._digits[x])
udigit._always_inline_ = True
@specialize.argtype(2)
def setdigit(self, x, val):
val = _mask_digit(val)
assert val >= 0
self._digits[x] = _store_digit(val)
setdigit._always_inline_ = True
def numdigits(self):
return self.size
numdigits._always_inline_ = True
@staticmethod
@jit.elidable
def fromint(intval):
check_regular_int(intval)
if intval < 0:
sign = -1
ival = -r_uint(intval)
elif intval > 0:
sign = 1
ival = r_uint(intval)
else:
return NULLRBIGINT
carry = ival >> SHIFT
if carry:
return rbigint([_store_digit(ival & MASK),
_store_digit(carry)], sign, 2)
else:
return rbigint([_store_digit(ival & MASK)], sign, 1)
@staticmethod
@jit.elidable
def frombool(b):
if b:
return ONERBIGINT
return NULLRBIGINT
@staticmethod
@not_rpython
def fromlong(l):
return rbigint(*args_from_long(l))
@staticmethod
@jit.elidable
def fromfloat(dval):
""" Create a new bigint object from a float """
if math.isinf(dval):
raise OverflowError("cannot convert float infinity to integer")
if math.isnan(dval):
raise ValueError("cannot convert float NaN to integer")
return rbigint._fromfloat_finite(dval)
@staticmethod
@jit.elidable
def _fromfloat_finite(dval):
sign = 1
if dval < 0.0:
sign = -1
dval = -dval
frac, expo = math.frexp(dval)
if expo <= 0:
return NULLRBIGINT
ndig = (expo-1) // SHIFT + 1
v = rbigint([NULLDIGIT] * ndig, sign, ndig)
frac = math.ldexp(frac, (expo-1) % SHIFT + 1)
for i in range(ndig-1, -1, -1):
bits = int(int(frac))
v.setdigit(i, bits)
frac -= float(bits)
frac = math.ldexp(frac, SHIFT)
return v
@staticmethod
@jit.elidable
@specialize.argtype(0)
def fromrarith_int(i):
return rbigint(*args_from_rarith_int(i))
@staticmethod
@jit.elidable
def fromdecimalstr(s):
return _decimalstr_to_bigint(s)
@staticmethod
@jit.elidable
def fromstr(s, base=0, allow_underscores=False):
"""As string_to_int(), but ignores an optional 'l' or 'L' suffix
and returns an rbigint."""
from rpython.rlib.rstring import NumberStringParser, \
strip_spaces
s = literal = strip_spaces(s)
if (s.endswith('l') or s.endswith('L')) and base < 22:
s = s[:-1]
parser = NumberStringParser(s, literal, base, 'long',
allow_underscores=allow_underscores)
return rbigint._from_numberstring_parser(parser)
@staticmethod
def _from_numberstring_parser(parser):
return parse_digit_string(parser)
@staticmethod
@jit.elidable
def frombytes(s, byteorder, signed):
if byteorder not in ('big', 'little'):
raise InvalidEndiannessError()
if not s:
return NULLRBIGINT
if byteorder == 'big':
msb = ord(s[0])
itr = range(len(s)-1, -1, -1)
else:
msb = ord(s[-1])
itr = range(0, len(s))
sign = -1 if msb >= 0x80 and signed else 1
accum = _widen_digit(0)
accumbits = 0
digits = []
carry = 1
for i in itr:
c = _widen_digit(ord(s[i]))
if sign == -1:
c = (0xFF ^ c) + carry
carry = c >> 8
c &= 0xFF
accum |= c << accumbits
accumbits += 8
if accumbits >= SHIFT:
digits.append(_store_digit(intmask(accum & MASK)))
accum >>= SHIFT
accumbits -= SHIFT
if accumbits:
digits.append(_store_digit(intmask(accum)))
result = rbigint(digits[:], sign)
result._normalize()
return result
@jit.elidable
def tobytes(self, nbytes, byteorder, signed):
if byteorder not in ('big', 'little'):
raise InvalidEndiannessError()
if not signed and self.sign == -1:
raise InvalidSignednessError()
bswap = byteorder == 'big'
d = _widen_digit(0)
j = 0
imax = self.numdigits()
accum = _widen_digit(0)
accumbits = 0
result = StringBuilder(nbytes)
carry = 1
for i in range(0, imax):
d = self.widedigit(i)
if self.sign == -1:
d = (d ^ MASK) + carry
carry = d >> SHIFT
d &= MASK
accum |= d << accumbits
if i == imax - 1:
s = d ^ MASK if self.sign == -1 else d
while s:
s >>= 1
accumbits += 1
else:
accumbits += SHIFT
while accumbits >= 8:
if j >= nbytes:
raise OverflowError()
j += 1
result.append(chr(accum & 0xFF))
accum >>= 8
accumbits -= 8
if accumbits:
if j >= nbytes:
raise OverflowError()
j += 1
if self.sign == -1:
# Add a sign bit
accum |= (~_widen_digit(0)) << accumbits
result.append(chr(accum & 0xFF))
if j < nbytes:
signbyte = 0xFF if self.sign == -1 else 0
result.append_multiple_char(chr(signbyte), nbytes - j)
digits = result.build()
if j == nbytes and nbytes > 0 and signed:
# If not already set, we cannot contain the sign bit
msb = digits[-1]
if (self.sign == -1) != (ord(msb) >= 0x80):
raise OverflowError()
if bswap:
# Bah, this is very inefficient. At least it's not
length = len(digits)
if length >= 0:
digits = ''.join([digits[i] for i in range(length-1, -1, -1)])
return digits
def toint(self):
"""
Get an integer from a bigint object.
Raises OverflowError if overflow occurs.
"""
if self.numdigits() > MAX_DIGITS_THAT_CAN_FIT_IN_INT:
raise OverflowError
return self._toint_helper()
@jit.elidable
def _toint_helper(self):
x = self._touint_helper()
if self.sign >= 0:
res = intmask(x)
if res < 0:
raise OverflowError
else:
# Use "-" on the unsigned number, not on the signed number.
# This is needed to produce valid C code.
res = intmask(-x)
if res >= 0:
raise OverflowError
return res
@jit.elidable
def tolonglong(self):
return _AsLongLong(self)
def tobool(self):
return self.sign != 0
@jit.elidable
def touint(self):
if self.sign == -1:
raise ValueError("cannot convert negative integer to unsigned int")
return self._touint_helper()
@jit.elidable
def _touint_helper(self):
x = r_uint(0)
i = self.numdigits() - 1
while i >= 0:
prev = x
x = (x << SHIFT) + self.udigit(i)
if (x >> SHIFT) != prev:
raise OverflowError("long int too large to convert to unsigned int")
i -= 1
return x
@jit.elidable
def toulonglong(self):
if self.sign == -1:
raise ValueError("cannot convert negative integer to unsigned int")
return _AsULonglong_ignore_sign(self)
@jit.elidable
def uintmask(self):
return _AsUInt_mask(self)
@jit.elidable
def ulonglongmask(self):
"""Return r_ulonglong(self), truncating."""
return _AsULonglong_mask(self)
@jit.elidable
def tofloat(self):
return _AsDouble(self)
@jit.elidable
def format(self, digits, prefix='', suffix=''):
# 'digits' is a string whose length is the base to use,
# and where each character is the corresponding digit.
return _format(self, digits, prefix, suffix)
@jit.elidable
def repr(self):
try:
x = self.toint()
except OverflowError:
return self.format(BASE10, suffix="L")
return str(x) + "L"
@jit.elidable
def str(self):
try:
x = self.toint()
except OverflowError:
return self.format(BASE10)
return str(x)
@jit.elidable
def eq(self, other):
if (self.sign != other.sign or
self.numdigits() != other.numdigits()):
return False
i = 0
ld = self.numdigits()
while i < ld:
if self.digit(i) != other.digit(i):
return False
i += 1
return True
@jit.elidable
def int_eq(self, other):
""" eq with int """
if not int_in_valid_range(other):
# Fallback to Long.
return self.eq(rbigint.fromint(other))
if self.numdigits() > 1:
return False
return (self.sign * self.digit(0)) == other
def ne(self, other):
return not self.eq(other)
def int_ne(self, other):
return not self.int_eq(other)
@jit.elidable
def lt(self, other):
if self.sign > other.sign:
return False
if self.sign < other.sign:
return True
ld1 = self.numdigits()
ld2 = other.numdigits()
if ld1 > ld2:
if other.sign > 0:
return False
else:
return True
elif ld1 < ld2:
if other.sign > 0:
return True
else:
return False
i = ld1 - 1
while i >= 0:
d1 = self.digit(i)
d2 = other.digit(i)
if d1 < d2:
if other.sign > 0:
return True
else:
return False
elif d1 > d2:
if other.sign > 0:
return False
else:
return True
i -= 1
return False
@jit.elidable
def int_lt(self, other):
""" lt where other is an int """
if not int_in_valid_range(other):
# Fallback to Long.
return self.lt(rbigint.fromint(other))
osign = 1
if other == 0:
osign = 0
elif other < 0:
osign = -1
if self.sign > osign:
return False
elif self.sign < osign:
return True
digits = self.numdigits()
if digits > 1:
if osign == 1:
return False
else:
return True
d1 = self.sign * self.digit(0)
if d1 < other:
return True
return False
def le(self, other):
return not other.lt(self)
def int_le(self, other):
# Alternative that might be faster, reimplant this. as a check with other + 1. But we got to check for overflow
# or reduce valid range.
if self.int_eq(other):
return True
return self.int_lt(other)
def gt(self, other):
return other.lt(self)
def int_gt(self, other):
return not self.int_le(other)
def ge(self, other):
return not self.lt(other)
def int_ge(self, other):
return not self.int_lt(other)
@jit.elidable
def hash(self):
return _hash(self)
@jit.elidable
def add(self, other):
if self.sign == 0:
return other
if other.sign == 0:
return self
if self.sign == other.sign:
result = _x_add(self, other)
else:
result = _x_sub(other, self)
result.sign *= other.sign
return result
@jit.elidable
def int_add(self, other):
if not int_in_valid_range(other):
# Fallback to long.
return self.add(rbigint.fromint(other))
elif self.sign == 0:
return rbigint.fromint(other)
elif other == 0:
return self
sign = -1 if other < 0 else 1
if self.sign == sign:
result = _x_int_add(self, other)
else:
result = _x_int_sub(self, other)
result.sign *= -1
result.sign *= sign
return result
@jit.elidable
def sub(self, other):
if other.sign == 0:
return self
elif self.sign == 0:
return rbigint(other._digits[:other.size], -other.sign, other.size)
elif self.sign == other.sign:
result = _x_sub(self, other)
else:
result = _x_add(self, other)
result.sign *= self.sign
return result
@jit.elidable
def int_sub(self, other):
if not int_in_valid_range(other):
# Fallback to long.
return self.sub(rbigint.fromint(other))
elif other == 0:
return self
elif self.sign == 0:
return rbigint.fromint(-other)
elif self.sign == (-1 if other < 0 else 1):
result = _x_int_sub(self, other)
else:
result = _x_int_add(self, other)
result.sign *= self.sign
return result
@jit.elidable
def mul(self, b):
asize = self.numdigits()
bsize = b.numdigits()
a = self
if asize > bsize:
a, b, asize, bsize = b, a, bsize, asize
if a.sign == 0 or b.sign == 0:
return NULLRBIGINT
if asize == 1:
if a._digits[0] == ONEDIGIT:
return rbigint(b._digits[:b.size], a.sign * b.sign, b.size)
elif bsize == 1:
res = b.widedigit(0) * a.widedigit(0)
carry = res >> SHIFT
if carry:
return rbigint([_store_digit(res & MASK), _store_digit(carry)], a.sign * b.sign, 2)
else:
return rbigint([_store_digit(res & MASK)], a.sign * b.sign, 1)
result = _x_mul(a, b, a.digit(0))
elif USE_KARATSUBA:
if a is b:
i = KARATSUBA_SQUARE_CUTOFF
else:
i = KARATSUBA_CUTOFF
if asize <= i:
result = _x_mul(a, b)
"""elif 2 * asize <= bsize:
result = _k_lopsided_mul(a, b)"""
else:
result = _k_mul(a, b)
else:
result = _x_mul(a, b)
result.sign = a.sign * b.sign
return result
@jit.elidable
def int_mul(self, b):
if not int_in_valid_range(b):
# Fallback to long.
return self.mul(rbigint.fromint(b))
if self.sign == 0 or b == 0:
return NULLRBIGINT
asize = self.numdigits()
digit = abs(b)
bsign = -1 if b < 0 else 1
if digit == 1:
return rbigint(self._digits[:self.size], self.sign * bsign, asize)
elif asize == 1:
res = self.widedigit(0) * digit
carry = res >> SHIFT
if carry:
return rbigint([_store_digit(res & MASK), _store_digit(carry)], self.sign * bsign, 2)
else:
return rbigint([_store_digit(res & MASK)], self.sign * bsign, 1)
elif digit & (digit - 1) == 0:
result = self.lqshift(ptwotable[digit])
else:
result = _muladd1(self, digit)
result.sign = self.sign * bsign
return result
@jit.elidable
def truediv(self, other):
div = _bigint_true_divide(self, other)
return div
@jit.elidable
def floordiv(self, other):
if self.sign == 1 and other.numdigits() == 1 and other.sign == 1:
digit = other.digit(0)
if digit == 1:
return rbigint(self._digits[:self.size], 1, self.size)
elif digit and digit & (digit - 1) == 0:
return self.rshift(ptwotable[digit])
div, mod = _divrem(self, other)
if mod.sign * other.sign == -1:
if div.sign == 0:
return ONENEGATIVERBIGINT
div = div.int_sub(1)
return div
def div(self, other):
return self.floordiv(other)
@jit.elidable
def mod(self, other):
if other.sign == 0:
raise ZeroDivisionError("long division or modulo by zero")
if self.sign == 0:
return NULLRBIGINT
if other.numdigits() == 1:
otherint = other.digit(0) * other.sign
assert int_in_valid_range(otherint)
return self.int_mod(otherint)
else:
div, mod = _divrem(self, other)
if mod.sign * other.sign == -1:
mod = mod.add(other)
return mod
@jit.elidable
def int_mod(self, other):
if other == 0:
raise ZeroDivisionError("long division or modulo by zero")
if self.sign == 0:
return NULLRBIGINT
elif not int_in_valid_range(other):
# Fallback to long.
return self.mod(rbigint.fromint(other))
if 1: # preserve indentation to preserve history
digit = abs(other)
if digit == 1:
return NULLRBIGINT
elif digit == 2:
modm = self.digit(0) & 1
if modm:
return ONENEGATIVERBIGINT if other < 0 else ONERBIGINT
return NULLRBIGINT
elif digit & (digit - 1) == 0:
mod = self.int_and_(digit - 1)
else:
# Perform
size = self.numdigits() - 1
if size > 0:
rem = self.widedigit(size)
size -= 1
while size >= 0:
rem = ((rem << SHIFT) + self.widedigit(size)) % digit
size -= 1
else:
rem = self.digit(0) % digit
if rem == 0:
return NULLRBIGINT
mod = rbigint([_store_digit(rem)], -1 if self.sign < 0 else 1, 1)
if mod.sign * (-1 if other < 0 else 1) == -1:
mod = mod.int_add(other)
return mod
@jit.elidable
def divmod(v, w):
"""
The / and % operators are now defined in terms of divmod().
The expression a mod b has the value a - b*floor(a/b).
The _divrem function gives the remainder after division of
|a| by |b|, with the sign of a. This is also expressed
as a - b*trunc(a/b), if trunc truncates towards zero.
Some examples:
a b a rem b a mod b
13 10 3 3
-13 10 -3 7
13 -10 3 -7
-13 -10 -3 -3
So, to get from rem to mod, we have to add b if a and b
have different signs. We then subtract one from the 'div'
part of the outcome to keep the invariant intact.
"""
div, mod = _divrem(v, w)
if mod.sign * w.sign == -1:
mod = mod.add(w)
if div.sign == 0:
return ONENEGATIVERBIGINT, mod
div = div.int_sub(1)
return div, mod
@jit.elidable
def pow(a, b, c=None):
negativeOutput = False # if x<0 return negative output
# 5-ary values. If the exponent is large enough, table is
# precomputed so that table[i] == a**i % c for i in range(32).
# python translation: the table is computed when needed.
if b.sign < 0: # if exponent is negative
if c is not None:
raise TypeError(
"pow() 2nd argument "
"cannot be negative when 3rd argument specified")
# XXX failed to implement
raise ValueError("bigint pow() too negative")
size_b = b.numdigits()
if c is not None:
if c.sign == 0:
raise ValueError("pow() 3rd argument cannot be 0")
# if modulus < 0:
# negativeOutput = True
# modulus = -modulus
if c.sign < 0:
negativeOutput = True
c = c.neg()
# if modulus == 1:
# return 0
if c.numdigits() == 1 and c._digits[0] == ONEDIGIT:
return NULLRBIGINT
# Reduce base by modulus in some cases:
# 1. If base < 0. Forcing the base non-neg makes things easier.
# 2. If base is obviously larger than the modulus. The "small
# exponent" case later can multiply directly by base repeatedly,
# while the "large exponent" case multiplies directly by base 31
# times. It can be unboundedly faster to multiply by
# base % modulus instead.
# We could _always_ do this reduction, but mod() isn't cheap,
if a.sign < 0 or a.numdigits() > c.numdigits():
a = a.mod(c)
elif b.sign == 0:
return ONERBIGINT
elif a.sign == 0:
return NULLRBIGINT
elif size_b == 1:
if b._digits[0] == NULLDIGIT:
return ONERBIGINT if a.sign == 1 else ONENEGATIVERBIGINT
elif b._digits[0] == ONEDIGIT:
return a
elif a.numdigits() == 1:
adigit = a.digit(0)
digit = b.digit(0)
if adigit == 1:
if a.sign == -1 and digit % 2:
return ONENEGATIVERBIGINT
return ONERBIGINT
elif adigit & (adigit - 1) == 0:
ret = a.lshift(((digit-1)*(ptwotable[adigit]-1)) + digit-1)
if a.sign == -1 and not digit % 2:
ret.sign = 1
return ret
z = rbigint([ONEDIGIT], 1, 1)
if size_b <= FIVEARY_CUTOFF:
size_b -= 1
while size_b >= 0:
bi = b.digit(size_b)
j = 1 << (SHIFT-1)
while j != 0:
z = _help_mult(z, z, c)
if bi & j:
z = _help_mult(z, a, c)
j >>= 1
size_b -= 1
else:
table = [z] * 32
table[0] = z
for i in range(1, 32):
table[i] = _help_mult(table[i-1], a, c)
j = size_b % 5
j = _jmapping[j]
if not we_are_translated():
assert j == (size_b*SHIFT+4)//5*5 - size_b*SHIFT
accum = r_uint(0)
while True:
j -= 5
if j >= 0:
index = (accum >> j) & 0x1f
else:
if size_b == 0:
break
size_b -= 1
assert size_b >= 0
bi = b.udigit(size_b)
index = ((accum << (-j)) | (bi >> (j+SHIFT))) & 0x1f
accum = bi
j += SHIFT
for k in range(5):
z = _help_mult(z, z, c)
if index:
z = _help_mult(z, table[index], c)
assert j == -5
if negativeOutput and z.sign != 0:
z = z.sub(c)
return z
@jit.elidable
def neg(self):
return rbigint(self._digits, -self.sign, self.size)
@jit.elidable
def abs(self):
if self.sign != -1:
return self
return rbigint(self._digits, 1, self.size)
@jit.elidable
def invert(self):
if self.sign == 0:
return ONENEGATIVERBIGINT
ret = self.int_add(1)
ret.sign = -ret.sign
return ret
@jit.elidable
def lshift(self, int_other):
if int_other < 0:
raise ValueError("negative shift count")
elif int_other == 0:
return self
wordshift = int_other // SHIFT
remshift = int_other - wordshift * SHIFT
if not remshift:
if self.sign == 0:
return self
return rbigint([NULLDIGIT] * wordshift + self._digits, self.sign, self.size + wordshift)
oldsize = self.numdigits()
newsize = oldsize + wordshift + 1
z = rbigint([NULLDIGIT] * newsize, self.sign, newsize)
accum = _widen_digit(0)
j = 0
while j < oldsize:
accum += self.widedigit(j) << remshift
z.setdigit(wordshift, accum)
accum >>= SHIFT
wordshift += 1
j += 1
newsize -= 1
assert newsize >= 0
z.setdigit(newsize, accum)
z._normalize()
return z
lshift._always_inline_ = True
@jit.elidable
def lqshift(self, int_other):
" A quicker one with much less checks, int_other is valid and for the most part constant."
assert int_other > 0
oldsize = self.numdigits()
z = rbigint([NULLDIGIT] * (oldsize + 1), self.sign, (oldsize + 1))
accum = _widen_digit(0)
i = 0
while i < oldsize:
accum += self.widedigit(i) << int_other
z.setdigit(i, accum)
accum >>= SHIFT
i += 1
z.setdigit(oldsize, accum)
z._normalize()
return z
lqshift._always_inline_ = True
@jit.elidable
def rshift(self, int_other, dont_invert=False):
if int_other < 0:
raise ValueError("negative shift count")
elif int_other == 0:
return self
if self.sign == -1 and not dont_invert:
a = self.invert().rshift(int_other)
return a.invert()
wordshift = int_other / SHIFT
newsize = self.numdigits() - wordshift
if newsize <= 0:
return NULLRBIGINT
loshift = int_other % SHIFT
hishift = SHIFT - loshift
z = rbigint([NULLDIGIT] * newsize, self.sign, newsize)
i = 0
while i < newsize:
newdigit = (self.digit(wordshift) >> loshift)
if i+1 < newsize:
newdigit |= (self.digit(wordshift+1) << hishift)
z.setdigit(i, newdigit)
i += 1
wordshift += 1
z._normalize()
return z
rshift._always_inline_ = 'try'
@jit.elidable
def abs_rshift_and_mask(self, bigshiftcount, mask):
assert isinstance(bigshiftcount, r_ulonglong)
assert mask >= 0
wordshift = bigshiftcount / SHIFT
numdigits = self.numdigits()
if wordshift >= numdigits:
return 0
wordshift = intmask(wordshift)
loshift = intmask(intmask(bigshiftcount) - intmask(wordshift * SHIFT))
lastdigit = self.digit(wordshift) >> loshift
if mask > (MASK >> loshift) and wordshift + 1 < numdigits:
hishift = SHIFT - loshift
lastdigit |= self.digit(wordshift+1) << hishift
return lastdigit & mask
@staticmethod
def from_list_n_bits(list, nbits):
if len(list) == 0:
return NULLRBIGINT
if nbits == SHIFT:
z = rbigint(list, 1)
else:
if not (1 <= nbits < SHIFT):
raise ValueError
lllength = (r_ulonglong(len(list)) * nbits) // SHIFT
length = intmask(lllength) + 1
z = rbigint([NULLDIGIT] * length, 1)
out = 0
i = 0
accum = 0
for input in list:
accum |= (input << i)
original_i = i
i += nbits
if i > SHIFT:
z.setdigit(out, accum)
out += 1
accum = input >> (SHIFT - original_i)
i -= SHIFT
assert out < length
z.setdigit(out, accum)
z._normalize()
return z
@jit.elidable
def and_(self, other):
return _bitwise(self, '&', other)
@jit.elidable
def int_and_(self, other):
return _int_bitwise(self, '&', other)
@jit.elidable
def xor(self, other):
return _bitwise(self, '^', other)
@jit.elidable
def int_xor(self, other):
return _int_bitwise(self, '^', other)
@jit.elidable
def or_(self, other):
return _bitwise(self, '|', other)
@jit.elidable
def int_or_(self, other):
return _int_bitwise(self, '|', other)
@jit.elidable
def oct(self):
if self.sign == 0:
return '0L'
else:
return _format(self, BASE8, '0', 'L')
@jit.elidable
def hex(self):
return _format(self, BASE16, '0x', 'L')
@jit.elidable
def log(self, base):
if base == 10.0:
return _loghelper(math.log10, self)
if base == 2.0:
from rpython.rlib import rfloat
return _loghelper(rfloat.log2, self)
ret = _loghelper(math.log, self)
if base != 0.0:
ret /= math.log(base)
return ret
@not_rpython
def tolong(self):
l = 0L
digits = list(self._digits)
digits.reverse()
for d in digits:
l = l << SHIFT
l += intmask(d)
return l * self.sign
def _normalize(self):
i = self.numdigits()
while i > 1 and self._digits[i - 1] == NULLDIGIT:
i -= 1
assert i > 0
if i != self.numdigits():
self.size = i
if self.numdigits() == 1 and self._digits[0] == NULLDIGIT:
self.sign = 0
self._digits = [NULLDIGIT]
_normalize._always_inline_ = True
@jit.elidable
def bit_length(self):
i = self.numdigits()
if i == 1 and self._digits[0] == NULLDIGIT:
return 0
msd = self.digit(i - 1)
msd_bits = 0
while msd >= 32:
msd_bits += 6
msd >>= 6
msd_bits += [
0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
][msd]
bits = ovfcheck((i-1) * SHIFT) + msd_bits
return bits
def __repr__(self):
return "<rbigint digits=%s, sign=%s, size=%d, len=%d, %s>" % (self._digits,
self.sign, self.size, len(self._digits),
self.str())
ONERBIGINT = rbigint([ONEDIGIT], 1, 1)
ONENEGATIVERBIGINT = rbigint([ONEDIGIT], -1, 1)
NULLRBIGINT = rbigint()
_jmapping = [(5 * SHIFT) % 5,
(4 * SHIFT) % 5,
(3 * SHIFT) % 5,
(2 * SHIFT) % 5,
(1 * SHIFT) % 5]
MAX_DIGITS_THAT_CAN_FIT_IN_INT = rbigint.fromint(-sys.maxint - 1).numdigits()
def _help_mult(x, y, c):
"""
Multiply two values, then reduce the result:
result = X*Y % c. If c is None, skip the mod.
"""
res = x.mul(y)
if c is not None:
res = res.mod(c)
return res
@specialize.argtype(0)
def digits_from_nonneg_long(l):
digits = []
while True:
digits.append(_store_digit(_mask_digit(l & MASK)))
l = l >> SHIFT
if not l:
return digits[:]
@specialize.argtype(0)
def digits_for_most_neg_long(l):
digits = []
while _mask_digit(l) == 0:
digits.append(NULLDIGIT)
l = l >> SHIFT
l = -intmask(l)
assert l & MASK == l
digits.append(_store_digit(l))
return digits[:] # to make it non-resizable
@specialize.argtype(0)
def args_from_rarith_int1(x):
if x > 0:
return digits_from_nonneg_long(x), 1
elif x == 0:
return [NULLDIGIT], 0
elif x != most_neg_value_of_same_type(x):
# normal case
return digits_from_nonneg_long(-x), -1
else:
# the most negative integer! hacks needed...
return digits_for_most_neg_long(x), -1
@specialize.argtype(0)
def args_from_rarith_int(x):
return args_from_rarith_int1(widen(x))
# ^^^ specialized by the precise type of 'x', which is typically a r_xxx
# instance from rlib.rarithmetic
@not_rpython
def args_from_long(x):
if x >= 0:
if x == 0:
return [NULLDIGIT], 0
else:
return digits_from_nonneg_long(x), 1
else:
return digits_from_nonneg_long(-x), -1
def _x_add(a, b):
""" Add the absolute values of two bigint integers. """
size_a = a.numdigits()
size_b = b.numdigits()
# Ensure a is the larger of the two:
if size_a < size_b:
a, b = b, a
size_a, size_b = size_b, size_a
z = rbigint([NULLDIGIT] * (size_a + 1), 1)
i = UDIGIT_TYPE(0)
carry = UDIGIT_TYPE(0)
while i < size_b:
carry += a.udigit(i) + b.udigit(i)
z.setdigit(i, carry)
carry >>= SHIFT
i += 1
while i < size_a:
carry += a.udigit(i)
z.setdigit(i, carry)
carry >>= SHIFT
i += 1
z.setdigit(i, carry)
z._normalize()
return z
def _x_int_add(a, b):
""" Add the absolute values of one bigint and one integer. """
size_a = a.numdigits()
z = rbigint([NULLDIGIT] * (size_a + 1), 1)
i = UDIGIT_TYPE(1)
carry = a.udigit(0) + abs(b)
z.setdigit(0, carry)
carry >>= SHIFT
while i < size_a:
carry += a.udigit(i)
z.setdigit(i, carry)
carry >>= SHIFT
i += 1
z.setdigit(i, carry)
z._normalize()
return z
def _x_sub(a, b):
""" Subtract the absolute values of two integers. """
size_a = a.numdigits()
size_b = b.numdigits()
sign = 1
# Ensure a is the larger of the two:
if size_a < size_b:
sign = -1
a, b = b, a
size_a, size_b = size_b, size_a
elif size_a == size_b:
# Find highest digit where a and b differ:
i = size_a - 1
while i >= 0 and a.digit(i) == b.digit(i):
i -= 1
if i < 0:
return NULLRBIGINT
if a.digit(i) < b.digit(i):
sign = -1
a, b = b, a
size_a = size_b = i+1
z = rbigint([NULLDIGIT] * size_a, sign, size_a)
borrow = UDIGIT_TYPE(0)
i = _load_unsigned_digit(0)
while i < size_b:
# The following assumes unsigned arithmetic
# works modulo 2**N for some N>SHIFT.
borrow = a.udigit(i) - b.udigit(i) - borrow
z.setdigit(i, borrow)
borrow >>= SHIFT
#borrow &= 1 # Keep only one sign bit
i += 1
while i < size_a:
borrow = a.udigit(i) - borrow
z.setdigit(i, borrow)
borrow >>= SHIFT
#borrow &= 1
i += 1
assert borrow == 0
z._normalize()
return z
def _x_int_sub(a, b):
""" Subtract the absolute values of two integers. """
size_a = a.numdigits()
bdigit = abs(b)
if size_a == 1:
# Find highest digit where a and b differ:
adigit = a.digit(0)
if adigit == bdigit:
return NULLRBIGINT
return rbigint.fromint(adigit - bdigit)
z = rbigint([NULLDIGIT] * size_a, 1, size_a)
i = _load_unsigned_digit(1)
# The following assumes unsigned arithmetic
# works modulo 2**N for some N>SHIFT.
borrow = a.udigit(0) - bdigit
z.setdigit(0, borrow)
borrow >>= SHIFT
#borrow &= 1 # Keep only one sign bit
while i < size_a:
borrow = a.udigit(i) - borrow
z.setdigit(i, borrow)
borrow >>= SHIFT
#borrow &= 1
i += 1
assert borrow == 0
z._normalize()
return z
# A neat little table of power of twos.
ptwotable = {}
for x in range(SHIFT-1):
ptwotable[r_longlong(2 << x)] = x+1
ptwotable[r_longlong(-2 << x)] = x+1
def _x_mul(a, b, digit=0):
"""
Grade school multiplication, ignoring the signs.
Returns the absolute value of the product, or None if error.
"""
size_a = a.numdigits()
size_b = b.numdigits()
if a is b:
# Efficient squaring per HAC, Algorithm 14.16:
# http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
# Gives slightly less than a 2x speedup when a == b,
# via exploiting that each entry in the multiplication
# pyramid appears twice (except for the size_a squares).
z = rbigint([NULLDIGIT] * (size_a + size_b), 1)
i = UDIGIT_TYPE(0)
while i < size_a:
f = a.widedigit(i)
pz = i << 1
pa = i + 1
carry = z.widedigit(pz) + f * f
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
assert carry <= MASK
# Now f is added in twice in each column of the
# pyramid it appears. Same as adding f<<1 once.
f <<= 1
while pa < size_a:
carry += z.widedigit(pz) + a.widedigit(pa) * f
pa += 1
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
if carry:
carry += z.widedigit(pz)
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
if carry:
z.setdigit(pz, z.widedigit(pz) + carry)
assert (carry >> SHIFT) == 0
i += 1
z._normalize()
return z
elif digit:
if digit & (digit - 1) == 0:
return b.lqshift(ptwotable[digit])
# Even if it's not power of two it can still be useful.
return _muladd1(b, digit)
z = rbigint([NULLDIGIT] * (size_a + size_b), 1)
i = UDIGIT_TYPE(0)
size_a1 = UDIGIT_TYPE(size_a - 1)
size_b1 = UDIGIT_TYPE(size_b - 1)
while i < size_a1:
f0 = a.widedigit(i)
f1 = a.widedigit(i + 1)
pz = i
carry = z.widedigit(pz) + b.widedigit(0) * f0
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
j = UDIGIT_TYPE(0)
while j < size_b1:
carry += z.widedigit(pz) + b.widedigit(j + 1) * f0 + \
b.widedigit(j) * f1
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
j += 1
carry += z.widedigit(pz) + b.widedigit(size_b1) * f1
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
if carry:
z.setdigit(pz, carry)
assert (carry >> SHIFT) == 0
i += 2
if size_a & 1:
pz = size_a1
f = a.widedigit(pz)
pb = 0
carry = _widen_digit(0)
while pb < size_b:
carry += z.widedigit(pz) + b.widedigit(pb) * f
pb += 1
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
if carry:
z.setdigit(pz, z.widedigit(pz) + carry)
z._normalize()
return z
def _kmul_split(n, size):
"""
A helper for Karatsuba multiplication (k_mul).
Takes a bigint "n" and an integer "size" representing the place to
split, and sets low and high such that abs(n) == (high << size) + low,
viewing the shift as being by digits. The sign bit is ignored, and
the return values are >= 0.
"""
size_n = n.numdigits()
size_lo = min(size_n, size)
lo = rbigint(n._digits[:size_lo] or [NULLDIGIT], 1)
hi = rbigint(n._digits[size_lo:n.size] or [NULLDIGIT], 1)
lo._normalize()
hi._normalize()
return hi, lo
def _k_mul(a, b):
    """
    Karatsuba multiplication.  Ignores the input signs, and returns the
    absolute value of the product (or raises if error).
    See Knuth Vol. 2 Chapter 4.3.3 (Pp. 294-295).
    """
    asize = a.numdigits()
    bsize = b.numdigits()

    # (ah*X+al)(bh*X+bl) = ah*bh*X*X + (ah*bl + al*bh)*X + al*bl.  The
    # middle term equals (ah+al)*(bh+bl) - ah*bh - al*bl, so the job has
    # been reduced to 3 multiplies on numbers half the size.

    # Split a & b into hi & lo pieces.
    shift = bsize >> 1
    ah, al = _kmul_split(a, shift)
    if ah.sign == 0:
        # This may happen now that _k_lopsided_mul ain't catching it.
        return _x_mul(a, b)

    if a is b:
        # Squaring: the high/low split of b is the same as that of a.
        bh = ah
        bl = al
    else:
        bh, bl = _kmul_split(b, shift)

    # The plan:
    # 1. Allocate result space (asize + bsize digits: that's always enough).
    # 2. Compute ah*bh, and copy into the high digits of the result.
    # 3. Compute al*bl, and copy into the low digits (can't overlap with 2).
    # 4. Subtract al*bl from the result, starting at shift. This may
    #    underflow (borrow out of the high digit), but we don't care:
    #    we're working modulo
    #    BASE**(sizea + sizeb), and so long as the *final* result fits,
    #    borrows and carries out of the high digit can be ignored.
    # 5. Subtract ah*bh from the result, starting at shift.
    # 6. Compute (ah+al)*(bh+bl), and add it into the result starting
    #    at shift.

    # 1. Allocate result space.
    ret = rbigint([NULLDIGIT] * (asize + bsize), 1)

    # 2. t1 <- ah*bh, and copy into high digits of result.
    t1 = ah.mul(bh)
    assert t1.sign >= 0
    assert 2*shift + t1.numdigits() <= ret.numdigits()
    for i in range(t1.numdigits()):
        ret._digits[2*shift + i] = t1._digits[i]

    # Zero-out the digits higher than the ah*bh copy. */
    ## ignored, assuming that we initialize to zero
    ##i = ret->ob_size - 2*shift - t1->ob_size;
    ##if (i)
    ##    memset(ret->ob_digit + 2*shift + t1->ob_size, 0,
    ##           i * sizeof(digit));

    # 3. t2 <- al*bl, and copy into the low digits.
    t2 = al.mul(bl)
    assert t2.sign >= 0
    assert t2.numdigits() <= 2*shift # no overlap with high digits
    for i in range(t2.numdigits()):
        ret._digits[i] = t2._digits[i]

    # Zero out remaining digits.
    ## ignored, assuming that we initialize to zero
    ##i = 2*shift - t2->ob_size; /* number of uninitialized digits */
    ##if (i)
    ##   memset(ret->ob_digit + t2->ob_size, 0, i * sizeof(digit));

    # 4 & 5. Subtract ah*bh (t1) and al*bl (t2). We do al*bl first
    # because it's fresher in cache.
    i = ret.numdigits() - shift    # digits available starting at 'shift'
    _v_isub(ret, shift, i, t2, t2.numdigits())
    _v_isub(ret, shift, i, t1, t1.numdigits())

    # 6. t3 <- (ah+al)(bh+bl), and add into result at 'shift'.
    t1 = _x_add(ah, al)
    if a is b:
        t2 = t1
    else:
        t2 = _x_add(bh, bl)
    t3 = t1.mul(t2)
    assert t3.sign >= 0
    # See the "(*) Why adding t3 can't run out of room" comment below.
    _v_iadd(ret, shift, i, t3, t3.numdigits())

    ret._normalize()
    return ret
""" (*) Why adding t3 can't "run out of room" above.
Let f(x) mean the floor of x and c(x) mean the ceiling of x. Some facts
to start with:
1. For any integer i, i = c(i/2) + f(i/2). In particular,
bsize = c(bsize/2) + f(bsize/2).
2. shift = f(bsize/2)
3. asize <= bsize
4. Since we call k_lopsided_mul if asize*2 <= bsize, asize*2 > bsize in this
routine, so asize > bsize/2 >= f(bsize/2) in this routine.
We allocated asize + bsize result digits, and add t3 into them at an offset
of shift. This leaves asize+bsize-shift allocated digit positions for t3
to fit into, = (by #1 and #2) asize + f(bsize/2) + c(bsize/2) - f(bsize/2) =
asize + c(bsize/2) available digit positions.
bh has c(bsize/2) digits, and bl at most f(bsize/2) digits.  So bh+bl has
at most c(bsize/2) digits + 1 bit.
If asize == bsize, ah has c(bsize/2) digits, else ah has at most f(bsize/2)
digits, and al has at most f(bsize/2) digits in any case. So ah+al has at
most (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 1 bit.
The product (ah+al)*(bh+bl) therefore has at most
c(bsize/2) + (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits
and we have asize + c(bsize/2) available digit positions. We need to show
this is always enough. An instance of c(bsize/2) cancels out in both, so
the question reduces to whether asize digits is enough to hold
(asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits. If asize < bsize,
then we're asking whether asize digits >= f(bsize/2) digits + 2 bits. By #4,
asize is at least f(bsize/2)+1 digits, so this in turn reduces to whether 1
digit is enough to hold 2 bits. This is so since SHIFT=15 >= 2. If
asize == bsize, then we're asking whether bsize digits is enough to hold
c(bsize/2) digits + 2 bits, or equivalently (by #1) whether f(bsize/2) digits
is enough to hold 2 bits. This is so if bsize >= 2, which holds because
bsize >= KARATSUBA_CUTOFF >= 2.
Note that since there's always enough room for (ah+al)*(bh+bl), and that's
clearly >= each of ah*bh and al*bl, there's always enough room to subtract
ah*bh and al*bl too.
"""
def _k_lopsided_mul(a, b):
    """
    b has at least twice the digits of a, and a is big enough that Karatsuba
    would pay off *if* the inputs had balanced sizes.  View b as a sequence
    of slices, each with a->ob_size digits, and multiply the slices by a,
    one at a time.  This gives k_mul balanced inputs to work with, and is
    also cache-friendly (we compute one double-width slice of the result
    at a time, then move on, never backtracking except for the helpful
    single-width slice overlap between successive partial sums).
    """
    asize = a.numdigits()
    bsize = b.numdigits()
    # Callers only take this path for genuinely lopsided, large inputs.
    assert asize > KARATSUBA_CUTOFF
    assert 2 * asize <= bsize

    ret = rbigint([NULLDIGIT] * (asize + bsize), 1)

    # Successive slices of b are copied into bslice and multiplied by a.
    bslice = rbigint(sign=1)

    nbdone = 0    # number of b digits already multiplied
    while bsize > 0:
        nbtouse = min(bsize, asize)
        # XXX we would like a non-resizing list here, or find another
        # way to store the size, instead of resizing the list!
        # XXX change the implementation, encoding length via the sign.
        bslice._digits = b._digits[nbdone : nbdone + nbtouse]
        bslice.size = nbtouse
        product = _k_mul(a, bslice)
        # Add into result.
        _v_iadd(ret, nbdone, ret.numdigits() - nbdone,
                product, product.numdigits())
        bsize -= nbtouse
        nbdone += nbtouse

    ret._normalize()
    return ret
def _inplace_divrem1(pout, pin, n):
    """
    Divide bigint pin by non-zero digit n, storing quotient
    in pout, and returning the remainder.  It's OK for pin == pout on entry.
    """
    assert n > 0 and n <= MASK
    rem = _widen_digit(0)
    # Process digits from most significant to least significant.
    pos = pin.numdigits() - 1
    while pos >= 0:
        rem = (rem << SHIFT) | pin.widedigit(pos)
        quot = rem // n
        pout.setdigit(pos, quot)
        rem -= quot * n
        pos -= 1
    return rffi.cast(lltype.Signed, rem)
def _divrem1(a, n):
    """
    Divide a bigint integer by a digit, returning both the quotient
    and the remainder as a tuple.
    The sign of a is ignored; n should not be zero.
    """
    assert n > 0 and n <= MASK
    ndigits = a.numdigits()
    quot = rbigint([NULLDIGIT] * ndigits, 1, ndigits)
    rem = _inplace_divrem1(quot, a, n)
    quot._normalize()
    return quot, rem
def _v_iadd(x, xofs, m, y, n):
    """
    x and y are rbigints, m >= n required.  x.digits[0:n] is modified in
    place, by adding y.digits[0:m] to it.  Carries are propagated as far as
    x[m-1], and the remaining carry (0 or 1) is returned.
    Python adaptation: x is addressed relative to xofs!
    """
    assert m >= n
    carry = UDIGIT_TYPE(0)
    pos = _load_unsigned_digit(xofs)
    # First add all n digits of y.
    stop = xofs + n
    while pos < stop:
        carry += x.udigit(pos) + y.udigit(pos - xofs)
        x.setdigit(pos, carry)
        carry >>= SHIFT
        pos += 1
    # Then ripple the carry through the remaining digits of x.
    stop = xofs + m
    while carry and pos < stop:
        carry += x.udigit(pos)
        x.setdigit(pos, carry)
        carry >>= SHIFT
        pos += 1
    return carry
def _v_isub(x, xofs, m, y, n):
    """
    x and y are rbigints, m >= n required.  x.digits[0:n] is modified in
    place, by subtracting y.digits[0:m] from it.  Borrows are propagated as
    far as x[m-1], and the remaining borrow (0 or 1) is returned.
    Python adaptation: x is addressed relative to xofs!
    """
    assert m >= n
    borrow = UDIGIT_TYPE(0)
    pos = _load_unsigned_digit(xofs)
    # First subtract all n digits of y.
    stop = xofs + n
    while pos < stop:
        borrow = x.udigit(pos) - y.udigit(pos - xofs) - borrow
        x.setdigit(pos, borrow)
        borrow >>= SHIFT
        borrow &= 1    # keep only the borrow bit
        pos += 1
    # Then ripple the borrow through the remaining digits of x.
    stop = xofs + m
    while borrow and pos < stop:
        borrow = x.udigit(pos) - borrow
        x.setdigit(pos, borrow)
        borrow >>= SHIFT
        borrow &= 1
        pos += 1
    return borrow
@specialize.argtype(2)
def _muladd1(a, n, extra=0):
    """Multiply by a single digit and add a single digit, ignoring the sign.
    """
    size_a = a.numdigits()
    z = rbigint([NULLDIGIT] * (size_a + 1), 1)
    assert extra & MASK == extra
    carry = _widen_digit(extra)
    for pos in range(size_a):
        carry += a.widedigit(pos) * n
        z.setdigit(pos, carry)
        carry >>= SHIFT
    # The final carry becomes the (possibly zero) extra top digit.
    z.setdigit(size_a, carry)
    z._normalize()
    return z
def _v_lshift(z, a, m, d):
    """ Shift digit vector a[0:m] d bits left, with 0 <= d < SHIFT.  Put
    * result in z[0:m], and return the d bits shifted out of the top.
    """
    assert 0 <= d and d < SHIFT
    carry = 0
    for pos in range(m):
        acc = a.widedigit(pos) << d | carry
        z.setdigit(pos, acc)
        carry = acc >> SHIFT
    return carry
def _v_rshift(z, a, m, d):
    """ Shift digit vector a[0:m] d bits right, with 0 <= d < PyLong_SHIFT.
    * Put result in z[0:m], and return the d bits shifted out of the bottom.
    """
    assert 0 <= d and d < SHIFT
    mask = (1 << d) - 1
    carry = _widen_digit(0)
    # Walk from the most significant digit downwards.
    pos = m - 1
    while pos >= 0:
        acc = (carry << SHIFT) | a.widedigit(pos)
        carry = acc & mask
        z.setdigit(pos, acc >> d)
        pos -= 1
    return carry
def _x_divrem(v1, w1):
    """ Unsigned bigint division with remainder -- the algorithm """
    # This is Knuth Vol. 2 Algorithm D (schoolbook division), digit by
    # digit; the statement order below is load-bearing, keep it intact.
    size_v = v1.numdigits()
    size_w = w1.numdigits()
    assert size_v >= size_w and size_w > 1
    # v gets one extra digit of headroom for the normalization shift.
    v = rbigint([NULLDIGIT] * (size_v + 1), 1, size_v + 1)
    w = rbigint([NULLDIGIT] * size_w, 1, size_w)
    """ normalize: shift w1 left so that its top digit is >= PyLong_BASE/2.
    shift v1 left by the same amount. Results go into w and v. """
    d = SHIFT - bits_in_digit(w1.digit(abs(size_w-1)))
    carry = _v_lshift(w, w1, size_w, d)
    assert carry == 0
    carry = _v_lshift(v, v1, size_v, d)
    if carry != 0 or v.digit(abs(size_v-1)) >= w.digit(abs(size_w-1)):
        v.setdigit(size_v, carry)
        size_v += 1
    """ Now v->ob_digit[size_v-1] < w->ob_digit[size_w-1], so quotient has
    at most (and usually exactly) k = size_v - size_w digits. """
    k = size_v - size_w
    if k == 0:
        # Quotient is 0; un-normalize v to recover the remainder.
        assert _v_rshift(w, v, size_w, d) == 0
        w._normalize()
        return rbigint([NULLDIGIT]), w
    assert k > 0
    a = rbigint([NULLDIGIT] * k, 1, k)
    wm1 = w.widedigit(abs(size_w-1))
    wm2 = w.widedigit(abs(size_w-2))
    j = size_v - 1
    k -= 1
    while k >= 0:
        assert j >= 0
        """ inner loop: divide vk[0:size_w+1] by w0[0:size_w], giving
        single-digit quotient q, remainder in vk[0:size_w]. """
        # estimate quotient digit q; may overestimate by 1 (rare)
        if j >= size_v:
            vtop = 0
        else:
            vtop = v.widedigit(j)
        assert vtop <= wm1
        vv = (vtop << SHIFT) | v.widedigit(abs(j-1))
        q = vv / wm1
        r = vv - wm1 * q
        # Refine the estimate using the second-highest divisor digit.
        while wm2 * q > ((r << SHIFT) | v.widedigit(abs(j-2))):
            q -= 1
            r += wm1
        #assert q <= MASK+1, We need to compare to BASE <=, but ehm, it gives a buildin long error. So we ignore this.
        # subtract q*w0[0:size_w] from vk[0:size_w+1]
        zhi = 0
        i = 0
        while i < size_w:
            z = v.widedigit(k+i) + zhi - q * w.widedigit(i)
            v.setdigit(k+i, z)
            zhi = z >> SHIFT
            i += 1
        # add w back if q was too large (this branch taken rarely)
        if vtop + zhi < 0:
            carry = UDIGIT_TYPE(0)
            i = 0
            while i < size_w:
                carry += v.udigit(k+i) + w.udigit(i)
                v.setdigit(k+i, carry)
                carry >>= SHIFT
                i += 1
            q -= 1
        # store quotient digit
        a.setdigit(k, q)
        k -= 1
        j -= 1
    # Un-normalize the remainder (undo the d-bit left shift).
    carry = _v_rshift(w, v, size_w, d)
    assert carry == 0
    a._normalize()
    w._normalize()
    return a, w
def _divrem(a, b):
    """ Long division with remainder, top-level routine """
    size_a = a.numdigits()
    size_b = b.numdigits()
    if b.sign == 0:
        raise ZeroDivisionError("long division or modulo by zero")

    if (size_a < size_b or
        (size_a == size_b and
         a.digit(abs(size_a-1)) < b.digit(abs(size_b-1)))):
        # |a| < |b| -- quotient is 0, remainder is a itself
        return NULLRBIGINT, a

    if size_b == 1:
        # Single-digit divisor: fast path.
        z, urem = _divrem1(a, b.digit(0))
        rem = rbigint([_store_digit(urem)], int(urem != 0), 1)
    else:
        z, rem = _x_divrem(a, b)
    # Set the signs: the quotient z has the sign of a*b; the remainder r
    # has the sign of a, so that a = b*z + r holds.
    if a.sign != b.sign:
        z.sign = - z.sign
    if a.sign < 0 and rem.sign != 0:
        rem.sign = - rem.sign
    return z, rem
# ______________ conversions to double _______________
def _AsScaledDouble(v):
    """
    NBITS_WANTED should be > the number of bits in a double's precision,
    but small enough so that 2**NBITS_WANTED is within the normal double
    range.  nbitsneeded is set to 1 less than that because the
    most-significant Python digit contains at least 1 significant bit, but
    we don't want to bother counting them (catering to the worst case
    cheaply).
    57 is one more than VAX-D double precision; I (Tim) don't know of a
    double format with more precision than that; it's 1 larger so that we
    add in at least one round bit to stand in for the ignored
    least-significant bits.
    """
    NBITS_WANTED = 57
    if v.sign == 0:
        return 0.0, 0
    sign = v.sign
    pos = v.numdigits() - 1
    x = float(v.digit(pos))
    nbitsneeded = NBITS_WANTED - 1
    # Invariant: 'pos' Python digits remain unaccounted for.
    while pos > 0 and nbitsneeded > 0:
        pos -= 1
        x = x * FLOAT_MULTIPLIER + float(v.digit(pos))
        nbitsneeded -= SHIFT
    # There are 'pos' digits we didn't shift in.  Pretending they're all
    # zeroes, the true value is x * 2**(pos*SHIFT).
    exponent = pos
    assert x > 0.0
    return x * sign, exponent
##def ldexp(x, exp):
## assert type(x) is float
## lb1 = LONG_BIT - 1
## multiplier = float(1 << lb1)
## while exp >= lb1:
## x *= multiplier
## exp -= lb1
## if exp:
## x *= float(1 << exp)
## return x
# note that math.ldexp checks for overflows,
# while the C ldexp is not guaranteed to do.
# XXX make sure that we don't ignore this!
@jit.dont_look_inside
def _AsDouble(n):
    """ Get a C double from a bigint object.

    Extracts the top DBL_MANT_DIG+2 bits, rounds half-to-even, and scales
    with math.ldexp.  Raises OverflowError when the value exceeds the
    double range.
    """
    from rpython.rlib import rfloat
    DBL_MANT_DIG = rfloat.DBL_MANT_DIG
    DBL_MAX_EXP = rfloat.DBL_MAX_EXP
    assert DBL_MANT_DIG < r_ulonglong.BITS
    sign = n.sign
    if sign == 0:
        return 0.0
    elif sign < 0:
        # Work on the absolute value; the sign is re-applied at the end.
        n = n.neg()
    exp = n.bit_length()
    shift = DBL_MANT_DIG + 2 - exp
    if shift >= 0:
        # n is short enough: take all of it, shifted up.
        q = _AsULonglong_mask(n) << shift
        if not we_are_translated():
            assert q == n.tolong() << shift
    else:
        # n is too long: keep the top bits, and remember in the low bit
        # ("sticky bit") whether anything non-zero was shifted out.
        shift = -shift
        n2 = n.rshift(shift)
        q = _AsULonglong_mask(n2)
        if not we_are_translated():
            assert q == n2.tolong()
        if not n.eq(n2.lshift(shift)):
            q |= 1
    # Round half-to-even on the two extra low bits of q.
    q = (q >> 2) + r_uint((bool(q & 2) and bool(q & 5)))
    if exp > DBL_MAX_EXP or (exp == DBL_MAX_EXP and
                             q == r_ulonglong(1) << DBL_MANT_DIG):
        raise OverflowError("integer too large to convert to float")
    ad = math.ldexp(float(q), exp - DBL_MANT_DIG)
    if sign < 0:
        ad = -ad
    return ad
@specialize.arg(0)
def _loghelper(func, arg):
    """
    A decent logarithm is easy to compute even for huge bigints, but libm
    can't do that by itself -- loghelper can.  func is log or log10.
    Note that overflow isn't possible: a bigint can contain
    no more than INT_MAX * SHIFT bits, so has value certainly less than
    2**(2**64 * 2**16) == 2**2**80, and log2 of that is 2**80, which is
    small enough to fit in an IEEE single.  log and log10 are even smaller.
    """
    x, e = _AsScaledDouble(arg)
    if x <= 0.0:
        raise ValueError
    # log(x * 2**(e*SHIFT)) == log(x) + e * SHIFT * log(2)
    return func(x) + (e * float(SHIFT) * func(2.0))
BASE_AS_FLOAT = float(1 << SHIFT)
# Bit lengths of the values 0..31, stored compactly as a string of char
# codes; bits_in_digit uses it as a 32-entry lookup table.
BitLengthTable = ''.join(map(chr, [
    0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]))
def bits_in_digit(d):
    """Return the number of significant bits of the non-negative int d."""
    nbits = 0
    # Peel off 6 bits at a time until the value fits the 32-entry table.
    while d >= 32:
        nbits += 6
        d >>= 6
    nbits += ord(BitLengthTable[d])
    return nbits
def _truediv_result(result, negate):
if negate:
result = -result
return result
def _truediv_overflow():
raise OverflowError("integer division result too large for a float")
def _bigint_true_divide(a, b):
    """Correctly-rounded true division of two rbigints, returning a float.

    Raises ZeroDivisionError for b == 0 and OverflowError (via
    _truediv_overflow) when the quotient exceeds the double range.
    """
    from rpython.rlib import rfloat
    DBL_MANT_DIG = rfloat.DBL_MANT_DIG
    DBL_MAX_EXP = rfloat.DBL_MAX_EXP
    DBL_MIN_EXP = rfloat.DBL_MIN_EXP
    MANT_DIG_DIGITS = DBL_MANT_DIG // SHIFT
    MANT_DIG_BITS = DBL_MANT_DIG % SHIFT

    # The result sign depends only on the operand signs.
    negate = (a.sign < 0) ^ (b.sign < 0)
    if not b.tobool():
        raise ZeroDivisionError("long division or modulo by zero")
    if not a.tobool():
        return _truediv_result(0.0, negate)

    a_size = a.numdigits()
    b_size = b.numdigits()

    # Fast path: both operands fit exactly in a double's mantissa, so a
    # plain float division is already correctly rounded.
    a_is_small = (a_size <= MANT_DIG_DIGITS or
                  (a_size == MANT_DIG_DIGITS+1 and
                   a.digit(MANT_DIG_DIGITS) >> MANT_DIG_BITS == 0))
    b_is_small = (b_size <= MANT_DIG_DIGITS or
                  (b_size == MANT_DIG_DIGITS+1 and
                   b.digit(MANT_DIG_DIGITS) >> MANT_DIG_BITS == 0))
    if a_is_small and b_is_small:
        a_size -= 1
        da = float(a.digit(a_size))
        while True:
            a_size -= 1
            if a_size < 0:
                break
            da = da * BASE_AS_FLOAT + a.digit(a_size)
        b_size -= 1
        db = float(b.digit(b_size))
        while True:
            b_size -= 1
            if b_size < 0:
                break
            db = db * BASE_AS_FLOAT + b.digit(b_size)
        return _truediv_result(da / db, negate)

    # Estimate the bit difference between a and b; catch extreme cases
    # (sure overflow / sure zero) before doing any bigint arithmetic.
    diff = a_size - b_size
    if diff > sys.maxint/SHIFT - 1:
        return _truediv_overflow()
    elif diff < 1 - sys.maxint/SHIFT:
        return _truediv_result(0.0, negate)
    diff = (diff * SHIFT + bits_in_digit(a.digit(a_size - 1)) -
            bits_in_digit(b.digit(b_size - 1)))
    if diff > DBL_MAX_EXP:
        return _truediv_overflow()
    elif diff < DBL_MIN_EXP - DBL_MANT_DIG - 1:
        return _truediv_result(0.0, negate)

    # Shift a so that the integer quotient has DBL_MANT_DIG + 2 or 3 bits;
    # 'inexact' records whether any non-zero bits were discarded.
    shift = max(diff, DBL_MIN_EXP) - DBL_MANT_DIG - 2
    inexact = False
    if shift <= 0:
        x = a.lshift(-shift)
    else:
        x = a.rshift(shift, dont_invert=True)
        if not a.eq(x.lshift(shift)):
            inexact = True

    x, rem = _divrem(x, b)
    if rem.tobool():
        inexact = True
    assert x.tobool()

    # Round half-to-even on the 2 or 3 extra low bits of x.
    x_size = x.numdigits()
    x_bits = (x_size-1)*SHIFT + bits_in_digit(x.digit(x_size-1))
    extra_bits = max(x_bits, DBL_MIN_EXP - shift) - DBL_MANT_DIG
    assert extra_bits == 2 or extra_bits == 3
    mask = r_uint(1 << (extra_bits - 1))
    low = x.udigit(0) | r_uint(inexact)
    if (low & mask) != 0 and (low & (3*mask-1)) != 0:
        low += mask
    x_digit_0 = low & ~(mask-1)

    # Convert the rounded digits to a float, most significant first.
    x_size -= 1
    dx = 0.0
    while x_size > 0:
        dx += x.digit(x_size)
        dx *= BASE_AS_FLOAT
        x_size -= 1
    dx += x_digit_0

    # Rounding may have pushed the result just over the double range.
    if (shift + x_bits >= DBL_MAX_EXP and
        (shift + x_bits > DBL_MAX_EXP or dx == math.ldexp(1.0, x_bits))):
        return _truediv_overflow()
    return _truediv_result(math.ldexp(dx, shift), negate)
# Digit alphabets used by the string-formatting helpers below.
BASE8 = '01234567'
BASE10 = '0123456789'
BASE16 = '0123456789abcdef'
def _format_base2_notzero(a, digits, prefix='', suffix=''):
    """Format the non-zero bigint 'a' in a power-of-two base whose digit
    alphabet is 'digits' (len(digits) must be a power of two), with the
    optional prefix/suffix.  Characters are generated from the least
    significant end into a preallocated buffer."""
    base = len(digits)
    accum = 0          # bit accumulator fed from a's digits
    accumbits = 0      # number of valid bits currently in accum
    basebits = 0       # log2(base)
    i = base
    while i > 1:
        basebits += 1
        i >>= 1

    # Upper bound on the output length (the 5 slack chars cover the sign).
    size_a = a.numdigits()
    i = 5 + len(prefix) + len(suffix) + (size_a*SHIFT + basebits-1) // basebits
    result = [chr(0)] * i
    next_char_index = i
    # Copy the suffix, right to left.
    j = len(suffix)
    while j > 0:
        next_char_index -= 1
        j -= 1
        result[next_char_index] = suffix[j]

    i = 0
    while i < size_a:
        accum |= a.widedigit(i) << accumbits
        accumbits += SHIFT
        assert accumbits >= basebits
        while 1:
            cdigit = intmask(accum & (base - 1))
            next_char_index -= 1
            assert next_char_index >= 0
            result[next_char_index] = digits[cdigit]
            accumbits -= basebits
            accum >>= basebits
            if i < size_a - 1:
                # Not the last bigint digit: stop once accum runs low.
                if accumbits < basebits:
                    break
            else:
                # Last bigint digit: flush everything that is left.
                if accum <= 0:
                    break
        i += 1
    # Copy the prefix and an optional leading '-', right to left.
    j = len(prefix)
    while j > 0:
        next_char_index -= 1
        j -= 1
        result[next_char_index] = prefix[j]
    if a.sign < 0:
        next_char_index -= 1
        result[next_char_index] = '-'
    assert next_char_index >= 0
    return ''.join(result[next_char_index:])
class _PartsCache(object):
    """Per-base cache of repeated-squaring powers used by _format.

    For each base in 3..36, mindigits[base-3] is the largest exponent e
    with base**e < sys.maxint, and parts_cache[base-3] lazily holds the
    list [base**e, (base**e)**2, ...] as rbigints (extended by _format).
    """
    def __init__(self):
        self.parts_cache = [None] * 34
        self.mindigits = [0] * 34
        for i in range(34):
            base = i + 3
            exp = 1
            while base ** exp < sys.maxint:
                exp += 1
            exp -= 1
            self.mindigits[i] = exp

    def get_cached_parts(self, base):
        index = base - 3
        res = self.parts_cache[index]
        if res is None:
            # Seed the list with base**mindigits; _format grows it on demand.
            rbase = rbigint.fromint(base)
            part = rbase.pow(rbigint.fromint(self.mindigits[index]))
            res = [part]
            self.parts_cache[index] = res
        return res

    def get_mindigits(self, base):
        return self.mindigits[base - 3]
_parts_cache = _PartsCache()
def _format_int_general(val, digits):
base = len(digits)
out = []
while val:
out.append(digits[val % base])
val //= base
out.reverse()
return "".join(out)
def _format_int10(val, digits):
return str(val)
@specialize.arg(7)
def _format_recursive(x, i, output, pts, digits, size_prefix, mindigits, _format_int):
    # Divide-and-conquer formatting: split x by pts[i] (== base**(mindigits
    # * 2**i)) and recurse on both halves; at i < 0 format a machine int.
    if i < 0:
        if output.getlength() == size_prefix:
            # Leftmost piece: no zero padding, and skip it entirely if 0.
            if x.sign != 0:
                s = _format_int(x.toint(), digits)
                output.append(s)
        else:
            # Inner piece: pad with leading zero digits up to mindigits.
            s = _format_int(x.toint(), digits)
            output.append_multiple_char(digits[0], mindigits - len(s))
            output.append(s)
    else:
        top, bot = x.divmod(pts[i])
        _format_recursive(top, i-1, output, pts, digits, size_prefix, mindigits, _format_int)
        _format_recursive(bot, i-1, output, pts, digits, size_prefix, mindigits, _format_int)
def _format(x, digits, prefix='', suffix=''):
    """Convert the rbigint x to a string in the base len(digits), using
    the given digit alphabet and optional prefix/suffix."""
    if x.sign == 0:
        return prefix + "0" + suffix
    base = len(digits)
    assert base >= 2 and base <= 36
    if (base & (base - 1)) == 0:
        # Power-of-two bases have a dedicated bit-shuffling fast path.
        return _format_base2_notzero(x, digits, prefix, suffix)
    negative = x.sign < 0
    if negative:
        x = x.neg()
    rbase = rbigint.fromint(base)
    two = rbigint.fromint(2)
    # pts[i] == base**(mindigits * 2**i); find the smallest power >= x,
    # extending the shared cache by repeated squaring if needed.
    pts = _parts_cache.get_cached_parts(base)
    mindigits = _parts_cache.get_mindigits(base)
    stringsize = mindigits
    startindex = 0
    for startindex, part in enumerate(pts):
        if not part.lt(x):
            break
        stringsize *= 2
    else:
        while pts[-1].lt(x):
            pts.append(pts[-1].pow(two))
            stringsize *= 2
        startindex = len(pts) - 1
    startindex -= 1
    output = StringBuilder(stringsize)
    if negative:
        output.append('-')
    output.append(prefix)
    # Decimal uses str() directly; other bases go digit by digit.
    if digits == BASE10:
        _format_recursive(
            x, startindex, output, pts, digits, output.getlength(), mindigits,
            _format_int10)
    else:
        _format_recursive(
            x, startindex, output, pts, digits, output.getlength(), mindigits,
            _format_int_general)
    output.append(suffix)
    return output.build()
@specialize.arg(1)
def _bitwise(a, op, b):
    """ Bitwise and/or/xor operations """
    # Negative operands are handled via ~x == -(x+1): work on the inverted
    # magnitude with an all-ones digit mask, and invert the result again
    # at the end when 'negz' says the true result is negative.
    if a.sign < 0:
        a = a.invert()
        maska = MASK
    else:
        maska = 0
    if b.sign < 0:
        b = b.invert()
        maskb = MASK
    else:
        maskb = 0

    negz = 0
    if op == '^':
        if maska != maskb:
            maska ^= MASK
            negz = -1
    elif op == '&':
        if maska and maskb:
            # ~a & ~b == ~(a | b) (De Morgan)
            op = '|'
            maska ^= MASK
            maskb ^= MASK
            negz = -1
    elif op == '|':
        if maska or maskb:
            # ~a | ~b == ~(a & b) (De Morgan)
            op = '&'
            maska ^= MASK
            maskb ^= MASK
            negz = -1
    else:
        assert 0, "unreachable"

    # How many result digits can be non-zero, given the op and the masks.
    size_a = a.numdigits()
    size_b = b.numdigits()
    if op == '&':
        if maska:
            size_z = size_b
        else:
            if maskb:
                size_z = size_a
            else:
                size_z = min(size_a, size_b)
    else:
        size_z = max(size_a, size_b)

    z = rbigint([NULLDIGIT] * size_z, 1, size_z)
    i = 0
    while i < size_z:
        # Past the end of a magnitude, the (masked) digit is just the mask.
        if i < size_a:
            diga = a.digit(i) ^ maska
        else:
            diga = maska
        if i < size_b:
            digb = b.digit(i) ^ maskb
        else:
            digb = maskb

        if op == '&':
            z.setdigit(i, diga & digb)
        elif op == '|':
            z.setdigit(i, diga | digb)
        elif op == '^':
            z.setdigit(i, diga ^ digb)
        i += 1

    z._normalize()
    if negz == 0:
        return z
    # Undo the complement to get the (negative) result.
    return z.invert()
@specialize.arg(1)
def _int_bitwise(a, op, b):
    """ Bitwise and/or/xor operations, with b a machine integer.

    Same complement-and-mask scheme as _bitwise, specialized for a
    single-digit b.
    """
    if not int_in_valid_range(b):
        # Fall back to the general bigint path.
        return _bitwise(a, op, rbigint.fromint(b))

    if a.sign < 0:
        a = a.invert()
        maska = MASK
    else:
        maska = 0
    if b < 0:
        b = ~b
        maskb = MASK
    else:
        maskb = 0

    negz = 0
    if op == '^':
        if maska != maskb:
            maska ^= MASK
            negz = -1
    elif op == '&':
        if maska and maskb:
            # ~a & ~b == ~(a | b) (De Morgan)
            op = '|'
            maska ^= MASK
            maskb ^= MASK
            negz = -1
    elif op == '|':
        if maska or maskb:
            # ~a | ~b == ~(a & b) (De Morgan)
            op = '&'
            maska ^= MASK
            maskb ^= MASK
            negz = -1

    # b contributes exactly one digit; size the result accordingly.
    size_a = a.numdigits()
    if op == '&':
        if maska:
            size_z = 1
        else:
            if maskb:
                size_z = size_a
            else:
                size_z = 1
    else:
        size_z = size_a

    z = rbigint([NULLDIGIT] * size_z, 1, size_z)
    i = 0
    while i < size_z:
        if i < size_a:
            diga = a.digit(i) ^ maska
        else:
            diga = maska
        # b only occupies digit 0; above that its (masked) digit is maskb.
        if i == 0:
            digb = b ^ maskb
        else:
            digb = maskb

        if op == '&':
            z.setdigit(i, diga & digb)
        elif op == '|':
            z.setdigit(i, diga | digb)
        elif op == '^':
            z.setdigit(i, diga ^ digb)
        i += 1

    z._normalize()
    if negz == 0:
        return z
    # Undo the complement to get the (negative) result.
    return z.invert()
ULONGLONG_BOUND = r_ulonglong(1L << (r_longlong.BITS-1))
LONGLONG_MIN = r_longlong(-(1L << (r_longlong.BITS-1)))
def _AsLongLong(v):
    """
    Get a r_longlong integer from a bigint object.
    Raises OverflowError if overflow occurs.
    """
    x = _AsULonglong_ignore_sign(v)
    if x >= ULONGLONG_BOUND:
        if x == ULONGLONG_BOUND and v.sign < 0:
            # Exactly -2**63 is representable even though +2**63 is not.
            x = LONGLONG_MIN
        else:
            raise OverflowError
    else:
        x = r_longlong(x)
    if v.sign < 0:
        # NOTE: for the LONGLONG_MIN case above, r_longlong negation wraps
        # and -LONGLONG_MIN == LONGLONG_MIN, so the result stays correct.
        x = -x
    return x
def _AsULonglong_ignore_sign(v):
    """Return abs(v) as an r_ulonglong; raise OverflowError if it does
    not fit."""
    x = r_ulonglong(0)
    pos = v.numdigits() - 1
    while pos >= 0:
        prev = x
        x = (x << SHIFT) + r_ulonglong(v.widedigit(pos))
        # If shifting back does not recover the previous value, the
        # accumulator overflowed.
        if (x >> SHIFT) != prev:
            raise OverflowError(
                "long int too large to convert to unsigned long long int")
        pos -= 1
    return x
def make_unsigned_mask_conversion(T):
    """Build a conversion function that reduces an rbigint modulo the
    unsigned machine type T (values simply wrap, no overflow check)."""
    def _As_unsigned_mask(v):
        x = T(0)
        pos = v.numdigits() - 1
        while pos >= 0:
            x = (x << SHIFT) + T(v.digit(pos))
            pos -= 1
        if v.sign < 0:
            # Wrap-around negation in T.
            x = -x
        return x
    return _As_unsigned_mask
_AsULonglong_mask = make_unsigned_mask_conversion(r_ulonglong)
_AsUInt_mask = make_unsigned_mask_conversion(r_uint)
def _hash(v):
    """Hash of an rbigint, designed so that ints and longs with the same
    value hash to the same value; otherwise comparisons of mapped keys
    would turn out weird."""
    i = v.numdigits() - 1
    sign = v.sign
    x = r_uint(0)
    LONG_BIT_SHIFT = LONG_BIT - SHIFT
    while i >= 0:
        # Force a native-word-width (32 or 64 bit) circular shift.
        x = (x << SHIFT) | (x >> LONG_BIT_SHIFT)
        x += v.udigit(i)
        # If the addition above overflowed, compensate by incrementing.
        # This preserves the value modulo ULONG_MAX.
        if x < v.udigit(i):
            x += 1
        i -= 1
    res = intmask(intmask(x) * sign)
    return res
def digits_max_for_base(base):
    """Return the largest power of *base* that still fits in one bigint
    digit (i.e. stays below MASK)."""
    exp = 1
    while base ** exp < MASK:
        exp += 1
    return base ** (exp - 1)
BASE_MAX = [0, 1] + [digits_max_for_base(_base) for _base in range(2, 37)]
DEC_MAX = digits_max_for_base(10)
assert DEC_MAX == BASE_MAX[10]
def _decimalstr_to_bigint(s):
    """Parse a decimal string (with optional '+'/'-' sign) into an rbigint.

    Digits are accumulated in machine-int chunks of up to DEC_MAX and
    folded into the bigint with _muladd1, so only a few bigint operations
    are needed per string.
    """
    p = 0
    lim = len(s)
    negative = False
    if s[p] == '-':
        negative = True
        p += 1
    elif s[p] == '+':
        p += 1

    a = rbigint()
    tens = 1     # value of the chunk accumulated so far, as a power of 10
    dig = 0      # digits accumulated in the current chunk
    ord0 = ord('0')
    while p < lim:
        dig = dig * 10 + ord(s[p]) - ord0
        p += 1
        tens *= 10
        if tens == DEC_MAX or p == lim:
            # Chunk full (or input exhausted): fold it into the bigint.
            a = _muladd1(a, tens, dig)
            tens = 1
            dig = 0
    if negative and a.sign == 1:
        a.sign = -1
    return a
def parse_digit_string(parser):
    """Build an rbigint from the digits produced by *parser* (which
    supplies .base, .sign and a next_digit() method returning -1 at the
    end)."""
    base = parser.base
    if (base & (base - 1)) == 0 and base >= 2:
        # Power-of-two bases have an exact bit-copying fast path.
        return parse_string_from_binary_base(parser)
    a = rbigint()
    digitmax = BASE_MAX[base]
    # Accumulate digits in a machine int 'dig' (worth 'tens') and fold it
    # into the bigint whenever the chunk is full or the input ends.
    tens, dig = 1, 0
    while True:
        digit = parser.next_digit()
        if tens == digitmax or digit < 0:
            a = _muladd1(a, tens, dig)
            if digit < 0:
                break
            dig = digit
            tens = base
        else:
            dig = dig * base + digit
            tens *= base
    a.sign *= parser.sign
    return a
def parse_string_from_binary_base(parser):
    """Fast path of parse_digit_string for power-of-two bases: each input
    character contributes a fixed number of bits, copied directly into
    the result's digits."""
    from rpython.rlib.rstring import ParseStringError
    base = parser.base
    if   base ==  2: bits_per_char = 1
    elif base ==  4: bits_per_char = 2
    elif base ==  8: bits_per_char = 3
    elif base == 16: bits_per_char = 4
    elif base == 32: bits_per_char = 5
    else:
        raise AssertionError

    # count the number of digits, n
    n = 0
    while parser.next_digit() >= 0:
        n += 1
    # b <- number of bits, rounded up to a whole number of digits
    try:
        b = ovfcheck(n * bits_per_char)
        b = ovfcheck(b + (SHIFT - 1))
    except OverflowError:
        raise ParseStringError("long string too large to convert")
    b = (b // SHIFT) or 1
    z = rbigint([NULLDIGIT] * b, sign=parser.sign)

    # Read the digits back, least significant character first, packing
    # bits_per_char bits at a time into the accumulator.
    accum = _widen_digit(0)
    bits_in_accum = 0
    pdigit = 0
    for _ in range(n):
        k = parser.prev_digit()
        accum |= _widen_digit(k) << bits_in_accum
        bits_in_accum += bits_per_char
        if bits_in_accum >= SHIFT:
            z.setdigit(pdigit, accum)
            pdigit += 1
            assert pdigit <= b
            accum >>= SHIFT
            bits_in_accum -= SHIFT

    if bits_in_accum:
        # Flush the incomplete final digit.
        z.setdigit(pdigit, accum)
    z._normalize()
    return z
| false | true |
f7ff4abc7bdfb30fde527420eded549637be4c74 | 2,609 | py | Python | test/gen_big.py | sdn-ixp/sdx-parallel | aa7f3d01ac22c56b5882de50884b0473c8bb6ba2 | [
"Apache-2.0"
] | 49 | 2015-11-15T00:02:35.000Z | 2021-02-12T22:03:57.000Z | test/gen_big.py | sdn-ixp/sdx-parallel | aa7f3d01ac22c56b5882de50884b0473c8bb6ba2 | [
"Apache-2.0"
] | 6 | 2016-06-20T06:01:36.000Z | 2019-10-22T19:34:27.000Z | test/gen_big.py | sdn-ixp/sdx-parallel | aa7f3d01ac22c56b5882de50884b0473c8bb6ba2 | [
"Apache-2.0"
] | 21 | 2015-11-22T13:02:07.000Z | 2019-06-06T18:15:11.000Z | '''
Created on Jul 4, 2016
@author: Marc Pucci (Vencore Labs)
'''
'''
generate really big configurations
'''
import sys
import genlib
def main (argv):
    # Emit (to stdout) a large multi-switch SDX test configuration with
    # argv[1] participants: peers/participants header, per-participant
    # addresses, announcements, traffic flows and the test blocks.
    # NOTE(review): Python 2 script (print statements).
    global outdir
    if len(argv) < 2:
        print 'usage: gen_big #_of_participants'
        exit()
    limit = int(argv[1])
    maxsub = 254    # highest usable last IP octet
    minsub = 1      # lowest usable last IP octet
    peers = "peers "
    port = 80       # first TCP port used by the generated flows
    print "mode multi-switch"
    print "participants " + str(limit)
    for i in range(1, limit + 1):
        peers += " " + str(i)
    print peers
    print
    # Assign each participant an address 172.0.c.d/16, advancing the
    # third octet whenever the last one exceeds maxsub.
    a = 172
    b = 0
    d = minsub
    c = 0
    for i in range(1, limit + 1):
        if d > maxsub:
            d = minsub
            c += 1
        print "participant " + str(i) + " " + str(i) + " PORT MAC " + str(a) + "." + str(b) + "." + str(c) + "." + str(d) + "/16"
        d += 1
    print
    print "host AS ROUTER _ IP # testnode names of form a1_100 a1_110"
    print
    # Announcements: participant 1 announces 100.0.0.0/24; the others
    # share j.0.0.0/24 prefixes, bumping j every 25 participants.
    d = minsub
    c = 0
    j = 140
    for i in range(1, limit + 1):
        if d > maxsub:
            d = minsub
            c += 1
        if i == 1:
            print "announce " + str(i) + " 100.0.0.0/24"
        else:
            print "announce " + str(i) + " " + str(j) + ".0.0.0/24"
        if (i % 25) == 0:
            j += 1
        d += 1
    print
    # One outflow from participant 1 to each other participant, each on
    # its own port; the listener line collects all the ports.
    p = port
    listener = "listener AUTOGEN "
    for i in range(2, limit + 1):
        listener += " " + str(p)
        print "outflow " + genlib.part_router2host(1, 0) + " -t " + str(p) + " > " + genlib.part2as(i)
        p += 1
    print
    print listener
    print
    print "test regress {\n\texec a1 sleep " + str(limit/3) + "\n\ttest xfer\n}"
    print
    print "test init {\n\tlistener\n}"
    print
    # The xfer test verifies each generated flow end to end.
    p = port
    print "test xfer {"
    j = 140
    for i in range(2, limit + 1):
        print "\tverify " + genlib.part_router2host(1, 0) + "_100 " + genlib.part_router2host(i, 0) + "_" + str(j) + " " + str(p)
        if (i % 25) == 0:
            j += 1
        p += 1
    print "}"
    print
    # Diagnostic helpers: dump switch flows, routes and BGP state.
    print "test info {"
    print "\tlocal ovs-ofctl dump-flows S1"
    print "\tlocal ovs-ofctl dump-flows S2"
    print "\tlocal ovs-ofctl dump-flows S3"
    print "\tlocal ovs-ofctl dump-flows S4"
    print "\texec a1 ip route"
    print "\tbgp a1"
    print "\texec b1 ip route"
    print "\tbgp b1"
    print "\texec c1 ip route"
    print "\tbgp c1"
    print "}"
    print
    print "test flush {"
    for router in ('a1', 'b1'):
        print "\texec " + router + " ip -s -s neigh flush all"
    print "}"
if __name__ == "__main__":
main(sys.argv)
| 22.686957 | 129 | 0.485243 | '''
Created on Jul 4, 2016
@author: Marc Pucci (Vencore Labs)
'''
'''
generate really big configurations
'''
import sys
import genlib
def main (argv):
global outdir
if len(argv) < 2:
print 'usage: gen_big #_of_participants'
exit()
limit = int(argv[1])
maxsub = 254
minsub = 1
peers = "peers "
port = 80
print "mode multi-switch"
print "participants " + str(limit)
for i in range(1, limit + 1):
peers += " " + str(i)
print peers
print
a = 172
b = 0
d = minsub
c = 0
for i in range(1, limit + 1):
if d > maxsub:
d = minsub
c += 1
print "participant " + str(i) + " " + str(i) + " PORT MAC " + str(a) + "." + str(b) + "." + str(c) + "." + str(d) + "/16"
d += 1
print
print "host AS ROUTER _ IP # testnode names of form a1_100 a1_110"
print
d = minsub
c = 0
j = 140
for i in range(1, limit + 1):
if d > maxsub:
d = minsub
c += 1
if i == 1:
print "announce " + str(i) + " 100.0.0.0/24"
else:
print "announce " + str(i) + " " + str(j) + ".0.0.0/24"
if (i % 25) == 0:
j += 1
d += 1
print
p = port
listener = "listener AUTOGEN "
for i in range(2, limit + 1):
listener += " " + str(p)
print "outflow " + genlib.part_router2host(1, 0) + " -t " + str(p) + " > " + genlib.part2as(i)
p += 1
print
print listener
print
print "test regress {\n\texec a1 sleep " + str(limit/3) + "\n\ttest xfer\n}"
print
print "test init {\n\tlistener\n}"
print
p = port
print "test xfer {"
j = 140
for i in range(2, limit + 1):
print "\tverify " + genlib.part_router2host(1, 0) + "_100 " + genlib.part_router2host(i, 0) + "_" + str(j) + " " + str(p)
if (i % 25) == 0:
j += 1
p += 1
print "}"
print
print "test info {"
print "\tlocal ovs-ofctl dump-flows S1"
print "\tlocal ovs-ofctl dump-flows S2"
print "\tlocal ovs-ofctl dump-flows S3"
print "\tlocal ovs-ofctl dump-flows S4"
print "\texec a1 ip route"
print "\tbgp a1"
print "\texec b1 ip route"
print "\tbgp b1"
print "\texec c1 ip route"
print "\tbgp c1"
print "}"
print
print "test flush {"
for router in ('a1', 'b1'):
print "\texec " + router + " ip -s -s neigh flush all"
print "}"
if __name__ == "__main__":
main(sys.argv)
| false | true |
f7ff4b13830517d1b3cefd55138a9120f4c53607 | 1,312 | py | Python | tests/50_test_sphere_sph_solver_real_and_complex/test.py | valentinaschueller/sweet | 27e99c7a110c99deeadee70688c186d82b39ac90 | [
"MIT"
] | 6 | 2017-11-20T08:12:46.000Z | 2021-03-11T15:32:36.000Z | tests/50_test_sphere_sph_solver_real_and_complex/test.py | valentinaschueller/sweet | 27e99c7a110c99deeadee70688c186d82b39ac90 | [
"MIT"
] | 4 | 2018-02-02T21:46:33.000Z | 2022-01-11T11:10:27.000Z | tests/50_test_sphere_sph_solver_real_and_complex/test.py | valentinaschueller/sweet | 27e99c7a110c99deeadee70688c186d82b39ac90 | [
"MIT"
] | 12 | 2016-03-01T18:33:34.000Z | 2022-02-08T22:20:31.000Z | #! /usr/bin/env python3
import sys
import os
# Run relative to this script's directory so generated job dirs land here.
os.chdir(os.path.dirname(sys.argv[0]))
from mule_local.JobMule import *
from itertools import product
from mule.exec_program import *
# Start from a clean slate.
exec_program('mule.benchmark.cleanup_all', catch_output=False)
# Compile-time configuration for the sphere SPH solver unit test.
jg = JobGeneration()
jg.compile.unit_test="test_sphere_sph_solver_real_and_complex"
jg.compile.plane_spectral_space="disable"
jg.compile.sphere_spectral_space="enable"
jg.compile.mode = "release"
jg.runtime.sphere_radius = 1
jg.runtime.sphere_rotating_coriolis_omega = 1
unique_id_filter = []
unique_id_filter.append('compile')
jg.unique_id_filter = unique_id_filter
#params_runtime_mode_res = [64, 128, 256, 512, 1024, 2048]
params_runtime_mode_res = [64, 128, 256, 512, 1024]
params_runtime_r = [1, 1e3, 1e6]
params_runtime_f = [1, 1e-3, 1e-6]
jg.runtime.verbosity = 5
# Generate one job script per (resolution, radius, Coriolis) combination.
for (
    jg.runtime.space_res_spectral,
    jg.runtime.sphere_radius,
    jg.runtime.sphere_rotating_coriolis_omega,
) in product(
    params_runtime_mode_res,
    params_runtime_r,
    params_runtime_f,
):
    jg.gen_jobscript_directory()
# Execute all generated jobs; abort with the runner's exit code on failure.
exitcode = exec_program('mule.benchmark.jobs_run_directly', catch_output=False)
if exitcode != 0:
    sys.exit(exitcode)
print("Benchmarks successfully finished")
exec_program('mule.benchmark.cleanup_all', catch_output=False)
| 23.017544 | 79 | 0.766006 |
import sys
import os
os.chdir(os.path.dirname(sys.argv[0]))
from mule_local.JobMule import *
from itertools import product
from mule.exec_program import *
exec_program('mule.benchmark.cleanup_all', catch_output=False)
jg = JobGeneration()
jg.compile.unit_test="test_sphere_sph_solver_real_and_complex"
jg.compile.plane_spectral_space="disable"
jg.compile.sphere_spectral_space="enable"
jg.compile.mode = "release"
jg.runtime.sphere_radius = 1
jg.runtime.sphere_rotating_coriolis_omega = 1
unique_id_filter = []
unique_id_filter.append('compile')
jg.unique_id_filter = unique_id_filter
params_runtime_mode_res = [64, 128, 256, 512, 1024]
params_runtime_r = [1, 1e3, 1e6]
params_runtime_f = [1, 1e-3, 1e-6]
jg.runtime.verbosity = 5
for (
jg.runtime.space_res_spectral,
jg.runtime.sphere_radius,
jg.runtime.sphere_rotating_coriolis_omega,
) in product(
params_runtime_mode_res,
params_runtime_r,
params_runtime_f,
):
jg.gen_jobscript_directory()
exitcode = exec_program('mule.benchmark.jobs_run_directly', catch_output=False)
if exitcode != 0:
sys.exit(exitcode)
print("Benchmarks successfully finished")
exec_program('mule.benchmark.cleanup_all', catch_output=False)
| true | true |
f7ff4c005f3a2f7a55d76b9caac1b92f841771af | 1,708 | py | Python | 2.6/faster_rcnn/utils/config_helpers.py | waikato-datamining/cntk | 1b626407ef750dfbd4ad66fe9aed28487f2a4441 | [
"MIT"
] | null | null | null | 2.6/faster_rcnn/utils/config_helpers.py | waikato-datamining/cntk | 1b626407ef750dfbd4ad66fe9aed28487f2a4441 | [
"MIT"
] | null | null | null | 2.6/faster_rcnn/utils/config_helpers.py | waikato-datamining/cntk | 1b626407ef750dfbd4ad66fe9aed28487f2a4441 | [
"MIT"
] | null | null | null | from easydict import EasyDict
import numpy as np
def merge_configs(config_list):
    """Merge a list of configuration dicts into a single EasyDict.

    The first entry serves as the base; every following entry is merged
    into it in order (later values win on key conflicts, see
    _merge_add_a_into_b).

    Args:
        config_list: list of dict / EasyDict objects; may be None or empty.

    Returns:
        The merged EasyDict, or None when the input is empty/None or the
        base entry is not a dict/EasyDict.
    """
    # Fix: compare against None with identity / truthiness, not `== None`.
    if not config_list:
        return None
    base_config = config_list[0]
    if type(base_config) is dict:
        base_config = EasyDict(base_config)
    if type(base_config) is not EasyDict:
        print("The argument given to 'merge_configs' have to be of type dict or EasyDict.")
        return None
    # Fold each remaining config into the base, in order.
    for config_to_merge in config_list[1:]:
        if type(config_to_merge) is dict:
            config_to_merge = EasyDict(config_to_merge)
        _merge_add_a_into_b(config_to_merge, base_config)
    return base_config
def _merge_add_a_into_b(a, b):
    """
    Merge config dictionary a into config dictionary b,
    clobbering the options in b whenever they are also specified in a.
    New options that are only in a will be added to b.
    """
    # Silently ignore anything that is not an EasyDict (callers convert
    # plain dicts before invoking this helper).
    if type(a) is not EasyDict:
        return
    for k, v in a.items():
        # if the key from a is new to b simply add it
        if not k in b:
            b[k] = v
            continue
        # the types must match
        # NOTE(review): `old_type` is computed but the comparison below uses
        # type(b[k]) directly again -- harmless, left as-is.
        old_type = type(b[k])
        if old_type is not type(v):
            # numpy arrays are special-cased: coerce the incoming value to
            # the existing array's dtype instead of rejecting it.
            if isinstance(b[k], np.ndarray):
                v = np.array(v, dtype=b[k].dtype)
            else:
                raise ValueError(('Type mismatch ({} vs. {}) for config key: {}').format(type(b[k]), type(v), k))
        # recursively merge dicts
        if type(v) is EasyDict:
            try:
                _merge_add_a_into_b(a[k], b[k])
            except:
                # Re-raise after reporting which key the failure occurred
                # under, so nested-merge errors are easier to locate.
                print('Error under config key: {}'.format(k))
                raise
        else:
            b[k] = v
| 30.5 | 113 | 0.581967 | from easydict import EasyDict
import numpy as np
def merge_configs(config_list):
if config_list == None or len(config_list) == 0:
return None
base_config = config_list[0]
if type(base_config) is dict:
base_config = EasyDict(base_config)
if type(base_config) is not EasyDict:
print("The argument given to 'merge_configs' have to be of type dict or EasyDict.")
return None
for i in range(len(config_list) - 1):
config_to_merge = config_list[i+1]
if type(config_to_merge) is dict:
config_to_merge = EasyDict(config_to_merge)
_merge_add_a_into_b(config_to_merge, base_config)
return base_config
def _merge_add_a_into_b(a, b):
if type(a) is not EasyDict:
return
for k, v in a.items():
if not k in b:
b[k] = v
continue
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) for config key: {}').format(type(b[k]), type(v), k))
if type(v) is EasyDict:
try:
_merge_add_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
| true | true |
f7ff4c22cc5298e66484a5798e42672119487e02 | 1,080 | py | Python | forms.py | diogenesjusto/flask_leaderboard | 86dac90785e01747ffbde99e6ba65cf42e4c016e | [
"MIT"
] | null | null | null | forms.py | diogenesjusto/flask_leaderboard | 86dac90785e01747ffbde99e6ba65cf42e4c016e | [
"MIT"
] | null | null | null | forms.py | diogenesjusto/flask_leaderboard | 86dac90785e01747ffbde99e6ba65cf42e4c016e | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
    """Sign-in form: required username/password plus a 'remember me' flag."""
    username = StringField('Username', validators=[DataRequired()], render_kw={"placeholder": "username"})
    password = PasswordField('Password', validators=[DataRequired()], render_kw={"placeholder": "password"})
    remember_me = BooleanField('Remember Me')
    submit = SubmitField('Sign In')
class RegisterForm(FlaskForm):
    """Account-creation form: username, password and email are all required."""
    username = StringField('Username', validators=[DataRequired()], render_kw={"placeholder": "username"})
    password = PasswordField('Password', validators=[DataRequired()], render_kw={"placeholder": "password"})
    email = StringField('Email', validators=[DataRequired()], render_kw={"placeholder": "email"})
    submit = SubmitField('Register')

    # Disabled duplicate-username validator.  WTForms would pick it up
    # automatically (validate_<fieldname>) if re-enabled; note it needs a
    # `User` model and `ValidationError` import that this file lacks.
    # def validate_username(self, username):
    #     user = User.query.filter_by(username=username.data).first()
    #     if user is not None:
    #         raise ValidationError('Please use a different username.')
| 51.428571 | 108 | 0.724074 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()], render_kw={"placeholder": "username"})
password = PasswordField('Password', validators=[DataRequired()], render_kw={"placeholder": "password"})
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class RegisterForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()], render_kw={"placeholder": "username"})
password = PasswordField('Password', validators=[DataRequired()], render_kw={"placeholder": "password"})
email = StringField('Email', validators=[DataRequired()], render_kw={"placeholder": "email"})
submit = SubmitField('Register')
| true | true |
f7ff4c5d5727c22ea1e52f249de9daa2faa9c87d | 5,552 | py | Python | cinder/api/contrib/snapshot_manage.py | shubhamdang/cinder | 03a8ca07d5710771c597fd92de50103313ec7f76 | [
"Apache-2.0"
] | null | null | null | cinder/api/contrib/snapshot_manage.py | shubhamdang/cinder | 03a8ca07d5710771c597fd92de50103313ec7f76 | [
"Apache-2.0"
] | null | null | null | cinder/api/contrib/snapshot_manage.py | shubhamdang/cinder | 03a8ca07d5710771c597fd92de50103313ec7f76 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import client as http_client
from oslo_log import log as logging
from cinder.api.contrib import resource_common_manage
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import snapshot_manage
from cinder.api import validation
from cinder.api.views import manageable_snapshots as list_manageable_view
from cinder.api.views import snapshots as snapshot_views
from cinder.policies import manageable_snapshots as policy
from cinder import volume as cinder_volume
LOG = logging.getLogger(__name__)
class SnapshotManageController(wsgi.Controller):
    """The /os-snapshot-manage controller for the OpenStack API."""

    _view_builder_class = snapshot_views.ViewBuilder

    def __init__(self, *args, **kwargs):
        super(SnapshotManageController, self).__init__(*args, **kwargs)
        # Cinder volume API handle; also used for manageable-snapshot listing.
        self.volume_api = cinder_volume.API()
        self._list_manageable_view = list_manageable_view.ViewBuilder()

    @wsgi.response(http_client.ACCEPTED)
    @validation.schema(snapshot_manage.create)
    def create(self, req, body):
        """Instruct Cinder to manage a storage snapshot object.

        Manages an existing backend storage snapshot object (e.g. a Linux
        logical volume or a SAN disk) by creating the Cinder objects required
        to manage it, and possibly renaming the backend storage snapshot object
        (driver dependent).

        From an API perspective, this operation behaves very much like a
        snapshot creation operation.

        Required HTTP Body:

        .. code-block:: json

            {
                "snapshot":
                {
                    "volume_id": "<Cinder volume already exists in volume backend>",
                    "ref":
                        "<Driver-specific reference to the existing storage object>"
                }
            }

        See the appropriate Cinder drivers' implementations of the
        manage_snapshot method to find out the accepted format of 'ref'.
        For example,in LVM driver, it will be the logic volume name of snapshot
        which you want to manage.

        This API call will return with an error if any of the above elements
        are missing from the request, or if the 'volume_id' element refers to
        a cinder volume that could not be found.

        The snapshot will later enter the error state if it is discovered that
        'ref' is bad.

        Optional elements to 'snapshot' are::

            name           A name for the new snapshot.
            description    A description for the new snapshot.
            metadata       Key/value pairs to be associated with the new snapshot.
        """
        context = req.environ['cinder.context']
        snapshot = body['snapshot']
        # Check whether volume exists
        volume_id = snapshot['volume_id']
        # Not found exception will be handled at the wsgi level
        volume = self.volume_api.get(context, volume_id)
        # Authorization is checked against the owning volume, not the snapshot.
        context.authorize(policy.MANAGE_POLICY, target_obj=volume)
        LOG.debug('Manage snapshot request body: %s', body)
        snapshot_parameters = {}
        snapshot_parameters['metadata'] = snapshot.get('metadata', None)
        snapshot_parameters['description'] = snapshot.get('description', None)
        snapshot_parameters['name'] = snapshot.get('name')
        # Not found exception will be handled at the wsgi level
        new_snapshot = self.volume_api.manage_existing_snapshot(
            context,
            snapshot['ref'],
            volume,
            **snapshot_parameters)
        return self._view_builder.detail(req, new_snapshot)

    @wsgi.extends
    def index(self, req):
        """Returns a summary list of snapshots available to manage."""
        context = req.environ['cinder.context']
        context.authorize(policy.LIST_MANAGEABLE_POLICY)
        return resource_common_manage.get_manageable_resources(
            req, False, self.volume_api.get_manageable_snapshots,
            self._list_manageable_view)

    @wsgi.extends
    def detail(self, req):
        """Returns a detailed list of snapshots available to manage."""
        context = req.environ['cinder.context']
        context.authorize(policy.LIST_MANAGEABLE_POLICY)
        return resource_common_manage.get_manageable_resources(
            req, True, self.volume_api.get_manageable_snapshots,
            self._list_manageable_view)
class Snapshot_manage(extensions.ExtensionDescriptor):
    """Allows existing backend storage to be 'managed' by Cinder."""

    name = 'SnapshotManage'
    alias = 'os-snapshot-manage'
    updated = '2014-12-31T00:00:00+00:00'

    def get_resources(self):
        # A single resource, with an extra 'detail' collection action
        # routed as GET alongside the default index/create routes.
        resource = extensions.ResourceExtension(
            Snapshot_manage.alias,
            SnapshotManageController(),
            collection_actions={'detail': 'GET'})
        return [resource]
| 38.555556 | 79 | 0.675612 |
from http import client as http_client
from oslo_log import log as logging
from cinder.api.contrib import resource_common_manage
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import snapshot_manage
from cinder.api import validation
from cinder.api.views import manageable_snapshots as list_manageable_view
from cinder.api.views import snapshots as snapshot_views
from cinder.policies import manageable_snapshots as policy
from cinder import volume as cinder_volume
LOG = logging.getLogger(__name__)
class SnapshotManageController(wsgi.Controller):
_view_builder_class = snapshot_views.ViewBuilder
def __init__(self, *args, **kwargs):
super(SnapshotManageController, self).__init__(*args, **kwargs)
self.volume_api = cinder_volume.API()
self._list_manageable_view = list_manageable_view.ViewBuilder()
@wsgi.response(http_client.ACCEPTED)
@validation.schema(snapshot_manage.create)
def create(self, req, body):
context = req.environ['cinder.context']
snapshot = body['snapshot']
volume_id = snapshot['volume_id']
volume = self.volume_api.get(context, volume_id)
context.authorize(policy.MANAGE_POLICY, target_obj=volume)
LOG.debug('Manage snapshot request body: %s', body)
snapshot_parameters = {}
snapshot_parameters['metadata'] = snapshot.get('metadata', None)
snapshot_parameters['description'] = snapshot.get('description', None)
snapshot_parameters['name'] = snapshot.get('name')
new_snapshot = self.volume_api.manage_existing_snapshot(
context,
snapshot['ref'],
volume,
**snapshot_parameters)
return self._view_builder.detail(req, new_snapshot)
@wsgi.extends
def index(self, req):
context = req.environ['cinder.context']
context.authorize(policy.LIST_MANAGEABLE_POLICY)
return resource_common_manage.get_manageable_resources(
req, False, self.volume_api.get_manageable_snapshots,
self._list_manageable_view)
@wsgi.extends
def detail(self, req):
context = req.environ['cinder.context']
context.authorize(policy.LIST_MANAGEABLE_POLICY)
return resource_common_manage.get_manageable_resources(
req, True, self.volume_api.get_manageable_snapshots,
self._list_manageable_view)
class Snapshot_manage(extensions.ExtensionDescriptor):
name = 'SnapshotManage'
alias = 'os-snapshot-manage'
updated = '2014-12-31T00:00:00+00:00'
def get_resources(self):
controller = SnapshotManageController()
return [extensions.ResourceExtension(Snapshot_manage.alias,
controller,
collection_actions=
{'detail': 'GET'})]
| true | true |
f7ff4cfbef645e43f1c01141884507fb30834287 | 1,793 | py | Python | app/utils/seedCrashes.py | adriacabeza/Volvo-Challenge | 66d899b55e80a4c97fb393bc4cf261e93b261a7c | [
"CNRI-Python"
] | 7 | 2019-03-10T09:16:14.000Z | 2019-11-27T10:43:57.000Z | app/utils/seedCrashes.py | adriacabeza/Volvo-Challenge | 66d899b55e80a4c97fb393bc4cf261e93b261a7c | [
"CNRI-Python"
] | 5 | 2021-03-18T22:47:15.000Z | 2022-03-11T23:42:12.000Z | app/utils/seedCrashes.py | adriacabeza/Volvo-Challenge | 66d899b55e80a4c97fb393bc4cf261e93b261a7c | [
"CNRI-Python"
] | 1 | 2020-12-28T08:22:47.000Z | 2020-12-28T08:22:47.000Z | from werkzeug.security import generate_password_hash
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
import os
# Sample crash records used for seeding; "_id" doubles as the crash id.
data =[
	{"impactAngle":-341.54339233899367,"_id":20175, "user": "Robert", "date": "10/03/2019", "car": "Volvo S90"},
	{"impactAngle":-32.74137379765352,"_id":1171, "user": "Robert", "date": "27/05/2018", "car": "Volvo V40"},
	{"impactAngle":-308.3000606298933,"_id":20387, "user": "Michelle", "date": "04/11/2017", "car": "Volvo XC40"},
	{"impactAngle":-84.77214151730084,"_id":20209, "user": "John", "date": "31/07/2017", "car": "Volvo XC90"},
	{"impactAngle":-159.1955345095375,"_id":1690, "user": "Johnny", "date": "1/02/2016", "car": "Volvo V90"},
	{"impactAngle":-8.4255066323192,"_id":5558, "user": "Robert", "date": "30/10/2015", "car": "Volvo S90"},
	{"impactAngle":294.56235398164944,"_id":20358, "user": "Johnny", "date": "15/10/2015", "car": "Volvo XC60"},
	{"impactAngle":-260.5895797216587,"_id":5512, "user": "Michelle", "date": "23/08/2015", "car": "Volvo S60"}
]
def main(isOwner):
    """Prompt for credentials and insert one user document into MongoDB.

    Args:
        isOwner: bool stored as the "owner" flag of the new document.
    """
    # Connect to the DB.
    # SECURITY NOTE: credentials are hard-coded in the fallback URI; prefer
    # setting MONGODB_URI in the environment.
    mongo_uri = os.environ.get("MONGODB_URI", "mongodb://heroku_xv3vfwld:l3f3d2fv550d1akktp8m9uqj8e@ds119380.mlab.com:19380/heroku_xv3vfwld")
    client = MongoClient(mongo_uri)
    db = client['heroku_xv3vfwld']
    # NOTE(review): documents are user records but go into the 'crashes'
    # collection -- looks intentional for this seeder, confirm.
    collection = db['crashes']
    # Ask for data to store.
    # BUG FIX: `user` and `password` were previously undefined names, so
    # every call raised NameError.  Prompt for them as the comment intended.
    user = input('Username: ')
    password = input('Password: ')
    pass_hash = generate_password_hash(password, method='pbkdf2:sha256')
    # Insert the user in the DB; "_id" uniqueness detects duplicates.
    try:
        collection.insert({"_id": user, "password": pass_hash, "owner": isOwner})
        print ("User created.")
    except DuplicateKeyError:
        print ("User already present in DB.")
if __name__ == '__main__':
ans = input('Introduce owners? y|n').lower()
isOwner = ans == 'y' or ans == 'yes'
while True:
main(isOwner) | 43.731707 | 141 | 0.660904 | from werkzeug.security import generate_password_hash
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
import os
data =[
{"impactAngle":-341.54339233899367,"_id":20175, "user": "Robert", "date": "10/03/2019", "car": "Volvo S90"},
{"impactAngle":-32.74137379765352,"_id":1171, "user": "Robert", "date": "27/05/2018", "car": "Volvo V40"},
{"impactAngle":-308.3000606298933,"_id":20387, "user": "Michelle", "date": "04/11/2017", "car": "Volvo XC40"},
{"impactAngle":-84.77214151730084,"_id":20209, "user": "John", "date": "31/07/2017", "car": "Volvo XC90"},
{"impactAngle":-159.1955345095375,"_id":1690, "user": "Johnny", "date": "1/02/2016", "car": "Volvo V90"},
{"impactAngle":-8.4255066323192,"_id":5558, "user": "Robert", "date": "30/10/2015", "car": "Volvo S90"},
{"impactAngle":294.56235398164944,"_id":20358, "user": "Johnny", "date": "15/10/2015", "car": "Volvo XC60"},
{"impactAngle":-260.5895797216587,"_id":5512, "user": "Michelle", "date": "23/08/2015", "car": "Volvo S60"}
]
def main(isOwner):
mongo_uri = os.environ.get("MONGODB_URI", "mongodb://heroku_xv3vfwld:l3f3d2fv550d1akktp8m9uqj8e@ds119380.mlab.com:19380/heroku_xv3vfwld")
client = MongoClient(mongo_uri)
db = client['heroku_xv3vfwld']
collection = db['crashes']
pass_hash = generate_password_hash(password, method='pbkdf2:sha256')
try:
collection.insert({"_id": user, "password": pass_hash, "owner": isOwner})
print ("User created.")
except DuplicateKeyError:
print ("User already present in DB.")
if __name__ == '__main__':
ans = input('Introduce owners? y|n').lower()
isOwner = ans == 'y' or ans == 'yes'
while True:
main(isOwner) | true | true |
f7ff4cfc6064b2d7b0ce92deef8d2d47f45d218a | 5,694 | py | Python | recognize_video.py | pystudent1913/proyecto-reconocimiento-facial | 881fb2f724b43b93b224dd591e250e0f2f078764 | [
"MIT"
] | null | null | null | recognize_video.py | pystudent1913/proyecto-reconocimiento-facial | 881fb2f724b43b93b224dd591e250e0f2f078764 | [
"MIT"
] | 7 | 2021-11-25T06:26:28.000Z | 2021-11-25T06:26:37.000Z | recognize_video.py | pystudent1913/proyecto-reconocimiento-facial | 881fb2f724b43b93b224dd591e250e0f2f078764 | [
"MIT"
] | null | null | null | # USAGE
# python recognize_video.py --detector face_detection_model \
# --embedding-model openface_nn4.small2.v1.t7 \
# --recognizer output/recognizer.pickle \
# --le output/le.pickle
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import pickle
import time
import cv2
import os
import requests
import json
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--detector", required=True,
	help="path to OpenCV's deep learning face detector")
ap.add_argument("-m", "--embedding-model", required=True,
	help="path to OpenCV's deep learning face embedding model")
ap.add_argument("-r", "--recognizer", required=True,
	help="path to model trained to recognize faces")
ap.add_argument("-l", "--le", required=True,
	help="path to label encoder")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
	help="minimum probability to filter weak detections")
args = vars(ap.parse_args())

# load our serialized face detector from disk
print("[INFO] loading face detector...")
protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
modelPath = os.path.sep.join([args["detector"],
	"res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

# load our serialized face embedding model from disk
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])

# load the actual face recognition model along with the label encoder
# SECURITY NOTE: pickle.loads executes arbitrary code -- only load model
# files from trusted sources.
recognizer = pickle.loads(open(args["recognizer"], "rb").read())
le = pickle.loads(open(args["le"], "rb").read())

# initialize the video stream, then allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)

# start the FPS throughput estimator
fps = FPS().start()
contador = 0        # processed-frame counter (debug output)
finded = False      # legacy early-exit flag (never set True below)
# variable to handle the login
isLogged = False
probability = 0.0   # confidence (%) captured at login time
user=""             # recognized username captured at login time
def handleLoggin(username):
    """Print a success banner and notify the local backend of the login."""
    # NOTE(review): the banner is hard-coded to one specific person
    # regardless of *username* -- presumably demo code, confirm before reuse.
    print("""
	FUISTE LOGEADO CON EXITO  
	HOLA CRISTIAN FABRIZIO SOTOMAYOR GONZALES
	CODIGO 20162019
	""")
    print('username', username)
    # Fire a GET at the local service and echo its JSON response.
    res = requests.get('http://127.0.0.1:5000/')
    response = json.loads(res.text)
    print('response', response)
iterar = True  # NOTE(review): unused flag
# loop over frames from the video file stream
while True:
    # grab the frame from the threaded video stream
    frame = vs.read()
    # resize the frame to have a width of 600 pixels (while
    # maintaining the aspect ratio), and then grab the image
    # dimensions
    frame = imutils.resize(frame, width=600)
    (h, w) = frame.shape[:2]
    # construct a blob from the image
    imageBlob = cv2.dnn.blobFromImage(
        cv2.resize(frame, (300, 300)), 1.0, (300, 300),
        (104.0, 177.0, 123.0), swapRB=False, crop=False)
    # apply OpenCV's deep learning-based face detector to localize
    # faces in the input image
    detector.setInput(imageBlob)
    detections = detector.forward()
    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the prediction
        confidence = detections[0, 0, i, 2]
        # filter out weak detections
        if confidence > args["confidence"]:
            # compute the (x, y)-coordinates of the bounding box for
            # the face
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            # extract the face ROI
            face = frame[startY:endY, startX:endX]
            (fH, fW) = face.shape[:2]
            # ensure the face width and height are sufficiently large
            if fW < 20 or fH < 20:
                continue
            # construct a blob for the face ROI, then pass the blob
            # through our face embedding model to obtain the 128-d
            # quantification of the face
            faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,
                (96, 96), (0, 0, 0), swapRB=True, crop=False)
            embedder.setInput(faceBlob)
            vec = embedder.forward()
            # perform classification to recognize the face
            preds = recognizer.predict_proba(vec)[0]
            j = np.argmax(preds)
            proba = preds[j]
            name = le.classes_[j]
            # draw the bounding box of the face along with the
            # associated probability; once logged in, show the frozen
            # login user/probability instead of the live prediction
            if isLogged == False:
                text = "{}: {:.2f}%".format(name, proba * 100)
            else:
                text = "{}: {:.2f}% -- LOGGED".format(user, probability)
            y = startY - 10 if startY - 10 > 10 else startY + 10
            if isLogged == False:
                # hard-coded target user + 50% threshold triggers the login
                if(name == 'cristian' and proba > 0.5):
                    print('hola', contador)
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                        (224, 0, 0), 2)
                    cv2.putText(frame, text, (startX, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
                    # finded = True
                    print('apagate')
                    # break
                    # perform the one-time login side effect
                    if(isLogged is not True):
                        isLogged = True
                        probability = proba * 100
                        user = name
                        handleLoggin(name)
                else:
                    # unrecognized / below-threshold face: red box
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                        (0, 0, 255), 2)
                    cv2.putText(frame, text, (startX, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
            else:
                # already logged in: green box with the stored identity
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                    (0, 255, 0), 2)
                cv2.putText(frame, text, (startX, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
            # dead code path: `finded` is never set True above
            if finded:
                break
    contador = contador + 1
    # update the FPS counter
    fps.update()
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop() | 28.328358 | 70 | 0.681946 |
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import pickle
import time
import cv2
import os
import requests
import json
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--detector", required=True,
help="path to OpenCV's deep learning face detector")
ap.add_argument("-m", "--embedding-model", required=True,
help="path to OpenCV's deep learning face embedding model")
ap.add_argument("-r", "--recognizer", required=True,
help="path to model trained to recognize faces")
ap.add_argument("-l", "--le", required=True,
help="path to label encoder")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
print("[INFO] loading face detector...")
protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
modelPath = os.path.sep.join([args["detector"],
"res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])
recognizer = pickle.loads(open(args["recognizer"], "rb").read())
le = pickle.loads(open(args["le"], "rb").read())
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
fps = FPS().start()
contador = 0
finded = False
isLogged = False
probability = 0.0
user=""
def handleLoggin(username):
print("""
FUISTE LOGEADO CON EXITO
HOLA CRISTIAN FABRIZIO SOTOMAYOR GONZALES
CODIGO 20162019
""")
print('username', username)
res = requests.get('http://127.0.0.1:5000/')
response = json.loads(res.text)
print('response', response)
iterar = True
while True:
frame = vs.read()
frame = imutils.resize(frame, width=600)
(h, w) = frame.shape[:2]
imageBlob = cv2.dnn.blobFromImage(
cv2.resize(frame, (300, 300)), 1.0, (300, 300),
(104.0, 177.0, 123.0), swapRB=False, crop=False)
# faces in the input image
detector.setInput(imageBlob)
detections = detector.forward()
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections
if confidence > args["confidence"]:
# compute the (x, y)-coordinates of the bounding box for
# the face
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# extract the face ROI
face = frame[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
# ensure the face width and height are sufficiently large
if fW < 20 or fH < 20:
continue
# construct a blob for the face ROI, then pass the blob
# through our face embedding model to obtain the 128-d
# quantification of the face
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,
(96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
# perform classification to recognize the face
preds = recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = le.classes_[j]
# draw the bounding box of the face along with the
# associated probability
if isLogged == False:
text = "{}: {:.2f}%".format(name, proba * 100)
else:
text = "{}: {:.2f}% -- LOGGED".format(user, probability)
y = startY - 10 if startY - 10 > 10 else startY + 10
if isLogged == False:
if(name == 'cristian' and proba > 0.5):
print('hola', contador)
cv2.rectangle(frame, (startX, startY), (endX, endY),
(224, 0, 0), 2)
cv2.putText(frame, text, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
# finded = True
print('apagate')
# break
if(isLogged is not True):
isLogged = True
probability = proba * 100
user = name
handleLoggin(name)
else:
cv2.rectangle(frame, (startX, startY), (endX, endY),
(0, 0, 255), 2)
cv2.putText(frame, text, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
else:
cv2.rectangle(frame, (startX, startY), (endX, endY),
(0, 255, 0), 2)
cv2.putText(frame, text, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
if finded:
break
contador = contador + 1
# update the FPS counter
fps.update()
# show the output frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop() | true | true |
f7ff4d686ee28a251c2a6c847e436de15eae0751 | 165 | py | Python | core/wsgi.py | JohnAzedo/SocialAuthDjango | a92bb69c78083fe83190609548fc068849891b90 | [
"MIT"
] | 9 | 2021-01-21T16:26:48.000Z | 2021-09-20T01:47:58.000Z | core/wsgi.py | JohnAzedo/SocialAuthDjango | a92bb69c78083fe83190609548fc068849891b90 | [
"MIT"
] | 10 | 2019-07-29T12:13:48.000Z | 2022-02-10T08:32:59.000Z | core/wsgi.py | JohnAzedo/SocialAuthDjango | a92bb69c78083fe83190609548fc068849891b90 | [
"MIT"
] | 5 | 2021-04-11T09:40:44.000Z | 2021-06-09T08:05:48.000Z | import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
application = get_wsgi_application()
| 20.625 | 64 | 0.824242 | import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
application = get_wsgi_application()
| true | true |
f7ff50addd5f11499a683fa5f671e1eb0a96c142 | 3,787 | py | Python | build/ARM/python/m5/internal/param_DiskImage.py | Jakgn/gem5_test | 0ba7cc5213cf513cf205af7fc995cf679ebc1a3f | [
"BSD-3-Clause"
] | null | null | null | build/ARM/python/m5/internal/param_DiskImage.py | Jakgn/gem5_test | 0ba7cc5213cf513cf205af7fc995cf679ebc1a3f | [
"BSD-3-Clause"
] | null | null | null | build/ARM/python/m5/internal/param_DiskImage.py | Jakgn/gem5_test | 0ba7cc5213cf513cf205af7fc995cf679ebc1a3f | [
"BSD-3-Clause"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.11
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# SWIG-generated import shim: locate the compiled _param_DiskImage extension
# next to this module (Python >= 2.6), falling back to a plain import.
if version_info >= (2,6,0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_param_DiskImage', [dirname(__file__)])
        except ImportError:
            # Not found beside this file: fall back to sys.path lookup.
            import _param_DiskImage
            return _param_DiskImage
        if fp is not None:
            try:
                _mod = imp.load_module('_param_DiskImage', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _param_DiskImage = swig_import_helper()
    del swig_import_helper
else:
    import _param_DiskImage
del version_info
# Alias builtin property if available (ancient-Python compatibility shim).
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Dynamic attribute setter: delegate with static disabled so new
    attributes may be created on the instance."""
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Compatibility shim for ancient Pythons without new-style classes:
# _object is the proxy base class, _newclass flags property support.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import m5.internal.param_SimObject
import m5.internal.drain
import m5.internal.serialize
class DiskImage(m5.internal.param_SimObject.SimObject):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
DiskImage_swigregister = _param_DiskImage.DiskImage_swigregister
DiskImage_swigregister(DiskImage)
class DiskImageParams(m5.internal.param_SimObject.SimObjectParams):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
image_file = _swig_property(_param_DiskImage.DiskImageParams_image_file_get, _param_DiskImage.DiskImageParams_image_file_set)
read_only = _swig_property(_param_DiskImage.DiskImageParams_read_only_get, _param_DiskImage.DiskImageParams_read_only_set)
def __init__(self):
this = _param_DiskImage.new_DiskImageParams()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _param_DiskImage.delete_DiskImageParams
__del__ = lambda self : None;
DiskImageParams_swigregister = _param_DiskImage.DiskImageParams_swigregister
DiskImageParams_swigregister(DiskImageParams)
| 35.064815 | 129 | 0.702667 |
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_param_DiskImage', [dirname(__file__)])
except ImportError:
import _param_DiskImage
return _param_DiskImage
if fp is not None:
try:
_mod = imp.load_module('_param_DiskImage', fp, pathname, description)
finally:
fp.close()
return _mod
_param_DiskImage = swig_import_helper()
del swig_import_helper
else:
import _param_DiskImage
del version_info
try:
_swig_property = property
except NameError:
pass
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import m5.internal.param_SimObject
import m5.internal.drain
import m5.internal.serialize
class DiskImage(m5.internal.param_SimObject.SimObject):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
DiskImage_swigregister = _param_DiskImage.DiskImage_swigregister
DiskImage_swigregister(DiskImage)
class DiskImageParams(m5.internal.param_SimObject.SimObjectParams):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
image_file = _swig_property(_param_DiskImage.DiskImageParams_image_file_get, _param_DiskImage.DiskImageParams_image_file_set)
read_only = _swig_property(_param_DiskImage.DiskImageParams_read_only_get, _param_DiskImage.DiskImageParams_read_only_set)
def __init__(self):
this = _param_DiskImage.new_DiskImageParams()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _param_DiskImage.delete_DiskImageParams
__del__ = lambda self : None;
DiskImageParams_swigregister = _param_DiskImage.DiskImageParams_swigregister
DiskImageParams_swigregister(DiskImageParams)
| true | true |
f7ff511f74e9c954b924aea6a7e1e3b20dfbe306 | 5,703 | py | Python | razor/interface.py | SRI-CSL/OCCAM | 8c498301f658fd3c158ffb6aad36dc0dd57c6239 | [
"BSD-3-Clause"
] | 17 | 2015-11-23T17:19:54.000Z | 2022-03-18T23:55:29.000Z | razor/interface.py | SRI-CSL/application-specialization | 6e3efd1431195ccb4afd0801caa8cc825931197f | [
"BSD-3-Clause"
] | 58 | 2015-11-26T17:24:12.000Z | 2021-11-30T12:47:31.000Z | razor/interface.py | SRI-CSL/application-specialization | 6e3efd1431195ccb4afd0801caa8cc825931197f | [
"BSD-3-Clause"
] | 10 | 2015-09-03T14:54:27.000Z | 2020-11-13T14:02:11.000Z | """
OCCAM
Copyright (c) 2011-2017, SRI International
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of SRI International nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The API to the protobuffer interface.
"""
#pylint: disable=E1101
import re
import sys
from .proto import Previrt_pb2 as pb
def emptyInterface():
""" Returns an empty interface.
"""
return pb.ComponentInterface()
def parseInterface(filename):
""" Parses the filename as an interface.
"""
result = pb.ComponentInterface()
if filename == '-':
result.ParseFromString(sys.stdin.read())
else:
result.ParseFromString(open(filename, 'rb').read())
return result
def writeInterface(iface, filename):
""" Writes the innterface out to the file.
"""
if isinstance(filename, str):
if filename == '-':
f = sys.stdout
else:
f = open(filename, 'wb')
else:
f = filename
f.write(iface.SerializeToString())
f.close()
def mainInterface():
""" Returns the interface for main.
"""
main = pb.ComponentInterface()
c = main.calls.add(name=str.encode('main','utf-8'), count=1)
c.args.add(type=pb.U)
c.args.add(type=pb.U)
main.references.extend([str.encode('main','utf-8')])
#iam 11/15/2016 these don't seem to be really necessary;
# or if they are necessary, then there probably should be
# a lot more, no?
atexit = main.calls.add(name=str.encode('atexit','utf-8'), count=1)
atexit.args.add(type=pb.U)
main.references.extend([str.encode('atexit','utf-8')])
#inittls = main.calls.add(name='_init_tls', count=1)
#iam: no inittls.args.add ???
main.calls.add(name=str.encode('_init_tls','utf-8'), count=1)
main.references.extend([str.encode('_init_tls','utf-8')])
exitr = main.calls.add(name=str.encode('exit', 'utf-8'), count=1)
exitr.args.add(type=pb.U)
main.references.extend([str.encode('exit','utf-8')])
return main
def joinInterfaces(into, merge):
""" Merges the first interface into the second.
"""
result = False
for mc in merge.calls:
for c in [c for c in into.calls if c.name == mc.name]:
if len(mc.args) != len(c.args):
continue
if c.args == mc.args:
c.count += mc.count
break
else:
into.calls.add(name=mc.name, args=mc.args, count=mc.count)
result = True
for mr in merge.references:
if mr in into.references:
continue
into.references.append(mr)
result = True
return result
def readInterfaceFromText(f):
""" parses the lines of f as an interface.
"""
ptrn_rest = r'(?:\s*,\s*(.*))?'
ptrn_call = re.compile(r'([^(]+)\(([^)]*)\)\s*(?::\s*([0-9]+))?')
ptrn_int = re.compile(r'i([0-9]+)\s+([0-9]+)' + ptrn_rest)
ptrn_str = re.compile(r'^"((?:[^"\\]|(?:\\"))+)"' + ptrn_rest)
ptrn_unknown = re.compile(r'^\?' + ptrn_rest)
result = pb.ComponentInterface()
for line in [x.strip() for x in f.readlines()]:
if not line:
continue
if line.startswith('#'):
continue
mtch = ptrn_call.match(line)
if mtch:
v = result.calls.add(name=mtch.group(1))
if mtch.group(3):
v.count = int(mtch.group(3))
args = mtch.group(2).strip()
while args and not args == '':
args = args.strip()
m = ptrn_unknown.match(args)
if m:
args.add(type=pb.U)
args = m.group(1)
else:
m = ptrn_int.match(args)
if m:
a = v.args.add(type=pb.I)
a.int.value = hex(int(m.group(2)))[2:]
a.int.bits = int(m.group(1))
args = m.group(3)
else:
m = ptrn_str.match(args)
if m:
a = v.args.add(type=pb.S)
a.str.data = m.group(1)
args = m.group(2)
else:
assert False
else:
print("skipping line '{0}'".format(line))
return result
| 34.355422 | 79 | 0.593372 |
import re
import sys
from .proto import Previrt_pb2 as pb
def emptyInterface():
return pb.ComponentInterface()
def parseInterface(filename):
result = pb.ComponentInterface()
if filename == '-':
result.ParseFromString(sys.stdin.read())
else:
result.ParseFromString(open(filename, 'rb').read())
return result
def writeInterface(iface, filename):
if isinstance(filename, str):
if filename == '-':
f = sys.stdout
else:
f = open(filename, 'wb')
else:
f = filename
f.write(iface.SerializeToString())
f.close()
def mainInterface():
main = pb.ComponentInterface()
c = main.calls.add(name=str.encode('main','utf-8'), count=1)
c.args.add(type=pb.U)
c.args.add(type=pb.U)
main.references.extend([str.encode('main','utf-8')])
# or if they are necessary, then there probably should be
# a lot more, no?
atexit = main.calls.add(name=str.encode('atexit','utf-8'), count=1)
atexit.args.add(type=pb.U)
main.references.extend([str.encode('atexit','utf-8')])
#inittls = main.calls.add(name='_init_tls', count=1)
#iam: no inittls.args.add ???
main.calls.add(name=str.encode('_init_tls','utf-8'), count=1)
main.references.extend([str.encode('_init_tls','utf-8')])
exitr = main.calls.add(name=str.encode('exit', 'utf-8'), count=1)
exitr.args.add(type=pb.U)
main.references.extend([str.encode('exit','utf-8')])
return main
def joinInterfaces(into, merge):
result = False
for mc in merge.calls:
for c in [c for c in into.calls if c.name == mc.name]:
if len(mc.args) != len(c.args):
continue
if c.args == mc.args:
c.count += mc.count
break
else:
into.calls.add(name=mc.name, args=mc.args, count=mc.count)
result = True
for mr in merge.references:
if mr in into.references:
continue
into.references.append(mr)
result = True
return result
def readInterfaceFromText(f):
ptrn_rest = r'(?:\s*,\s*(.*))?'
ptrn_call = re.compile(r'([^(]+)\(([^)]*)\)\s*(?::\s*([0-9]+))?')
ptrn_int = re.compile(r'i([0-9]+)\s+([0-9]+)' + ptrn_rest)
ptrn_str = re.compile(r'^"((?:[^"\\]|(?:\\"))+)"' + ptrn_rest)
ptrn_unknown = re.compile(r'^\?' + ptrn_rest)
result = pb.ComponentInterface()
for line in [x.strip() for x in f.readlines()]:
if not line:
continue
if line.startswith('
continue
mtch = ptrn_call.match(line)
if mtch:
v = result.calls.add(name=mtch.group(1))
if mtch.group(3):
v.count = int(mtch.group(3))
args = mtch.group(2).strip()
while args and not args == '':
args = args.strip()
m = ptrn_unknown.match(args)
if m:
args.add(type=pb.U)
args = m.group(1)
else:
m = ptrn_int.match(args)
if m:
a = v.args.add(type=pb.I)
a.int.value = hex(int(m.group(2)))[2:]
a.int.bits = int(m.group(1))
args = m.group(3)
else:
m = ptrn_str.match(args)
if m:
a = v.args.add(type=pb.S)
a.str.data = m.group(1)
args = m.group(2)
else:
assert False
else:
print("skipping line '{0}'".format(line))
return result
| true | true |
f7ff525cd40b5650ba27e8230fd67635baf49ce6 | 1,194 | py | Python | tests/vat_ids.py | MrMebelMan/pyvies | a0f9c1a5b17be61813bcd3a2308e64ed350df8bb | [
"MIT"
] | 1 | 2020-05-22T14:17:41.000Z | 2020-05-22T14:17:41.000Z | tests/vat_ids.py | MrMebelMan/pyvies | a0f9c1a5b17be61813bcd3a2308e64ed350df8bb | [
"MIT"
] | null | null | null | tests/vat_ids.py | MrMebelMan/pyvies | a0f9c1a5b17be61813bcd3a2308e64ed350df8bb | [
"MIT"
] | 1 | 2019-01-08T14:58:05.000Z | 2019-01-08T14:58:05.000Z | VALID_VAT_IDS = {
'AT': 'U33826303',
'CZ': '48025551',
'DE': '263104148',
'DK': '13189900',
'ES': 'B57835241',
'FR': '39518813530',
'FI': '18217187',
'GB': '248233561',
'HU': '11062361',
'HR': '59992797221',
# 'IS': '??????',
'IE': '4575414W',
'IT': '01441870449',
'LT': '100005724315',
'LU': '19112340',
'NL': '817335808B02',
'MT': '10374410',
'PT': '504791834',
'PL': '1231076587',
'RO': '9919750',
'SK': '2022293009',
'SI': '42457157',
'SE': '556132966401',
'se': ' - 556-132\t 966\n401 ',
}
INVALID_VAT_IDS = [
# garbage
'',
'a',
'aa',
'aaa',
'ab1',
'\t\n ',
'!@#$%^&*()',
# no country code
'298152355',
'40103854051',
'858556789B01',
'10341606Y',
'920 1205 85',
'283358087',
# unsupported country codes
'AA123456789',
'ZZ12345678',
'xz11111111',
# invalid/outdated vat numbers
'CZ123456789',
'NL018146077B01',
'DK38699598',
'ESB98346646',
'DK3889013',
'GB283320026',
'GB279873824',
'DE216384455',
'IS065084',
'AAAAAAAAAA',
'AT33826303',
'IT00000000',
]
| 18.090909 | 37 | 0.494137 | VALID_VAT_IDS = {
'AT': 'U33826303',
'CZ': '48025551',
'DE': '263104148',
'DK': '13189900',
'ES': 'B57835241',
'FR': '39518813530',
'FI': '18217187',
'GB': '248233561',
'HU': '11062361',
'HR': '59992797221',
'IE': '4575414W',
'IT': '01441870449',
'LT': '100005724315',
'LU': '19112340',
'NL': '817335808B02',
'MT': '10374410',
'PT': '504791834',
'PL': '1231076587',
'RO': '9919750',
'SK': '2022293009',
'SI': '42457157',
'SE': '556132966401',
'se': ' - 556-132\t 966\n401 ',
}
INVALID_VAT_IDS = [
'',
'a',
'aa',
'aaa',
'ab1',
'\t\n ',
'!@#$%^&*()',
'298152355',
'40103854051',
'858556789B01',
'10341606Y',
'920 1205 85',
'283358087',
'AA123456789',
'ZZ12345678',
'xz11111111',
'CZ123456789',
'NL018146077B01',
'DK38699598',
'ESB98346646',
'DK3889013',
'GB283320026',
'GB279873824',
'DE216384455',
'IS065084',
'AAAAAAAAAA',
'AT33826303',
'IT00000000',
]
| true | true |
f7ff52774f076f79502f0471950e30e96f1b1c13 | 4,024 | py | Python | Univ_individual_files/313_Bandung_Institute_of_Technology_ITB.py | srsarangi/univscanner | 8735628690f7f50662ee5abf14ac1d27a657b613 | [
"Apache-2.0"
] | null | null | null | Univ_individual_files/313_Bandung_Institute_of_Technology_ITB.py | srsarangi/univscanner | 8735628690f7f50662ee5abf14ac1d27a657b613 | [
"Apache-2.0"
] | null | null | null | Univ_individual_files/313_Bandung_Institute_of_Technology_ITB.py | srsarangi/univscanner | 8735628690f7f50662ee5abf14ac1d27a657b613 | [
"Apache-2.0"
] | 2 | 2021-05-18T07:50:15.000Z | 2021-05-18T11:16:04.000Z | import requests
import urllib.request
import time
import urllib
import re
import csv
from bs4 import BeautifulSoup
def Bandung_Institute_of_Technology():
url = "https://www.unimi.it/en/ugov/ou-structure/department-computer-science-giovanni-degli-antoni" # homepage url
r = requests.get(url) # request to url
# getting the soup by parsing the html parsel to text to request r
soup = BeautifulSoup(r.text, "html.parser")
# file initialization to write
filename = "bandung.txt"
f = open(filename, "w")
excel_filename = "bandung.csv"
f2 = open(excel_filename, "w")
csvwriter = csv.writer(f2)
overall_file = "all_emails.csv"
f3 = open(overall_file, "a")
csvwriter2 = csv.writer(f3)
u_name = "Bandung Institute of Technology (ITB)"
country = "Indonesia"
garbage_emails = []
var = [f, csvwriter, csvwriter2, u_name, country]
# d gives the array of all profs on the dept homepage
d = soup.find_all('span', {'class':'field-content icon rubrica flex'})
# dd = d.find_all('div', {'class': 'views-field views-field-title'})
# print(d)
#iterating for every prof
for i in d:
a = i.find('a') # a contains the name and the homepage of prof
link = "https://www.unimi.it" + a.get('href') # extracting prof page link
name = a.get_text() # extracting prof name
# print(name, "\t", link)
name = name.strip()
try:
prof_resp = requests.get(link)
except:
continue
email = "Not Found"
print(name, link)
filterandgetEmail(var, garbage_emails, name, link, email, prof_resp)
f.close()
f2.close()
f3.close()
print("Finished")
def filterandgetEmail(var, garbage_emails, name, link, email, prof_resp):
f = var[0]
csvwriter = var[1]
csvwriter2 = var[2]
u_name = var[3]
country = var[4]
keyword_list = ['Computer architecture','computer architecture','Computer Architecture', 'Hardware And System Architecture', 'hardware and system architecture',
'Hardware and Architecture', 'hardware and architecture', 'embedded system', 'Embedded System','Computer Organization','VLSI', 'Computer and System',
'Distributed System', 'distributed system', 'Distributed system' ]
flag = 1
prof_soup = BeautifulSoup(prof_resp.text, "html.parser")
research_text = prof_soup.text
for pattern in keyword_list:
#print(pattern)
if re.search(pattern,research_text):
flag = 0
if email != 'Not Found':
f.write(link + '\n' + name + "\t"+ email + "\n")
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
else:
new_emails = set(re.findall(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}", prof_resp.text))
for eemail in garbage_emails:
if eemail in new_emails:
new_emails.remove(eemail)
if len(new_emails) == 0:
email = "Email Not Found"
f.write(link + '\n' + name + "\t"+ email + "\n")
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
else:
# f.write(link + '\n' + name)
for email in new_emails:
f.write(link + '\n' + name + '\t\t' + email + '\n')
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
# f.write("\n")
f.write(pattern)
f.write('\n\n')
break
if __name__ == '__main__':
Bandung_Institute_of_Technology() | 35.928571 | 165 | 0.565606 | import requests
import urllib.request
import time
import urllib
import re
import csv
from bs4 import BeautifulSoup
def Bandung_Institute_of_Technology():
url = "https://www.unimi.it/en/ugov/ou-structure/department-computer-science-giovanni-degli-antoni"
r = requests.get(url)
soup = BeautifulSoup(r.text, "html.parser")
filename = "bandung.txt"
f = open(filename, "w")
excel_filename = "bandung.csv"
f2 = open(excel_filename, "w")
csvwriter = csv.writer(f2)
overall_file = "all_emails.csv"
f3 = open(overall_file, "a")
csvwriter2 = csv.writer(f3)
u_name = "Bandung Institute of Technology (ITB)"
country = "Indonesia"
garbage_emails = []
var = [f, csvwriter, csvwriter2, u_name, country]
d = soup.find_all('span', {'class':'field-content icon rubrica flex'})
for i in d:
a = i.find('a')
link = "https://www.unimi.it" + a.get('href')
name = a.get_text()
name = name.strip()
try:
prof_resp = requests.get(link)
except:
continue
email = "Not Found"
print(name, link)
filterandgetEmail(var, garbage_emails, name, link, email, prof_resp)
f.close()
f2.close()
f3.close()
print("Finished")
def filterandgetEmail(var, garbage_emails, name, link, email, prof_resp):
f = var[0]
csvwriter = var[1]
csvwriter2 = var[2]
u_name = var[3]
country = var[4]
keyword_list = ['Computer architecture','computer architecture','Computer Architecture', 'Hardware And System Architecture', 'hardware and system architecture',
'Hardware and Architecture', 'hardware and architecture', 'embedded system', 'Embedded System','Computer Organization','VLSI', 'Computer and System',
'Distributed System', 'distributed system', 'Distributed system' ]
flag = 1
prof_soup = BeautifulSoup(prof_resp.text, "html.parser")
research_text = prof_soup.text
for pattern in keyword_list:
if re.search(pattern,research_text):
flag = 0
if email != 'Not Found':
f.write(link + '\n' + name + "\t"+ email + "\n")
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
else:
new_emails = set(re.findall(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}", prof_resp.text))
for eemail in garbage_emails:
if eemail in new_emails:
new_emails.remove(eemail)
if len(new_emails) == 0:
email = "Email Not Found"
f.write(link + '\n' + name + "\t"+ email + "\n")
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
else:
for email in new_emails:
f.write(link + '\n' + name + '\t\t' + email + '\n')
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
f.write(pattern)
f.write('\n\n')
break
if __name__ == '__main__':
Bandung_Institute_of_Technology() | true | true |
f7ff53063f5fd0f44da341b2601141765f4c6b1a | 1,280 | py | Python | project_test/tests/test_002_forms.py | emencia/emencia-django-bazar | a0cf56c00988c84c2288c21fa2a08364fc5033aa | [
"MIT"
] | null | null | null | project_test/tests/test_002_forms.py | emencia/emencia-django-bazar | a0cf56c00988c84c2288c21fa2a08364fc5033aa | [
"MIT"
] | 11 | 2015-05-06T14:50:14.000Z | 2017-12-16T23:46:17.000Z | project_test/tests/test_002_forms.py | emencia/emencia-django-bazar | a0cf56c00988c84c2288c21fa2a08364fc5033aa | [
"MIT"
] | null | null | null | import pytest
import factory
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from bazar.models import Entity, Note
from bazar.forms.entity import EntityForm
from bazar.forms.note import NoteForm
import factories
@pytest.mark.django_db
def test_form_entity(admin_client):
"""Simple test on Entity form"""
entity_datas = factory.build(dict, FACTORY_CLASS=factories.EntityFormFactory)
form = EntityForm(data=entity_datas)
assert form.is_valid() == True
form.save()
assert Entity.objects.count() == 1
@pytest.mark.django_db
def test_form_note(admin_client):
"""Simple test on Note form"""
factory_entity = factories.EntityModelFactory()
note_datas = factory.build(dict, FACTORY_CLASS=factories.NoteFormFactory)
# Save related object since we used build()
author = note_datas.pop('author')
entity = note_datas.pop('entity')
author.save()
entity.save()
form = NoteForm(author=author, entity=entity, data=note_datas)
# Submitted field values are valid
assert form.is_valid() == True
# Save note object
note_instance = form.save()
# Ensure foreignkey has been saved
assert note_instance.author == author
assert note_instance.entity == entity
| 24.615385 | 81 | 0.733594 | import pytest
import factory
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from bazar.models import Entity, Note
from bazar.forms.entity import EntityForm
from bazar.forms.note import NoteForm
import factories
@pytest.mark.django_db
def test_form_entity(admin_client):
entity_datas = factory.build(dict, FACTORY_CLASS=factories.EntityFormFactory)
form = EntityForm(data=entity_datas)
assert form.is_valid() == True
form.save()
assert Entity.objects.count() == 1
@pytest.mark.django_db
def test_form_note(admin_client):
factory_entity = factories.EntityModelFactory()
note_datas = factory.build(dict, FACTORY_CLASS=factories.NoteFormFactory)
author = note_datas.pop('author')
entity = note_datas.pop('entity')
author.save()
entity.save()
form = NoteForm(author=author, entity=entity, data=note_datas)
assert form.is_valid() == True
note_instance = form.save()
assert note_instance.author == author
assert note_instance.entity == entity
| true | true |
f7ff534e64913b93a7a9d2636e932ed76b8bced3 | 933 | py | Python | manage.py | praiseG/DABS-Django-Backend | 8c51e4ef5ab12e1bc2f1c51423c21e3090a7e2f6 | [
"MIT"
] | 1 | 2021-08-17T15:19:08.000Z | 2021-08-17T15:19:08.000Z | manage.py | praiseG/DABS-Django-Backend | 8c51e4ef5ab12e1bc2f1c51423c21e3090a7e2f6 | [
"MIT"
] | null | null | null | manage.py | praiseG/DABS-Django-Backend | 8c51e4ef5ab12e1bc2f1c51423c21e3090a7e2f6 | [
"MIT"
] | 3 | 2019-12-19T20:50:44.000Z | 2021-05-14T14:43:23.000Z | #!/usr/bin/env python
import os
import sys
import dotenv
if __name__ == "__main__":
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
dotenv.read_dotenv(os.path.join(BASE_PATH, '.env'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dabs.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 33.321429 | 77 | 0.651661 |
import os
import sys
import dotenv
if __name__ == "__main__":
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
dotenv.read_dotenv(os.path.join(BASE_PATH, '.env'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dabs.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| true | true |
f7ff53fec1489838d4bae06123009c7891e40f1e | 236,495 | py | Python | cinder/tests/unit/test_emc_vnxdirect.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/test_emc_vnxdirect.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/test_emc_vnxdirect.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 - 2015 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import mock
from oslo_concurrency import processutils
import six
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.emc import emc_cli_fc
from cinder.volume.drivers.emc import emc_cli_iscsi
from cinder.volume.drivers.emc import emc_vnx_cli
from cinder.zonemanager import fc_san_lookup_service as fc_service
from mock import patch
SUCCEED = ("", 0)
FAKE_ERROR_RETURN = ("FAKE ERROR", 255)
VERSION = emc_vnx_cli.EMCVnxCliBase.VERSION
class EMCVNXCLIDriverTestData(object):
    """Canned volumes, snapshots and naviseccli command/output fixtures.

    Test cases use the ``*_CMD`` helpers to build the exact argument
    tuples the driver is expected to pass to ``command_execute``, and the
    matching result helpers/constants to fake the CLI's
    ``(stdout, return_code)`` responses.
    """

    # Plain pool-backed volume; provider_location carries the backend
    # serial, LUN type, LUN id and array version string the driver parses.
    test_volume = {
        'status': 'creating',
        'name': 'vol1',
        'size': 1,
        'volume_name': 'vol1',
        'id': '1',
        'provider_auth': None,
        'host': "host@backendsec#unit_test_pool",
        'project_id': 'project',
        'provider_location': 'system^FNM11111|type^lun|id^1|version^05.03.00',
        'display_name': 'vol1',
        'display_description': 'test volume',
        'volume_type_id': None,
        'consistencygroup_id': None
    }
test_legacy_volume = {
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'provider_location': 'system^FNM11111|type^lun|id^1',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': None
}
test_volume_clone_cg = {
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': None,
'provider_location': 'system^FNM11111|type^lun|id^1',
}
test_volume_cg = {
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': 'cg_id'
}
test_volume_rw = {
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': None,
'provider_location': 'system^FNM11111|type^lun|id^1|version^05.03.00',
}
test_volume2 = {
'name': 'vol2',
'size': 1,
'volume_name': 'vol2',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol2',
'consistencygroup_id': None,
'display_description': 'test volume',
'volume_type_id': None,
'provider_location': 'system^FNM11111|type^lun|id^1|version^05.03.00',
'volume_metadata': [{'key': 'lun_type', 'value': 'lun'}]}
volume_in_cg = {
'name': 'vol2',
'size': 1,
'volume_name': 'vol2',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol1_in_cg',
'provider_location': 'system^FNM11111|type^lun|id^1',
'consistencygroup_id': 'consistencygroup_id',
'display_description': 'test volume',
'volume_type_id': None}
volume2_in_cg = {
'name': 'vol2',
'size': 1,
'volume_name': 'vol2',
'id': '3',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol2_in_cg',
'provider_location': 'system^FNM11111|type^lun|id^3',
'consistencygroup_id': 'consistencygroup_id',
'display_description': 'test volume',
'volume_type_id': None}
test_volume_with_type = {
'name': 'vol_with_type',
'size': 1,
'volume_name': 'vol_with_type',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'thin_vol',
'consistencygroup_id': None,
'display_description': 'vol with type',
'volume_type_id': 'abc1-2320-9013-8813-8941-1374-8112-1231',
'provider_location': 'system^FNM11111|type^lun|id^1'}
test_failed_volume = {
'name': 'failed_vol1',
'size': 1,
'volume_name': 'failed_vol1',
'id': '4',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'failed_vol',
'consistencygroup_id': None,
'display_description': 'test failed volume',
'volume_type_id': None}
test_volume1_in_sg = {
'name': 'vol1_in_sg',
'size': 1,
'volume_name': 'vol1_in_sg',
'id': '4',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'failed_vol',
'display_description': 'Volume 1 in SG',
'volume_type_id': None,
'provider_location': 'system^fakesn|type^lun|id^4|version^05.03.00'}
test_volume2_in_sg = {
'name': 'vol2_in_sg',
'size': 1,
'volume_name': 'vol2_in_sg',
'id': '5',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'failed_vol',
'display_description': 'Volume 2 in SG',
'volume_type_id': None,
'provider_location': 'system^fakesn|type^lun|id^3|version^05.03.00'}
test_snapshot = {
'name': 'snapshot1',
'size': 1,
'id': '4444',
'volume_name': 'vol1',
'volume': test_volume,
'volume_size': 1,
'consistencygroup_id': None,
'cgsnapshot_id': None,
'project_id': 'project'}
test_failed_snapshot = {
'name': 'failed_snapshot',
'size': 1,
'id': '5555',
'volume_name': 'vol-vol1',
'volume': test_volume,
'volume_size': 1,
'project_id': 'project'}
test_clone = {
'name': 'clone1',
'size': 1,
'id': '2',
'volume_name': 'vol1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'clone1',
'consistencygroup_id': None,
'display_description': 'volume created from snapshot',
'volume_type_id': '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
'provider_location': 'system^fakesn|type^lun|id^2|version^05.03.00'}
test_clone_cg = {
'name': 'clone1',
'size': 1,
'id': '2',
'volume_name': 'vol1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'clone1',
'consistencygroup_id': 'consistencygroup_id',
'display_description': 'volume created from snapshot',
'volume_type_id': None,
'provider_location': 'system^fakesn|type^lun|id^2|version^05.03.00'}
connector = {
'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'wwpns': ["1234567890123456", "1234567890543216"],
'wwnns': ["2234567890123456", "2234567890543216"],
'host': 'fakehost'}
test_volume3 = {
'migration_status': None, 'availability_zone': 'nova',
'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
'name': 'vol3',
'size': 2,
'status': 'available',
'volume_type_id':
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
'deleted': False,
'host': "host@backendsec#unit_test_pool",
'source_volid': None, 'provider_auth': None,
'display_name': 'vol-test02',
'attach_status': 'detached',
'volume_type': [],
'volume_attachment': [],
'provider_location':
'system^FNM11111|type^lun|id^1|version^05.03.00',
'_name_id': None, 'volume_metadata': []}
test_new_type = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning': 'thin'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
test_diff = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provisioning': ('thick', 'thin')}}
test_host = {'host': 'ubuntu-server12@pool_backend_1#POOL_SAS1',
'capabilities':
{'pool_name': 'POOL_SAS1',
'location_info': 'POOL_SAS1|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
test_volume4 = {'migration_status': None, 'availability_zone': 'nova',
'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
'name': 'vol4',
'size': 2,
'status': 'available',
'volume_type_id':
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
'deleted': False, 'provider_location':
'system^FNM11111|type^lun|id^4',
'host': 'ubuntu-server12@array_backend_1',
'source_volid': None, 'provider_auth': None,
'display_name': 'vol-test02',
'volume_attachment': [],
'attach_status': 'detached',
'volume_type': [],
'_name_id': None, 'volume_metadata': []}
test_volume5 = {'migration_status': None, 'availability_zone': 'nova',
'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25',
'name_id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25',
'name': 'vol5',
'size': 1,
'status': 'available',
'volume_type_id':
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
'deleted': False, 'provider_location':
'system^FNM11111|type^lun|id^5|version^05.02.00',
'host': 'ubuntu-server12@array_backend_1#unit_test_pool',
'source_volid': None, 'provider_auth': None,
'display_name': 'vol-test05',
'volume_attachment': [],
'attach_status': 'detached',
'volume_type': [],
'_name_id': None, 'volume_metadata': []}
test_new_type2 = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:pool': 'POOL_SAS2'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
test_diff2 = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:pool': ('POOL_SAS1', 'POOL_SAS2')}}
test_host2 = {'host': 'ubuntu-server12@array_backend_1',
'capabilities':
{'location_info': '|FNM00124500890',
'volume_backend_name': 'array_backend_1',
'storage_protocol': 'iSCSI'}}
test_cg = {'id': 'consistencygroup_id',
'name': 'group_name',
'status': 'deleting'}
test_cg_with_type = {'id': 'consistencygroup_id',
'name': 'group_name',
'status': 'creating',
'volume_type_id':
'abc1-2320-9013-8813-8941-1374-8112-1231,'
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8,'}
test_cgsnapshot = {
'consistencygroup_id': 'consistencygroup_id',
'id': 'cgsnapshot_id',
'status': 'available'}
test_member_cgsnapshot = {
'name': 'snapshot1',
'size': 1,
'id': 'cgsnapshot_id',
'volume': test_volume,
'volume_name': 'vol1',
'volume_size': 1,
'consistencygroup_id': 'consistencygroup_id',
'cgsnapshot_id': 'cgsnapshot_id',
'project_id': 'project'
}
test_lun_id = 1
test_existing_ref = {'source-id': test_lun_id}
test_existing_ref_source_name = {'source-name': 'vol1'}
test_pool_name = 'unit_test_pool'
device_map = {
'1122334455667788': {
'initiator_port_wwn_list': ['123456789012345', '123456789054321'],
'target_port_wwn_list': ['1122334455667777']}}
i_t_map = {'123456789012345': ['1122334455667777'],
'123456789054321': ['1122334455667777']}
POOL_PROPERTY_CMD = ('storagepool', '-list', '-name', 'unit_test_pool',
'-userCap', '-availableCap',
'-state', '-prcntFullThreshold')
POOL_PROPERTY_W_FASTCACHE_CMD = ('storagepool', '-list', '-name',
'unit_test_pool', '-availableCap',
'-userCap', '-state',
'-subscribedCap',
'-prcntFullThreshold',
'-fastcache')
def POOL_GET_ALL_CMD(self, withfastcache=False):
if withfastcache:
return ('storagepool', '-list', '-availableCap',
'-userCap', '-state', '-subscribedCap',
'-prcntFullThreshold',
'-fastcache')
else:
return ('storagepool', '-list', '-availableCap',
'-userCap', '-state', '-subscribedCap',
'-prcntFullThreshold')
    def POOL_GET_ALL_RESULT(self, withfastcache=False):
        """Canned two-pool 'storagepool -list' output, rc 0.

        The withfastcache variant adds the 'FAST Cache:' line that the
        -fastcache query option produces.
        """
        if withfastcache:
            return ("Pool Name: unit_test_pool\n"
                    "Pool ID: 0\n"
                    "Percent Full Threshold: 70\n"
                    "User Capacity (Blocks): 6881061888\n"
                    "User Capacity (GBs): 3281.146\n"
                    "Available Capacity (Blocks): 6512292864\n"
                    "Available Capacity (GBs): 3105.303\n"
                    "Total Subscribed Capacity (GBs): 536.140\n"
                    "FAST Cache: Enabled\n"
                    "State: Ready\n"
                    "\n"
                    "Pool Name: unit_test_pool2\n"
                    "Pool ID: 1\n"
                    "Percent Full Threshold: 70\n"
                    "User Capacity (Blocks): 8598306816\n"
                    "User Capacity (GBs): 4099.992\n"
                    "Available Capacity (Blocks): 8356663296\n"
                    "Available Capacity (GBs): 3984.768\n"
                    "Total Subscribed Capacity (GBs): 636.240\n"
                    "FAST Cache: Disabled\n"
                    "State: Ready\n", 0)
        else:
            return ("Pool Name: unit_test_pool\n"
                    "Pool ID: 0\n"
                    "Percent Full Threshold: 70\n"
                    "User Capacity (Blocks): 6881061888\n"
                    "User Capacity (GBs): 3281.146\n"
                    "Available Capacity (Blocks): 6512292864\n"
                    "Available Capacity (GBs): 3105.303\n"
                    "Total Subscribed Capacity (GBs): 536.140\n"
                    "State: Ready\n"
                    "\n"
                    "Pool Name: unit_test_pool2\n"
                    "Pool ID: 1\n"
                    "Percent Full Threshold: 70\n"
                    "User Capacity (Blocks): 8598306816\n"
                    "User Capacity (GBs): 4099.992\n"
                    "Available Capacity (Blocks): 8356663296\n"
                    "Available Capacity (GBs): 3984.768\n"
                    "Total Subscribed Capacity (GBs): 636.240\n"
                    "State: Ready\n", 0)
def POOL_GET_STATE_RESULT(self, pools):
output = []
for i, po in enumerate(pools):
if i != 0:
output.append("\n")
output.append("Pool Name: %s" % po['pool_name'])
output.append("Pool ID: %s" % i)
output.append("State: %s" % po['state'])
return ("\n".join(output), 0)
def POOL_GET_ALL_STATES_TEST(self, states=['Ready']):
output = ""
for i, stat in enumerate(states):
out = ("Pool Name: Pool_" + str(i) + "\n"
"Pool ID: " + str(i) + "\n"
"Percent Full Threshold: 70\n"
"User Capacity (Blocks): 8598306816\n"
"User Capacity (GBs): 4099.992\n"
"Available Capacity (Blocks): 8356663296\n"
"Available Capacity (GBs): 3984.768\n"
"FAST Cache: Enabled\n"
"State: " + stat + "\n\n")
output += out
return (output, 0)
def SNAP_NOT_EXIST(self):
return ("Could not retrieve the specified (Snapshot).\n "
"The (Snapshot) may not exist", 9)
NDU_LIST_CMD = ('ndu', '-list')
NDU_LIST_RESULT = ("Name of the software package: -Compression " +
"Name of the software package: -Deduplication " +
"Name of the software package: -FAST " +
"Name of the software package: -FASTCache " +
"Name of the software package: -ThinProvisioning "
"Name of the software package: -VNXSnapshots",
0)
NDU_LIST_RESULT_WO_LICENSE = (
"Name of the software package: -Unisphere ",
0)
MIGRATE_PROPERTY_MIGRATING = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: MIGRATING
Percent Complete: 50
Time Remaining: 0 second(s)
"""
MIGRATE_PROPERTY_STOPPED = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: STOPPED - Destination full
Percent Complete: 60
Time Remaining: 0 second(s)
"""
    # --- naviseccli command-tuple builders --------------------------------
    # Each helper returns the exact argument tuple the driver is expected
    # to pass to command_execute, so tests can assert on mock call args.

    # Create a snapshot-mount-point LUN on top of a primary LUN.
    def SNAP_MP_CREATE_CMD(self, name='vol1', source='vol1'):
        return ('lun', '-create', '-type', 'snap', '-primaryLunName',
                source, '-name', name)

    # Attach an existing snapshot to a mount-point LUN.
    def SNAP_ATTACH_CMD(self, name='vol1', snapName='snapshot1'):
        return ('lun', '-attach', '-name', name, '-snapName', snapName)

    def SNAP_DELETE_CMD(self, name):
        return ('snap', '-destroy', '-id', name, '-o')

    # Snapshot creation is always read/write and never auto-deleted.
    def SNAP_CREATE_CMD(self, name):
        return ('snap', '-create', '-res', 1, '-name', name,
                '-allowReadWrite', 'yes',
                '-allowAutoDelete', 'no')

    def SNAP_MODIFY_CMD(self, name, rw):
        return ('snap', '-modify', '-id', name, '-allowReadWrite', rw,
                '-allowAutoDelete', 'yes')

    def SNAP_LIST_CMD(self, res_id=1):
        cmd = ('snap', '-list', '-res', res_id)
        return cmd

    def LUN_DELETE_CMD(self, name):
        return ('lun', '-destroy', '-name', name, '-forceDetach', '-o')

    def LUN_EXTEND_CMD(self, name, newsize):
        return ('lun', '-expand', '-name', name, '-capacity', newsize,
                '-sq', 'gb', '-o', '-ignoreThresholds')

    def LUN_PROPERTY_POOL_CMD(self, lunname):
        return ('lun', '-list', '-name', lunname, '-poolName')

    # Full property query used whenever the driver inspects a LUN.
    def LUN_PROPERTY_ALL_CMD(self, lunname):
        return ('lun', '-list', '-name', lunname,
                '-state', '-status', '-opDetails', '-userCap', '-owner',
                '-attachedSnapshot')

    def MIGRATION_CMD(self, src_id=1, dest_id=1):
        cmd = ("migrate", "-start", "-source", src_id, "-dest", dest_id,
               "-rate", "high", "-o")
        return cmd

    def MIGRATION_VERIFY_CMD(self, src_id):
        return ("migrate", "-list", "-source", src_id)

    def MIGRATION_CANCEL_CMD(self, src_id):
        return ("migrate", "-cancel", "-source", src_id, '-o')

    def GETPORT_CMD(self):
        return ("connection", "-getport", "-address", "-vlanid")

    # Ping an iSCSI target address from a given SP port/virtual port.
    def PINGNODE_CMD(self, sp, portid, vportid, ip):
        return ("connection", "-pingnode", "-sp", sp, '-portid', portid,
                "-vportid", vportid, "-address", ip, '-count', '1')

    def GETFCPORT_CMD(self):
        return ('port', '-list', '-sp')

    def CONNECTHOST_CMD(self, hostname, gname):
        return ('storagegroup', '-connecthost',
                '-host', hostname, '-gname', gname, '-o')

    def ENABLE_COMPRESSION_CMD(self, lun_id):
        return ('compression', '-on',
                '-l', lun_id, '-ignoreThresholds', '-o')

    # List one storage group (with host/iSCSI detail) or all groups.
    def STORAGEGROUP_LIST_CMD(self, gname=None):
        if gname:
            return ('storagegroup', '-list',
                    '-gname', gname, '-host', '-iscsiAttributes')
        else:
            return ('storagegroup', '-list')

    def STORAGEGROUP_REMOVEHLU_CMD(self, gname, hlu):
        return ('storagegroup', '-removehlu',
                '-hlu', hlu, '-gname', gname, '-o')

    def SNAP_COPY_CMD(self, src_snap, snap_name):
        return ('snap', '-copy', '-id', src_snap, '-name', snap_name,
                '-ignoreMigrationCheck', '-ignoreDeduplicationCheck')

    def ALLOW_READWRITE_ON_SNAP_CMD(self, snap_name):
        return ('snap', '-modify', '-id', snap_name,
                '-allowReadWrite', 'yes', '-allowAutoDelete', 'yes')
provisioning_values = {
'thin': ['-type', 'Thin'],
'thick': ['-type', 'NonThin'],
'compressed': ['-type', 'Thin'],
'deduplicated': ['-type', 'Thin', '-deduplication', 'on']}
tiering_values = {
'starthighthenauto': [
'-initialTier', 'highestAvailable',
'-tieringPolicy', 'autoTier'],
'auto': [
'-initialTier', 'optimizePool',
'-tieringPolicy', 'autoTier'],
'highestavailable': [
'-initialTier', 'highestAvailable',
'-tieringPolicy', 'highestAvailable'],
'lowestavailable': [
'-initialTier', 'lowestAvailable',
'-tieringPolicy', 'lowestAvailable'],
'nomovement': [
'-initialTier', 'optimizePool',
'-tieringPolicy', 'noMovement']}
def LUN_CREATION_CMD(self, name, size, pool, provisioning, tiering,
ignore_thresholds=False, poll=True):
initial = ['lun', '-create',
'-capacity', size,
'-sq', 'gb',
'-poolName', pool,
'-name', name]
if not poll:
initial = ['-np'] + initial
if provisioning:
initial.extend(self.provisioning_values[provisioning])
else:
initial.extend(self.provisioning_values['thick'])
if tiering:
initial.extend(self.tiering_values[tiering])
if ignore_thresholds:
initial.append('-ignoreThresholds')
return tuple(initial)
    def CHECK_FASTCACHE_CMD(self, storage_pool):
        return ('storagepool', '-list', '-name',
                storage_pool, '-fastcache')

    # --- consistency-group command-tuple builders -------------------------

    # 'snap -group -create', optionally seeding member LUN ids via -res.
    def CREATE_CONSISTENCYGROUP_CMD(self, cg_name, members=None):
        create_cmd = ('snap', '-group', '-create',
                      '-name', cg_name, '-allowSnapAutoDelete', 'no')
        if not members:
            return create_cmd
        else:
            return create_cmd + ('-res', ','.join(map(six.text_type,
                                                      members)))

    def DELETE_CONSISTENCYGROUP_CMD(self, cg_name):
        return ('-np', 'snap', '-group', '-destroy',
                '-id', cg_name)

    def ADD_LUN_TO_CG_CMD(self, cg_name, lun_id):
        return ('snap', '-group',
                '-addmember', '-id', cg_name, '-res', lun_id)

    def CREATE_CG_SNAPSHOT(self, cg_name, snap_name):
        return ('-np', 'snap', '-create', '-res', cg_name,
                '-resType', 'CG', '-name', snap_name, '-allowReadWrite',
                'yes', '-allowAutoDelete', 'no')

    def DELETE_CG_SNAPSHOT(self, snap_name):
        return ('-np', 'snap', '-destroy', '-id', snap_name, '-o')

    def GET_CG_BY_NAME_CMD(self, cg_name):
        return ('snap', '-group', '-list', '-id', cg_name)

    def GET_SNAP(self, snap_name):
        return ('snap', '-list', '-id', snap_name)

    # remove_ids / new_ids are iterables of LUN-id strings.
    def REMOVE_LUNS_FROM_CG_CMD(self, cg_name, remove_ids):
        return ('snap', '-group', '-rmmember', '-id', cg_name, '-res',
                ','.join(remove_ids))

    def REPLACE_LUNS_IN_CG_CMD(self, cg_name, new_ids):
        return ('snap', '-group', '-replmember', '-id', cg_name, '-res',
                ','.join(new_ids))
def CONSISTENCY_GROUP_VOLUMES(self):
volumes = []
volumes.append(self.test_volume)
volumes.append(self.test_volume)
return volumes
def SNAPS_IN_SNAP_GROUP(self):
snaps = []
snaps.append(self.test_snapshot)
snaps.append(self.test_snapshot)
return snaps
def VOLUMES_NOT_IN_CG(self):
add_volumes = []
add_volumes.append(self.test_volume4)
add_volumes.append(self.test_volume5)
return add_volumes
def VOLUMES_IN_CG(self):
remove_volumes = []
remove_volumes.append(self.volume_in_cg)
remove_volumes.append(self.volume2_in_cg)
return remove_volumes
    # 'snap -group -list' output for a CG holding member LUNs 1 and 3.
    def CG_PROPERTY(self, cg_name):
        return """
Name: %(cg_name)s
Description:
Allow auto delete: No
Member LUN ID(s): 1, 3
State: Ready
""" % {'cg_name': cg_name}, 0

    def CG_NOT_FOUND(self):
        return ("Cannot find the consistency group. \n\n", 13)

    # LUN already belongs to another consistency group (rc 71).
    def CG_REPL_ERROR(self):
        return """
The specified LUN is already a member
of another consistency group. (0x716d8045)
""", 71

    # LUN busy with a 'Preparing' internal operation (rc 14).
    def LUN_PREP_ERROR(self):
        return ("The operation cannot be performed because "
                "the LUN is 'Preparing'. Wait for the LUN's "
                "Current Operation to complete 'Preparing' "
                "and retry the operation. (0x712d8e0e)", 14)
POOL_PROPERTY = (
"Pool Name: unit_test_pool\n"
"Pool ID: 1\n"
"Percent Full Threshold: 70\n"
"User Capacity (Blocks): 6881061888\n"
"User Capacity (GBs): 3281.146\n"
"Available Capacity (Blocks): 6832207872\n"
"Available Capacity (GBs): 3257.851\n"
"State: Ready\n"
"\n", 0)
POOL_PROPERTY_W_FASTCACHE = (
"Pool Name: unit_test_pool\n"
"Pool ID: 1\n"
"Percent Full Threshold: 70\n"
"User Capacity (Blocks): 6881061888\n"
"User Capacity (GBs): 3281.146\n"
"Available Capacity (Blocks): 6832207872\n"
"Available Capacity (GBs): 3257.851\n"
"Total Subscribed Capacity (GBs): 636.240\n"
"FAST Cache: Enabled\n"
"State: Ready\n\n", 0)
ALL_PORTS = ("SP: A\n" +
"Port ID: 4\n" +
"Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a4\n" +
"iSCSI Alias: 0215.a4\n\n" +
"Virtual Port ID: 0\n" +
"VLAN ID: Disabled\n" +
"IP Address: 10.244.214.118\n\n" +
"SP: A\n" +
"Port ID: 5\n" +
"Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a5\n" +
"iSCSI Alias: 0215.a5\n" +
"SP: A\n" +
"Port ID: 0\n" +
"Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a0\n" +
"iSCSI Alias: 0215.a0\n\n" +
"Virtual Port ID: 0\n" +
"VLAN ID: Disabled\n" +
"IP Address: 10.244.214.119\n\n" +
"SP: B\n" +
"Port ID: 2\n" +
"Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.b2\n" +
"iSCSI Alias: 0215.b2\n\n" +
"Virtual Port ID: 0\n" +
"VLAN ID: Disabled\n" +
"IP Address: 10.244.214.120\n\n", 0)
WHITE_LIST_PORTS = ("""SP: A
Port ID: 0
Port WWN: iqn.1992-04.com.emc:cx.fnmxxx.a0
iSCSI Alias: 0235.a7
Virtual Port ID: 0
VLAN ID: Disabled
IP Address: 192.168.3.52
SP: A
Port ID: 9
Port WWN: iqn.1992-04.com.emc:cx.fnmxxx.a9
iSCSI Alias: 0235.a9
SP: A
Port ID: 4
Port WWN: iqn.1992-04.com.emc:cx.fnmxxx.a4
iSCSI Alias: 0235.a4
SP: B
Port ID: 2
Port WWN: iqn.1992-04.com.emc:cx.fnmxxx.b2
iSCSI Alias: 0235.b6
Virtual Port ID: 0
VLAN ID: Disabled
IP Address: 192.168.4.53
""", 0)
iscsi_connection_info = {
'data': {'target_discovered': True,
'target_iqn':
'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
'target_lun': 2,
'target_portal': '10.244.214.118:3260',
'target_iqns': ['iqn.1992-04.com.emc:cx.fnm00124000215.a4'],
'target_luns': [2],
'target_portals': ['10.244.214.118:3260'],
'volume_id': '1'},
'driver_volume_type': 'iscsi'}
iscsi_connection_info_mp = {
'data': {'target_discovered': True,
'target_iqns': [
'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
'iqn.1992-04.com.emc:cx.fnm00124000215.a5'],
'target_iqn': 'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
'target_luns': [2, 2],
'target_lun': 2,
'target_portals': [
'10.244.214.118:3260',
'10.244.214.119:3260'],
'target_portal': '10.244.214.118:3260',
'volume_id': '1'},
'driver_volume_type': 'iscsi'}
PING_OK = ("Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" +
"Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" +
"Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" +
"Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n", 0)
FC_PORTS = ("Information about each SPPORT:\n" +
"\n" +
"SP Name: SP A\n" +
"SP Port ID: 0\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:60:08:60:01:95\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: YES\n" +
"Switch UID: 10:00:00:05:1E:72:EC:A6:" +
"20:46:00:05:1E:72:EC:A6\n" +
"SP Source ID: 272896\n" +
"\n" +
"SP Name: SP B\n" +
"SP Port ID: 4\n" +
"SP UID: iqn.1992-04.com.emc:cx." +
"fnm00124000215.b4\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: Not Applicable\n" +
"\n" +
"SP Name: SP A\n" +
"SP Port ID: 2\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:62:08:60:01:95\n" +
"Link Status: Down\n" +
"Port Status: Online\n" +
"Switch Present: NO\n" +
"\n" +
"SP Name: SP B\n" +
"SP Port ID: 2\n" +
"SP UID: 50:06:01:60:88:60:08:0F:"
"50:06:01:6A:08:60:08:0F\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: YES\n" +
"Switch UID: 10:00:50:EB:1A:03:3F:59:"
"20:11:50:EB:1A:03:3F:59\n" +
"SP Source ID: 69888\n", 0)
FAKEHOST_PORTS = (
"Information about each HBA:\n" +
"\n" +
"HBA UID: 20:00:00:90:FA:53:46:41:12:34:" +
"56:78:90:12:34:56\n" +
"Server Name: fakehost\n" +
"Server IP Address: 10.0.0.2" +
"HBA Model Description:\n" +
"HBA Vendor Description:\n" +
"HBA Device Driver Name:\n" +
"Information about each port of this HBA:\n\n" +
" SP Name: SP A\n" +
" SP Port ID: 0\n" +
" HBA Devicename:\n" +
" Trusted: NO\n" +
" Logged In: YES\n" +
" Defined: YES\n" +
" Initiator Type: 3\n" +
" StorageGroup Name: fakehost\n\n" +
" SP Name: SP A\n" +
" SP Port ID: 2\n" +
" HBA Devicename:\n" +
" Trusted: NO\n" +
" Logged In: YES\n" +
" Defined: YES\n" +
" Initiator Type: 3\n" +
" StorageGroup Name: fakehost\n\n" +
" SP Name: SP B\n" +
" SP Port ID: 2\n" +
" HBA Devicename:\n" +
" Trusted: NO\n" +
" Logged In: YES\n" +
" Defined: YES\n" +
" Initiator Type: 3\n" +
" StorageGroup Name: fakehost\n\n"
"Information about each SPPORT:\n" +
"\n" +
"SP Name: SP A\n" +
"SP Port ID: 0\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:60:08:60:01:95\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: YES\n" +
"Switch UID: 10:00:00:05:1E:72:EC:A6:" +
"20:46:00:05:1E:72:EC:A6\n" +
"SP Source ID: 272896\n" +
"\n" +
"SP Name: SP B\n" +
"SP Port ID: 4\n" +
"SP UID: iqn.1992-04.com.emc:cx." +
"fnm00124000215.b4\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: Not Applicable\n" +
"\n" +
"SP Name: SP A\n" +
"SP Port ID: 2\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:62:08:60:01:95\n" +
"Link Status: Down\n" +
"Port Status: Online\n" +
"Switch Present: NO\n" +
"\n" +
"SP Name: SP B\n" +
"SP Port ID: 2\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:6A:08:60:08:0F\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: YES\n" +
"Switch UID: 10:00:00:05:1E:72:EC:A6:" +
"20:46:00:05:1E:72:EC:A6\n" +
"SP Source ID: 272896\n", 0)
    def LUN_PROPERTY(self, name, is_thin=False, has_snap=False, size=1,
                     state='Ready', faulted='false', operation='None',
                     lunid=1, pool_name='unit_test_pool'):
        """Fake 'lun -list' detail output for a single LUN, rc 0.

        Keyword arguments tweak the fields individual tests care about
        (state, fault flag, current operation, attached snapshot, ...).
        """
        return ("""
LOGICAL UNIT NUMBER %(lunid)s
Name: %(name)s
UID: 60:06:01:60:09:20:32:00:13:DF:B4:EF:C2:63:E3:11
Current Owner: SP A
Default Owner: SP A
Allocation Owner: SP A
Attached Snapshot: %(has_snap)s
User Capacity (Blocks): 2101346304
User Capacity (GBs): %(size)d
Consumed Capacity (Blocks): 2149576704
Consumed Capacity (GBs): 1024.998
Pool Name: %(pool_name)s
Current State: %(state)s
Status: OK(0x0)
Is Faulted: %(faulted)s
Is Transitioning: false
Current Operation: %(operation)s
Current Operation State: N/A
Current Operation Status: N/A
Current Operation Percent Completed: 0
Is Thin LUN: %(is_thin)s""" % {
            'lunid': lunid,
            'name': name,
            'has_snap': 'FakeSnap' if has_snap else 'N/A',
            'size': size,
            'pool_name': pool_name,
            'state': state,
            'faulted': faulted,
            'operation': operation,
            'is_thin': 'Yes' if is_thin else 'No'}, 0)
def STORAGE_GROUP_ISCSI_FC_HBA(self, sgname):
return ("""\
Storage Group Name: %s
Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:222 SP A 4
Host name: fakehost
SPPort: A-4v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56 SP B 2
Host name: fakehost2
SPPort: B-2v0
Initiator IP: N/A
TPGT: 0
ISID: N/A
22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16 SP B 2
Host name: fakehost2
SPPort: B-2v0
Initiator IP: N/A
TPGT: 0
ISID: N/A
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
1 1
Shareable: YES""" % sgname, 0)
def STORAGE_GROUP_NO_MAP(self, sgname):
return ("""\
Storage Group Name: %s
Storage Group UID: 27:D2:BE:C1:9B:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
Shareable: YES""" % sgname, 0)
def STORAGE_GROUP_HAS_MAP(self, sgname):
return ("""\
Storage Group Name: %s
Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:222 SP A 4
Host name: fakehost
SPPort: A-4v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
1 1
Shareable: YES""" % sgname, 0)
def STORAGE_GROUP_HAS_MAP_ISCSI(self, sgname):
return ("""\
Storage Group Name: %s
Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:222 SP A 2
Host name: fakehost
SPPort: A-2v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
iqn.1993-08.org.debian:01:222 SP A 0
Host name: fakehost
SPPort: A-0v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
iqn.1993-08.org.debian:01:222 SP B 2
Host name: fakehost
SPPort: B-2v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
1 1
Shareable: YES""" % sgname, 0)
def STORAGE_GROUP_HAS_MAP_MP(self, sgname):
return ("""\
Storage Group Name: %s
Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:222 SP A 4
Host name: fakehost
SPPort: A-4v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
iqn.1993-08.org.debian:01:222 SP A 5
Host name: fakehost
SPPort: A-5v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
1 1
Shareable: YES""" % sgname, 0)
def STORAGE_GROUP_HAS_MAP_2(self, sgname):
return ("""\
Storage Group Name: %s
Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:222 SP A 4
Host name: fakehost
SPPort: A-4v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
1 1
2 3
Shareable: YES""" % sgname, 0)
def POOL_FEATURE_INFO_POOL_LUNS_CMD(self):
cmd = ('storagepool', '-feature', '-info',
'-maxPoolLUNs', '-numPoolLUNs')
return cmd
    # Fake output for the pool-LUN limit query above.
    # NOTE(review): parameter 'max' shadows the builtin; renaming it would
    # break keyword callers, so it is kept as-is.
    def POOL_FEATURE_INFO_POOL_LUNS(self, max, total):
        return (('Max. Pool LUNs: %s\n' % max) +
                ('Total Number of Pool LUNs: %s\n' % total), 0)
def STORAGE_GROUPS_HAS_MAP(self, sgname1, sgname2):
return ("""
Storage Group Name: irrelative
Storage Group UID: 9C:86:4F:30:07:76:E4:11:AC:83:C8:C0:8E:9C:D6:1F
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:5741c6307e60 SP A 6
Host name: fakehost
SPPort: A-6v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
Storage Group Name: %(sgname1)s
Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:222 SP A 4
Host name: fakehost
SPPort: A-4v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
31 3
41 4
Shareable: YES
Storage Group Name: %(sgname2)s
Storage Group UID: 9C:86:4F:30:07:76:E4:11:AC:83:C8:C0:8E:9C:D6:1F
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:5741c6307e60 SP A 6
Host name: fakehost
SPPort: A-6v0
Initiator IP: fakeip
TPGT: 3
ISID: fakeid
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
32 3
42 4
Shareable: YES""" % {'sgname1': sgname1,
'sgname2': sgname2}, 0)
def LUN_DELETE_IN_SG_ERROR(self, up_to_date=True):
if up_to_date:
return ("Cannot unbind LUN "
"because it's contained in a Storage Group",
156)
else:
return ("SP B: Request failed. "
"Host LUN/LUN mapping still exists.",
0)
def set_path_cmd(self, gname, hba, sp, spport, vport=None, ip=None):
if vport is None:
return ('storagegroup', '-setpath', '-gname', gname,
'-hbauid', hba,
'-sp', sp, '-spport', spport,
'-ip', ip, '-host', gname, '-o')
return ('storagegroup', '-setpath', '-gname', gname,
'-hbauid', hba,
'-sp', sp, '-spport', spport, '-spvport', vport,
'-ip', ip, '-host', gname, '-o')
class DriverTestCaseBase(test.TestCase):
    def setUp(self):
        """Stub out CLI execution and build a fake driver configuration."""
        super(DriverTestCaseBase, self).setUp()
        # Never shell out: route every naviseccli call through the fake.
        self.stubs.Set(emc_vnx_cli.CommandLineHelper, 'command_execute',
                       self.fake_command_execute_for_driver_setup)

        self.stubs.Set(emc_vnx_cli.CommandLineHelper, 'get_array_serial',
                       mock.Mock(return_value={'array_serial':
                                               'fakeSerial'}))
        # Pretend the naviseccli binary exists on disk.
        self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))

        # Shrink the driver's polling intervals so tests run fast.
        self.stubs.Set(emc_vnx_cli, 'INTERVAL_5_SEC', 0.01)
        self.stubs.Set(emc_vnx_cli, 'INTERVAL_30_SEC', 0.01)

        # Minimal backend configuration pointing at a fake array.
        self.configuration = conf.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
        self.configuration.san_ip = '10.0.0.1'
        self.configuration.storage_vnx_pool_name = 'unit_test_pool'
        self.configuration.san_login = 'sysadmin'
        self.configuration.san_password = 'sysadmin'
        self.configuration.initiator_auto_registration = True
        self.configuration.check_max_pool_luns_threshold = False
        self.stubs.Set(self.configuration, 'safe_get',
                       self.fake_safe_get({'storage_vnx_pool_names':
                                           'unit_test_pool',
                                           'volume_backend_name':
                                           'namedbackend'}))
        self.testData = EMCVNXCLIDriverTestData()
        # Expected CLI prefix tests compare issued commands against.
        self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
            '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
        self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
        self.configuration.ignore_pool_full_threshold = False
def driverSetup(self, commands=tuple(), results=tuple()):
self.driver = self.generate_driver(self.configuration)
fake_command_execute = self.get_command_execute_simulator(
commands, results)
fake_cli = mock.Mock(side_effect=fake_command_execute)
self.driver.cli._client.command_execute = fake_cli
return fake_cli
def generate_driver(self, conf):
raise NotImplementedError
def get_command_execute_simulator(self, commands=tuple(),
results=tuple()):
assert(len(commands) == len(results))
def fake_command_execute(*args, **kwargv):
for i in range(len(commands)):
if args == commands[i]:
if isinstance(results[i], list):
if len(results[i]) > 0:
ret = results[i][0]
del results[i][0]
return ret
else:
return results[i]
return self.standard_fake_command_execute(*args, **kwargv)
return fake_command_execute
def standard_fake_command_execute(self, *args, **kwargv):
standard_commands = [
self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'),
self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'),
self.testData.POOL_PROPERTY_CMD]
standard_results = [
self.testData.LUN_PROPERTY('vol1'),
self.testData.LUN_PROPERTY('vol2'),
self.testData.LUN_PROPERTY('vol2_dest'),
self.testData.LUN_PROPERTY('vol-vol1'),
self.testData.LUN_PROPERTY('snapshot1'),
self.testData.POOL_PROPERTY]
standard_default = SUCCEED
for i in range(len(standard_commands)):
if args == standard_commands[i]:
return standard_results[i]
return standard_default
def fake_command_execute_for_driver_setup(self, *command, **kwargv):
if (command == ('connection', '-getport', '-address', '-vlanid') or
command == ('connection', '-getport', '-vlanid')):
return self.testData.ALL_PORTS
elif command == ('storagepool', '-list', '-state'):
return self.testData.POOL_GET_STATE_RESULT([
{'pool_name': self.testData.test_pool_name, 'state': "Ready"},
{'pool_name': "unit_test_pool2", 'state': "Ready"}])
if command == self.testData.GETFCPORT_CMD():
return self.testData.FC_PORTS
else:
return SUCCEED
def fake_safe_get(self, values):
def _safe_get(key):
return values.get(key)
return _safe_get
class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
def generate_driver(self, conf):
return emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    def test_create_destroy_volume_without_extra_spec(self):
        """Create/delete with no extra specs defaults to thick provisioning."""
        fake_cli = self.driverSetup()
        self.driver.create_volume(self.testData.test_volume)
        self.driver.delete_volume(self.testData.test_volume)
        # Expect: create the LUN, poll its properties, then delete it.
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol1', 1,
                'unit_test_pool',
                'thick', None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                      poll=False),
            mock.call(*self.testData.LUN_DELETE_CMD('vol1'))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    def test_create_volume_ignore_thresholds(self):
        """ignore_pool_full_threshold is forwarded to the creation command."""
        self.configuration.ignore_pool_full_threshold = True
        fake_cli = self.driverSetup()
        self.driver.create_volume(self.testData.test_volume)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol1', 1,
                'unit_test_pool',
                'thick', None,
                ignore_thresholds=True, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                      poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'compressed'}))
    def test_create_volume_compressed(self):
        """A 'compressed' extra spec creates the LUN and enables compression."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        # Pretend all array enablers are installed.
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        # case
        self.driver.create_volume(self.testData.test_volume_with_type)
        # verification: create, poll twice, then turn compression on.
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'compressed', None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=True),
            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
                1))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        'oslo_service.loopingcall.FixedIntervalLoopingCall',
        new=utils.ZeroIntervalLoopingCall)
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'provisioning:type': 'thin',
                                'storagetype:provisioning': 'thick'}))
    def test_create_volume_thin(self):
        """'provisioning:type' wins over legacy 'storagetype:provisioning'."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        # case
        self.driver.create_volume(self.testData.test_volume_with_type)
        # verification: the LUN must be created 'thin', not 'thick'.
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'thin', None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        'oslo_service.loopingcall.FixedIntervalLoopingCall',
        new=utils.ZeroIntervalLoopingCall)
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'provisioning:type': 'thick'}))
    def test_create_volume_thick(self):
        """'provisioning:type' thick creates a thick-provisioned LUN."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', False),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        # case
        self.driver.create_volume(self.testData.test_volume_with_type)
        # verification
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'thick', None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'compressed',
                                'storagetype:tiering': 'HighestAvailable'}))
    def test_create_volume_compressed_tiering_highestavailable(self):
        """Compression plus HighestAvailable tiering maps to the right CLI."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        # case
        self.driver.create_volume(self.testData.test_volume_with_type)
        # verification: tiering value is lower-cased for the CLI.
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'compressed', 'highestavailable', poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=True),
            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
                1))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'deduplicated'}))
    def test_create_volume_deduplicated(self):
        """A 'deduplicated' extra spec is passed through to LUN creation."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        # case
        self.driver.create_volume(self.testData.test_volume_with_type)
        # verification
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'deduplicated', None, poll=False))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:tiering': 'Auto'}))
    def test_create_volume_tiering_auto(self):
        """Tiering 'Auto' alone creates a LUN with auto tiering, no provisioning."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        # case
        self.driver.create_volume(self.testData.test_volume_with_type)
        # verification
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                None, 'auto', poll=False))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:tiering': 'Auto',
                                'storagetype:provisioning': 'Deduplicated'}))
    def test_create_volume_deduplicated_tiering_auto(self):
        """Deduplication combined with auto tiering must be rejected."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        self.driverSetup(commands, results)
        ex = self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.create_volume,
            self.testData.test_volume_with_type)
        # The error message should name the conflicting options.
        self.assertTrue(
            re.match(r".*deduplicated and auto tiering can't be both enabled",
                     ex.msg))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'Compressed'}))
    def test_create_volume_compressed_no_enabler(self):
        """Compression without the array enabler installed must fail."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   ('No package', 0)]
        self.driverSetup(commands, results)
        ex = self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.create_volume,
            self.testData.test_volume_with_type)
        self.assertTrue(
            re.match(r".*Compression Enabler is not installed",
                     ex.msg))
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'copytype:snap': 'true'}))
def test_create_volume_snapcopy_in_cg(self):
self.driverSetup()
vol = self.testData.test_volume_with_type.copy()
vol['consistencygroup_id'] = '7450764f-9d24-4c70-ad46-7cd90acd4292'
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume,
vol)
def test_get_volume_stats(self):
commands = [self.testData.NDU_LIST_CMD,
self.testData.POOL_GET_ALL_CMD(True)]
results = [self.testData.NDU_LIST_RESULT,
self.testData.POOL_GET_ALL_RESULT(True)]
self.driverSetup(commands, results)
stats = self.driver.get_volume_stats(True)
self.assertTrue(stats['driver_version'] == VERSION,
"driver_version is incorrect")
self.assertTrue(
stats['storage_protocol'] == 'iSCSI',
"storage_protocol is incorrect")
self.assertTrue(
stats['vendor_name'] == "EMC",
"vendor name is incorrect")
self.assertTrue(
stats['volume_backend_name'] == "namedbackend",
"volume backend name is incorrect")
pool_stats = stats['pools'][0]
expected_pool_stats = {
'free_capacity_gb': 3105.303,
'reserved_percentage': 32,
'location_info': 'unit_test_pool|fakeSerial',
'total_capacity_gb': 3281.146,
'provisioned_capacity_gb': 536.14,
'compression_support': 'True',
'deduplication_support': 'True',
'thin_provisioning_support': True,
'thick_provisioning_support': True,
'max_over_subscription_ratio': 20.0,
'consistencygroup_support': 'True',
'pool_name': 'unit_test_pool',
'fast_cache_enabled': 'True',
'fast_support': 'True'}
self.assertEqual(expected_pool_stats, pool_stats)
    def test_get_volume_stats_ignore_threshold(self):
        """Ignoring pool-full thresholds lowers reserved_percentage to 2."""
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.POOL_GET_ALL_CMD(True)]
        results = [self.testData.NDU_LIST_RESULT,
                   self.testData.POOL_GET_ALL_RESULT(True)]
        self.driverSetup(commands, results)
        self.driver.cli.ignore_pool_full_threshold = True
        stats = self.driver.get_volume_stats(True)
        pool_stats = stats['pools'][0]
        self.assertEqual(2, pool_stats['reserved_percentage'])
    def test_get_volume_stats_reserved_percentage_from_conf(self):
        """A configured reserved_percentage overrides the ignored threshold."""
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.POOL_GET_ALL_CMD(True)]
        results = [self.testData.NDU_LIST_RESULT,
                   self.testData.POOL_GET_ALL_RESULT(True)]
        self.configuration.reserved_percentage = 22
        self.driverSetup(commands, results)
        self.driver.cli.ignore_pool_full_threshold = True
        stats = self.driver.get_volume_stats(True)
        pool_stats = stats['pools'][0]
        self.assertEqual(22, pool_stats['reserved_percentage'])
def test_get_volume_stats_too_many_luns(self):
commands = [self.testData.NDU_LIST_CMD,
self.testData.POOL_GET_ALL_CMD(True),
self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
results = [self.testData.NDU_LIST_RESULT,
self.testData.POOL_GET_ALL_RESULT(True),
self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.check_max_pool_luns_threshold = True
stats = self.driver.get_volume_stats(True)
pool_stats = stats['pools'][0]
self.assertTrue(
pool_stats['free_capacity_gb'] == 0,
"free_capacity_gb is incorrect")
expect_cmd = [
mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
poll=False)]
fake_cli.assert_has_calls(expect_cmd)
self.driver.cli.check_max_pool_luns_threshold = False
stats = self.driver.get_volume_stats(True)
pool_stats = stats['pools'][0]
self.assertTrue(stats['driver_version'] is not None,
"driver_version is not returned")
self.assertTrue(
pool_stats['free_capacity_gb'] == 3105.303,
"free_capacity_gb is incorrect")
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            side_effect=[1, 1]))
    def test_volume_migration_timeout(self):
        """Migration survives a transient network error and still succeeds."""
        commands = [self.testData.MIGRATION_CMD(),
                    self.testData.MIGRATION_VERIFY_CMD(1)]
        # First response simulates a dropped management connection (rc 255).
        FAKE_ERROR_MSG = """\
A network error occurred while trying to connect: '10.244.213.142'.
Message : Error occurred because connection refused. \
Unable to establish a secure connection to the Management Server.
"""
        FAKE_ERROR_MSG = FAKE_ERROR_MSG.replace('\n', ' ')
        FAKE_MIGRATE_PROPERTY = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: MIGRATED
Percent Complete: 100
Time Remaining: 0 second(s)
"""
        results = [(FAKE_ERROR_MSG, 255),
                   [(FAKE_MIGRATE_PROPERTY, 0),
                    (FAKE_MIGRATE_PROPERTY, 0),
                    ('The specified source LUN is not currently migrating',
                     23)]]
        fake_cli = self.driverSetup(commands, results)
        fakehost = {'capabilities': {'location_info':
                                     "unit_test_pool2|fakeSerial",
                                     'storage_protocol': 'iSCSI'}}
        ret = self.driver.migrate_volume(None, self.testData.test_volume,
                                         fakehost)[0]
        self.assertTrue(ret)
        # verification: the migration is started once and then verified
        # until the source LUN reports it is no longer migrating.
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(1, 1),
                                retry_disable=True,
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(
                    return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            side_effect=[1, 1]))
    def test_volume_migration(self):
        """Happy-path migration issues start + verify commands and succeeds."""
        commands = [self.testData.MIGRATION_CMD(),
                    self.testData.MIGRATION_VERIFY_CMD(1)]
        FAKE_MIGRATE_PROPERTY = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: MIGRATED
Percent Complete: 100
Time Remaining: 0 second(s)
"""
        results = [SUCCEED,
                   [(FAKE_MIGRATE_PROPERTY, 0),
                    ('The specified source LUN is not '
                     'currently migrating', 23)]]
        fake_cli = self.driverSetup(commands, results)
        fake_host = {'capabilities': {'location_info':
                                      "unit_test_pool2|fakeSerial",
                                      'storage_protocol': 'iSCSI'}}
        ret = self.driver.migrate_volume(None, self.testData.test_volume,
                                         fake_host)[0]
        self.assertTrue(ret)
        # verification
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
                                retry_disable=True,
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(
                    return_value={'lun_id': 5}))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:tiering': 'Auto'}))
    def test_volume_migration_02(self):
        """Migration of a typed volume (lun_id 5) runs the same sequence."""
        commands = [self.testData.MIGRATION_CMD(5, 5),
                    self.testData.MIGRATION_VERIFY_CMD(5)]
        FAKE_MIGRATE_PROPERTY = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: MIGRATED
Percent Complete: 100
Time Remaining: 0 second(s)
"""
        results = [SUCCEED,
                   [(FAKE_MIGRATE_PROPERTY, 0),
                    ('The specified source LUN is not currently migrating',
                     23)]]
        fake_cli = self.driverSetup(commands, results)
        fakehost = {'capabilities': {'location_info':
                                     "unit_test_pool2|fakeSerial",
                                     'storage_protocol': 'iSCSI'}}
        ret = self.driver.migrate_volume(None, self.testData.test_volume5,
                                         fakehost)[0]
        self.assertTrue(ret)
        # verification
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(5, 5),
                                retry_disable=True,
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(5),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(5),
                                poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(
                    return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            side_effect=[1, 1]))
    def test_volume_migration_failed(self):
        """A CLI error on the start command makes migrate_volume return False."""
        commands = [self.testData.MIGRATION_CMD()]
        results = [FAKE_ERROR_RETURN]
        fake_cli = self.driverSetup(commands, results)
        fakehost = {'capabilities': {'location_info':
                                     "unit_test_pool2|fakeSerial",
                                     'storage_protocol': 'iSCSI'}}
        ret = self.driver.migrate_volume(None, self.testData.test_volume,
                                         fakehost)[0]
        self.assertFalse(ret)
        # verification: only the start command should have been issued.
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
                                retry_disable=True,
                                poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(
                    return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            side_effect=[1, 1]))
    def test_volume_migration_stopped(self):
        """A migration that stops/faults raises and gets cancelled."""
        commands = [self.testData.MIGRATION_CMD(),
                    self.testData.MIGRATION_VERIFY_CMD(1),
                    self.testData.MIGRATION_CANCEL_CMD(1)]
        # Verify responses: first MIGRATING, then STOPPED, then gone.
        results = [SUCCEED, [(self.testData.MIGRATE_PROPERTY_MIGRATING, 0),
                             (self.testData.MIGRATE_PROPERTY_STOPPED, 0),
                             ('The specified source LUN is not '
                              'currently migrating', 23)],
                   SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        fake_host = {'capabilities': {'location_info':
                                      "unit_test_pool2|fakeSerial",
                                      'storage_protocol': 'iSCSI'}}
        self.assertRaisesRegex(exception.VolumeBackendAPIException,
                               "Migration of LUN 1 has been stopped or"
                               " faulted.",
                               self.driver.migrate_volume,
                               None, self.testData.test_volume, fake_host)
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
                                retry_disable=True,
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=False),
                      mock.call(*self.testData.MIGRATION_CANCEL_CMD(1)),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(
                    return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            side_effect=[1, 1]))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:tiering': 'Auto',
                                'copytype:snap': 'true'}))
    def test_volume_migration_smp(self):
        """Migrating a snap-mount-point volume converts it to a plain LUN.

        The backing temp snapshot is deleted and the returned
        provider_location is rewritten from type^smp to type^lun.
        """
        commands = [self.testData.MIGRATION_CMD(),
                    self.testData.MIGRATION_VERIFY_CMD(1)]
        FAKE_MIGRATE_PROPERTY = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: MIGRATED
Percent Complete: 100
Time Remaining: 0 second(s)
"""
        results = [SUCCEED,
                   [(FAKE_MIGRATE_PROPERTY, 0),
                    ('The specified source LUN is not '
                     'currently migrating', 23)]]
        fake_cli = self.driverSetup(commands, results)
        fake_host = {'capabilities': {'location_info':
                                      "unit_test_pool2|fakeSerial",
                                      'storage_protocol': 'iSCSI'}}
        vol = self.testData.test_volume.copy()
        vol['provider_location'] = 'system^FNM11111|type^smp|id^1'
        tmp_snap = "snap-as-vol-%s" % vol['id']
        ret = self.driver.migrate_volume(None,
                                         vol,
                                         fake_host)
        self.assertTrue(ret[0])
        self.assertTrue(
            ret[1]['provider_location'].find('type^lun') > 0)
        # verification
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
                                retry_disable=True,
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=False),
                      mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
                                poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
def test_create_destroy_volume_snapshot(self):
fake_cli = self.driverSetup()
# case
self.driver.create_snapshot(self.testData.test_snapshot)
self.driver.delete_snapshot(self.testData.test_snapshot)
# verification
expect_cmd = [mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1'),
poll=False),
mock.call(*self.testData.SNAP_DELETE_CMD('snapshot1'),
poll=True)]
fake_cli.assert_has_calls(expect_cmd)
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_snapshot_preparing_volume(self):
        """Snapshot creation retries while the source LUN is still preparing."""
        commands = [self.testData.SNAP_CREATE_CMD('snapshot1'),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol1')]
        # First snap attempt fails with a 'preparing' error; the LUN then
        # transitions Preparing -> Optimizing -> None before the retry.
        results = [[self.testData.LUN_PREP_ERROR(), SUCCEED],
                   [self.testData.LUN_PROPERTY('vol1', size=1,
                                               operation='Preparing'),
                    self.testData.LUN_PROPERTY('vol1', size=1,
                                               operation='Optimizing'),
                    self.testData.LUN_PROPERTY('vol1', size=1,
                                               operation='None')]]
        fake_cli = self.driverSetup(commands, results)
        self.driver.create_snapshot(self.testData.test_snapshot)
        expected = [mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1'),
                              poll=False),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=True),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=False),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=False),
                    mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1'),
                              poll=False)]
        fake_cli.assert_has_calls(expected)
    @mock.patch(
        "oslo_concurrency.processutils.execute",
        mock.Mock(
            return_value=(
                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection(self):
        """Attach covers auto registration, manual registration, and no-ping.

        Three sequential scenarios run against fresh fake CLIs; each
        verifies the storage-group commands the driver should issue.
        """
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
                   self.testData.PING_OK]
        fake_cli = self.driverSetup(commands, results)
        connection_info = self.driver.initialize_connection(
            self.testData.test_volume,
            self.testData.connector)
        self.assertEqual(self.testData.iscsi_connection_info,
                         connection_info)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222', 'A',
                        4, 0, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222',
                        'A', 0, 0, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222',
                        'B', 2, 0, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
                                                          '10.0.0.2'))]
        fake_cli.assert_has_calls(expected)
        # Test for manual registration
        self.configuration.initiator_auto_registration = False
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
        results = [
            [("No group", 83),
             self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
            ('', 0),
            self.testData.PING_OK
        ]
        fake_cli = self.driverSetup(commands, results)
        test_volume_rw = self.testData.test_volume_rw
        connection_info = self.driver.initialize_connection(
            test_volume_rw,
            self.testData.connector)
        self.assertEqual(self.testData.iscsi_connection_info,
                         connection_info)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o', poll=False),
                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
                                                          '10.0.0.2'))]
        fake_cli.assert_has_calls(expected)
        # Test No Ping
        self.configuration.iscsi_initiators = None
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost')]
        results = [
            [("No group", 83),
             self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
            ('', 0)]
        fake_cli = self.driverSetup(commands, results)
        test_volume_rw = self.testData.test_volume_rw.copy()
        test_volume_rw['provider_location'] = 'system^fakesn|type^lun|id^1'
        connection_info = self.driver.initialize_connection(
            test_volume_rw,
            self.testData.connector)
        self.assertEqual(self.testData.iscsi_connection_info,
                         connection_info)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o', poll=False)]
        fake_cli.assert_has_calls(expected)
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
                'CommandLineHelper.ping_node',
                mock.Mock(return_value=True))
    @mock.patch('random.shuffle', mock.Mock(return_value=0))
    def test_initialize_connection_multipath(self):
        """A multipath connector gets all reachable iSCSI targets back."""
        self.configuration.initiator_auto_registration = False
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost')]
        results = [self.testData.STORAGE_GROUP_HAS_MAP_MP('fakehost')]
        fake_cli = self.driverSetup(commands, results)
        # Seed the cached target map with two SP-A portals and none on B.
        self.driver.cli.iscsi_targets = {
            'A': [
                {'Port WWN': 'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
                 'SP': 'A',
                 'Port ID': 4,
                 'Virtual Port ID': 0,
                 'IP Address': '10.244.214.118'},
                {'Port WWN': 'iqn.1992-04.com.emc:cx.fnm00124000215.a5',
                 'SP': 'A',
                 'Port ID': 5,
                 'Virtual Port ID': 0,
                 'IP Address': '10.244.214.119'}],
            'B': []}
        test_volume_rw = self.testData.test_volume_rw.copy()
        test_volume_rw['provider_location'] = 'system^fakesn|type^lun|id^1'
        connector_m = dict(self.testData.connector)
        connector_m['multipath'] = True
        connection_info = self.driver.initialize_connection(
            test_volume_rw,
            connector_m)
        self.assertEqual(self.testData.iscsi_connection_info_mp,
                         connection_info)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o', poll=False)]
        fake_cli.assert_has_calls(expected)
@mock.patch(
"oslo_concurrency.processutils.execute",
mock.Mock(
return_value=(
"fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(
return_value=3))
@mock.patch('random.randint',
mock.Mock(return_value=0))
def test_initialize_connection_exist(self):
"""Test if initialize connection exists.
A LUN is added to the SG right before the attach,
it may not exists in the first SG query
"""
# Test for auto registration
self.configuration.initiator_auto_registration = True
self.configuration.max_luns_per_storage_group = 2
commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
('storagegroup', '-addhlu', '-hlu', 2, '-alu', 3,
'-gname', 'fakehost', '-o'),
self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
results = [[self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost')],
("fakeerror", 23),
self.testData.PING_OK]
fake_cli = self.driverSetup(commands, results)
iscsi_data = self.driver.initialize_connection(
self.testData.test_volume,
self.testData.connector
)
self.assertTrue(iscsi_data['data']['target_lun'] == 2,
"iSCSI initialize connection returned wrong HLU")
expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
poll=False),
mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 3,
'-gname', 'fakehost', '-o',
poll=False),
mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
poll=True),
mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
'10.0.0.2'))]
fake_cli.assert_has_calls(expected)
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection_iscsi_white_list(self):
        """Auto registration only registers ports on the io_port_list."""
        self.configuration.io_port_list = 'a-0-0,B-2-0'
        test_volume = self.testData.test_volume.copy()
        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_HAS_MAP_ISCSI('fakehost')]]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.iscsi_targets = {'A': [{'SP': 'A', 'Port ID': 0,
                                                'Virtual Port ID': 0,
                                                'Port WWN': 'fake_iqn',
                                                'IP Address': '192.168.1.1'}],
                                         'B': [{'SP': 'B', 'Port ID': 2,
                                                'Virtual Port ID': 0,
                                                'Port WWN': 'fake_iqn1',
                                                'IP Address': '192.168.1.2'}]}
        self.driver.initialize_connection(
            test_volume,
            self.testData.connector)
        # Only A-0-0 and B-2-0 paths are set; A-4 is not on the white list.
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222',
                        'A', 0, 0, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222',
                        'B', 2, 0, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False)]
        fake_cli.assert_has_calls(expected)
    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
                'EMCVnxCliBase._build_pool_stats',
                mock.Mock(return_value=None))
    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
                'CommandLineHelper.get_pool',
                mock.Mock(return_value={'total_capacity_gb': 0.0,
                                        'free_capacity_gb': 0.0}))
    def test_update_iscsi_io_ports(self):
        """update_volume_stats refreshes the white-listed iSCSI target map."""
        self.configuration.io_port_list = 'a-0-0,B-2-0'
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.GETPORT_CMD()]
        results = [self.testData.WHITE_LIST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.update_volume_stats()
        expected = [mock.call(*self.testData.GETPORT_CMD(), poll=False)]
        fake_cli.assert_has_calls(expected)
        io_ports = self.driver.cli.iscsi_targets
        # Only the white-listed ports A-0-0 and B-2-0 should remain.
        self.assertEqual((0, 'iqn.1992-04.com.emc:cx.fnmxxx.a0'),
                         (io_ports['A'][0]['Port ID'],
                          io_ports['A'][0]['Port WWN']))
        self.assertEqual((2, 'iqn.1992-04.com.emc:cx.fnmxxx.b2'),
                         (io_ports['B'][0]['Port ID'],
                          io_ports['B'][0]['Port WWN']))
@mock.patch(
"oslo_concurrency.processutils.execute",
mock.Mock(
return_value=(
"fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(
return_value=4))
@mock.patch('random.randint',
mock.Mock(return_value=0))
def test_initialize_connection_no_hlu_left_1(self):
"""Test initialize connection with no hlu per first SG query.
There is no hlu per the first SG query
But there are hlu left after the full poll
"""
# Test for auto registration
self.configuration.initiator_auto_registration = True
self.configuration.max_luns_per_storage_group = 2
commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
('storagegroup', '-addhlu', '-hlu', 2, '-alu', 4,
'-gname', 'fakehost', '-o'),
self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
results = [[self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
("", 0),
self.testData.PING_OK]
fake_cli = self.driverSetup(commands, results)
iscsi_data = self.driver.initialize_connection(
self.testData.test_volume,
self.testData.connector)
self.assertTrue(iscsi_data['data']['target_lun'] == 2,
"iSCSI initialize connection returned wrong HLU")
expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
poll=False),
mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
poll=True),
mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 4,
'-gname', 'fakehost', '-o',
poll=False),
mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
u'10.0.0.2'))]
fake_cli.assert_has_calls(expected)
    @mock.patch(
        "oslo_concurrency.processutils.execute",
        mock.Mock(
            return_value=(
                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            return_value=4))
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection_no_hlu_left_2(self):
        """Test initialize connection with no hlu left."""
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        self.configuration.max_luns_per_storage_group = 2
        # Both the cached query and the polled retry report a full storage
        # group, so no HLU can be assigned and the attach must fail.
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost')]
        results = [
            [self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost'),
             self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost')]
        ]
        fake_cli = self.driverSetup(commands, results)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          self.testData.test_volume,
                          self.testData.connector)
        expected = [
            mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                      poll=False),
            mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                      poll=True),
        ]
        fake_cli.assert_has_calls(expected)
    @mock.patch('os.path.exists', return_value=True)
    def test_terminate_connection(self, _mock_exists):
        """terminate_connection removes the HLU mapped to the volume's LUN.

        The mocked storage group maps ALU 1 -> HLU 16; since the volume's
        LUN id is 1, HLU 16 must be removed from the connector's host
        storage group.
        """
        self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
            configuration=self.configuration)
        cli_helper = self.driver.cli._client
        data = {'storage_group_name': "fakehost",
                'storage_group_uid': "2F:D4:00:00:00:00:00:"
                "00:00:00:FF:E5:3A:03:FD:6D",
                'lunmap': {1: 16, 2: 88, 3: 47}}
        cli_helper.get_storage_group = mock.Mock(
            return_value=data)
        lun_info = {'lun_name': "unit_test_lun",
                    'lun_id': 1,
                    'pool': "unit_test_pool",
                    'attached_snapshot': "N/A",
                    'owner': "A",
                    'total_capacity_gb': 1.0,
                    'state': "Ready"}
        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
        self.driver.terminate_connection(self.testData.test_volume,
                                         self.testData.connector)
        cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
            16, self.testData.connector["host"])
def test_create_volume_cli_failed(self):
commands = [self.testData.LUN_CREATION_CMD(
'failed_vol1', 1, 'unit_test_pool', None, None, poll=False)]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.create_volume,
self.testData.test_failed_volume)
expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD(
'failed_vol1', 1, 'unit_test_pool', None, None, poll=False))]
fake_cli.assert_has_calls(expect_cmd)
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_create_faulted_volume(self):
        """A Faulted LUN that is still 'Preparing' is polled until settled.

        The driver re-queries LUN properties while the current operation is
        'Preparing' and accepts the LUN once the operation becomes 'None'.
        """
        volume_name = 'faulted_volume'
        cmd_create = self.testData.LUN_CREATION_CMD(
            volume_name, 1, 'unit_test_pool', None, None, poll=False)
        cmd_list_preparing = self.testData.LUN_PROPERTY_ALL_CMD(volume_name)
        commands = [cmd_create, cmd_list_preparing]
        # Two consecutive property results: first still Preparing, then done.
        results = [SUCCEED,
                   [self.testData.LUN_PROPERTY(name=volume_name,
                                               state='Faulted',
                                               faulted='true',
                                               operation='Preparing'),
                    self.testData.LUN_PROPERTY(name=volume_name,
                                               state='Faulted',
                                               faulted='true',
                                               operation='None')]]
        fake_cli = self.driverSetup(commands, results)
        faulted_volume = self.testData.test_volume.copy()
        faulted_volume.update({'name': volume_name})
        self.driver.create_volume(faulted_volume)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                volume_name, 1, 'unit_test_pool', None, None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(volume_name),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(volume_name),
                      poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_create_offline_volume(self):
        """Creating a LUN that comes up Offline raises a backend error."""
        volume_name = 'offline_volume'
        cmd_create = self.testData.LUN_CREATION_CMD(
            volume_name, 1, 'unit_test_pool', None, None, poll=False)
        cmd_list = self.testData.LUN_PROPERTY_ALL_CMD(volume_name)
        commands = [cmd_create, cmd_list]
        results = [SUCCEED,
                   self.testData.LUN_PROPERTY(name=volume_name,
                                              state='Offline',
                                              faulted='true')]
        self.driverSetup(commands, results)
        offline_volume = self.testData.test_volume.copy()
        offline_volume.update({'name': volume_name})
        # The error message must name the volume and its Offline state.
        self.assertRaisesRegex(exception.VolumeBackendAPIException,
                               "Volume %s was created in VNX, but in"
                               " Offline state." % volume_name,
                               self.driver.create_volume,
                               offline_volume)
def test_create_volume_snapshot_failed(self):
commands = [self.testData.SNAP_CREATE_CMD('failed_snapshot')]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
# case
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.create_snapshot,
self.testData.test_failed_snapshot)
# verification
expect_cmd = [
mock.call(
*self.testData.SNAP_CREATE_CMD('failed_snapshot'),
poll=False)]
fake_cli.assert_has_calls(expect_cmd)
    def test_create_volume_from_snapshot(self):
        """Create-from-snapshot goes SMP + attach + migrate to a new LUN.

        Run twice: once with default config, once with
        ``ignore_pool_full_threshold`` set, which must add
        ``ignore_thresholds`` to the destination LUN creation.
        """
        # set up
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        output_dest = self.testData.LUN_PROPERTY("vol2_dest")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        output_migrate = ("", 0)
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        # Return code 23 means "not currently migrating", i.e. the
        # migration session has completed.
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
                    cmd_migrate_verify]
        results = [output_dest, output_dest, output_migrate,
                   output_migrate_verify]
        fake_cli1 = self.driverSetup(commands, results)
        self.driver.create_volume_from_snapshot(self.testData.test_volume2,
                                                self.testData.test_snapshot)
        expect_cmd1 = [
            mock.call(
                *self.testData.SNAP_MP_CREATE_CMD(
                    name='vol2', source='vol1'),
                poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='vol2', snapName='snapshot1')),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol2_dest', 1, 'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      retry_disable=True,
                      poll=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True)]
        fake_cli1.assert_has_calls(expect_cmd1)
        # Second pass: the same flow but with pool-full threshold ignored.
        self.configuration.ignore_pool_full_threshold = True
        fake_cli2 = self.driverSetup(commands, results)
        self.driver.create_volume_from_snapshot(self.testData.test_volume2,
                                                self.testData.test_snapshot)
        expect_cmd2 = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol2_dest', 1, 'unit_test_pool', None, None,
                ignore_thresholds=True))]
        fake_cli2.assert_has_calls(expect_cmd2)
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'copytype:snap': 'true'}))
def test_create_volume_from_snapshot_smp(self):
fake_cli = self.driverSetup()
vol = self.driver.create_volume_from_snapshot(
self.testData.test_volume_with_type,
self.testData.test_snapshot)
self.assertTrue(
vol['provider_location'].find('type^smp') > 0)
expect_cmd = [
mock.call(
*self.testData.SNAP_COPY_CMD(
src_snap='snapshot1',
snap_name='snap-as-vol-%s' % '1')),
mock.call(
*self.testData.SNAP_MODIFY_CMD(
name='snap-as-vol-%s' % '1',
rw='yes')),
mock.call(
*self.testData.SNAP_MP_CREATE_CMD(
name='vol_with_type', source='vol1'),
poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
poll=True),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol_with_type', snapName='snap-as-vol-%s' % '1'))]
fake_cli.assert_has_calls(expect_cmd)
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_create_volume_from_snapshot_sync_failed(self):
        """A failed migration is cancelled and all temp objects cleaned up.

        The migration verify first errors out; the driver must cancel the
        session, delete the destination LUN, detach the snapshot and
        delete the SMP before raising VolumeBackendAPIException.
        """
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        output_dest = self.testData.LUN_PROPERTY("vol2_dest")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        cmd_detach_lun = ('lun', '-detach', '-name', 'vol2', '-o')
        output_migrate = ("", 0)
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        cmd_migrate_cancel = self.testData.MIGRATION_CANCEL_CMD(1)
        output_migrate_cancel = ("", 0)
        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
                    cmd_migrate_verify, cmd_migrate_cancel]
        # First verify call fails, the second (after cancel) reports the
        # session gone.
        results = [output_dest, output_dest, output_migrate,
                   [FAKE_ERROR_RETURN, output_migrate_verify],
                   output_migrate_cancel]
        fake_cli = self.driverSetup(commands, results)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          self.testData.test_volume2,
                          self.testData.test_snapshot)
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_MP_CREATE_CMD(
                    name='vol2', source='vol1'),
                poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='vol2', snapName='snapshot1')),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol2_dest', 1, 'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      retry_disable=True,
                      poll=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True),
            mock.call(*self.testData.MIGRATION_CANCEL_CMD(1)),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=False),
            mock.call(*self.testData.LUN_DELETE_CMD('vol2_dest')),
            mock.call(*cmd_detach_lun),
            mock.call(*self.testData.LUN_DELETE_CMD('vol2'))]
        fake_cli.assert_has_calls(expect_cmd)
    def test_create_vol_from_snap_failed_in_migrate_lun(self):
        """A failed migrate-start triggers full cleanup of temp objects.

        When the migration command itself errors, the destination LUN is
        deleted, the snapshot detached and the SMP removed before
        VolumeBackendAPIException is raised.
        """
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        output_dest = self.testData.LUN_PROPERTY("vol2_dest")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        cmd_detach_lun = ('lun', '-detach', '-name', 'vol2', '-o')
        commands = [cmd_dest, cmd_migrate]
        results = [output_dest, FAKE_ERROR_RETURN]
        fake_cli = self.driverSetup(commands, results)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          self.testData.test_volume2,
                          self.testData.test_snapshot)
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_MP_CREATE_CMD(
                    name='vol2', source='vol1'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'), poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='vol2', snapName='snapshot1')),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol2_dest', 1, 'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      poll=True,
                      retry_disable=True),
            mock.call(*self.testData.LUN_DELETE_CMD('vol2_dest')),
            mock.call(*cmd_detach_lun),
            mock.call(*self.testData.LUN_DELETE_CMD('vol2'))]
        fake_cli.assert_has_calls(expect_cmd)
    def test_create_cloned_volume(self):
        """Cloning uses a temp snapshot + SMP + migration, then cleans up.

        A temporary snapshot of the source is taken, attached to an SMP,
        migrated onto the real destination LUN, and the temp snapshot is
        deleted afterwards.
        """
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("clone1_dest")
        cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD("clone1_dest")
        output_dest = self.testData.LUN_PROPERTY("clone1_dest")
        cmd_clone = self.testData.LUN_PROPERTY_ALL_CMD("clone1")
        output_clone = self.testData.LUN_PROPERTY("clone1")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        output_migrate = ("", 0)
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        commands = [cmd_dest, cmd_dest_p, cmd_clone, cmd_migrate,
                    cmd_migrate_verify]
        results = [output_dest, output_dest, output_clone, output_migrate,
                   output_migrate_verify]
        fake_cli = self.driverSetup(commands, results)
        volume = self.testData.test_volume.copy()
        volume['name'] = 'clone1'
        self.driver.create_cloned_volume(volume, self.testData.test_volume)
        # Temp snapshot name is derived from the source volume's id.
        tmp_snap = 'tmp-snap-' + self.testData.test_volume['id']
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_CREATE_CMD(tmp_snap), poll=False),
            mock.call(*self.testData.SNAP_MP_CREATE_CMD(
                name='clone1',
                source='vol1'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('clone1'),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='clone1', snapName=tmp_snap)),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'clone1_dest', 1, 'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('clone1_dest'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('clone1_dest'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      poll=True,
                      retry_disable=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True),
            mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
                      poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'copytype:snap': 'true'}))
def test_create_cloned_volume_smp(self):
fake_cli = self.driverSetup()
vol = self.driver.create_cloned_volume(
self.testData.test_clone,
self.testData.test_volume_with_type)
self.assertTrue(
vol['provider_location'].find('type^smp') > 0)
expect_cmd = [
mock.call(
*self.testData.SNAP_CREATE_CMD(
name='snap-as-vol-%s' % '2'),
poll=False),
mock.call(
*self.testData.SNAP_MP_CREATE_CMD(
name='clone1', source='vol_with_type'),
poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('clone1'),
poll=True),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='clone1', snapName='snap-as-vol-%s' % '2'))]
fake_cli.assert_has_calls(expect_cmd)
def test_delete_volume_failed(self):
commands = [self.testData.LUN_DELETE_CMD('failed_vol1')]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.delete_volume,
self.testData.test_failed_volume)
expected = [mock.call(*self.testData.LUN_DELETE_CMD('failed_vol1'))]
fake_cli.assert_has_calls(expected)
def test_delete_volume_in_sg_failed(self):
commands = [self.testData.LUN_DELETE_CMD('vol1_in_sg'),
self.testData.LUN_DELETE_CMD('vol2_in_sg')]
results = [self.testData.LUN_DELETE_IN_SG_ERROR(),
self.testData.LUN_DELETE_IN_SG_ERROR(False)]
self.driverSetup(commands, results)
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.delete_volume,
self.testData.test_volume1_in_sg)
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.delete_volume,
self.testData.test_volume2_in_sg)
    def test_delete_volume_in_sg_force(self):
        """With force_delete_lun_in_sg set, HLUs are stripped and retried.

        The first delete fails with an 'in storage group' error; the
        driver then lists storage groups, removes the mapped HLUs from
        each host, and retries the delete successfully.
        """
        commands = [self.testData.LUN_DELETE_CMD('vol1_in_sg'),
                    self.testData.STORAGEGROUP_LIST_CMD(),
                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost1',
                                                             '41'),
                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost1',
                                                             '42'),
                    self.testData.LUN_DELETE_CMD('vol2_in_sg'),
                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost2',
                                                             '31'),
                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost2',
                                                             '32')]
        # Each delete fails once with the in-SG error, then succeeds on retry.
        results = [[self.testData.LUN_DELETE_IN_SG_ERROR(),
                    SUCCEED],
                   self.testData.STORAGE_GROUPS_HAS_MAP('fakehost1',
                                                        'fakehost2'),
                   SUCCEED,
                   SUCCEED,
                   [self.testData.LUN_DELETE_IN_SG_ERROR(False),
                    SUCCEED],
                   SUCCEED,
                   SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.force_delete_lun_in_sg = True
        self.driver.delete_volume(self.testData.test_volume1_in_sg)
        self.driver.delete_volume(self.testData.test_volume2_in_sg)
        expected = [mock.call(*self.testData.LUN_DELETE_CMD('vol1_in_sg')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD(),
                              poll=True),
                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
                        'fakehost1', '41'), poll=False),
                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
                        'fakehost2', '42'), poll=False),
                    mock.call(*self.testData.LUN_DELETE_CMD('vol1_in_sg')),
                    mock.call(*self.testData.LUN_DELETE_CMD('vol2_in_sg')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD(),
                              poll=True),
                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
                        'fakehost1', '31'), poll=False),
                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
                        'fakehost2', '32'), poll=False),
                    mock.call(*self.testData.LUN_DELETE_CMD('vol2_in_sg'))]
        fake_cli.assert_has_calls(expected)
def test_delete_volume_smp(self):
fake_cli = self.driverSetup()
vol = self.testData.test_volume_with_type.copy()
vol['provider_location'] = 'system^FNM11111|type^smp|id^1'
tmp_snap = 'snap-as-vol-%s' % vol['id']
self.driver.delete_volume(vol)
expected = [mock.call(*self.testData.LUN_DELETE_CMD(vol['name'])),
mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
poll=True)]
fake_cli.assert_has_calls(expected)
def test_extend_volume(self):
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol1')]
results = [self.testData.LUN_PROPERTY('vol1', size=2)]
fake_cli = self.driverSetup(commands, results)
# case
self.driver.extend_volume(self.testData.test_volume, 2)
expected = [mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2),
poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
poll=False)]
fake_cli.assert_has_calls(expected)
def test_extend_volume_has_snapshot(self):
commands = [self.testData.LUN_EXTEND_CMD('failed_vol1', 2)]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.extend_volume,
self.testData.test_failed_volume,
2)
expected = [mock.call(*self.testData.LUN_EXTEND_CMD('failed_vol1', 2),
poll=False)]
fake_cli.assert_has_calls(expected)
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_extend_volume_failed(self):
        """Extend times out when the LUN never reaches the requested size.

        The LUN keeps reporting size 2 after an extend to 3; with the
        client timeout forced to 0 the wait loop must give up and raise.
        """
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1')]
        results = [self.testData.LUN_PROPERTY('failed_vol1', size=2)]
        fake_cli = self.driverSetup(commands, results)
        # Zero timeout makes the size-polling loop fail immediately.
        self.driver.cli._client.timeout = 0
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume,
                          self.testData.test_failed_volume,
                          3)
        expected = [
            mock.call(
                *self.testData.LUN_EXTEND_CMD('failed_vol1', 3),
                poll=False),
            mock.call(
                *self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1'),
                poll=False)]
        fake_cli.assert_has_calls(expected)
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_extend_preparing_volume(self):
        """Extend retries after the LUN finishes its 'Preparing' operation.

        The first extend fails because the LUN is busy; the driver polls
        the LUN properties until the current operation becomes 'None',
        then re-issues the extend and verifies the new size.
        """
        commands = [self.testData.LUN_EXTEND_CMD('vol1', 2),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol1')]
        # Extend: first attempt errors, second succeeds. Properties:
        # Preparing -> Optimizing -> None -> size 2.
        results = [[self.testData.LUN_PREP_ERROR(), SUCCEED],
                   [self.testData.LUN_PROPERTY('vol1', size=1,
                                               operation='Preparing'),
                    self.testData.LUN_PROPERTY('vol1', size=1,
                                               operation='Optimizing'),
                    self.testData.LUN_PROPERTY('vol1', size=1,
                                               operation='None'),
                    self.testData.LUN_PROPERTY('vol1', size=2)]]
        fake_cli = self.driverSetup(commands, results)
        self.driver.extend_volume(self.testData.test_volume, 2)
        expected = [mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2),
                              poll=False),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=True),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=False),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=False),
                    mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2),
                              poll=False),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=False)]
        fake_cli.assert_has_calls(expected)
def test_manage_existing(self):
lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
'-newName', 'vol_with_type', '-o')
commands = [lun_rename_cmd]
results = [SUCCEED]
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
fake_cli = self.driverSetup(commands, results)
self.driver.manage_existing(
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
expected = [mock.call(*lun_rename_cmd, poll=False)]
fake_cli.assert_has_calls(expected)
def test_manage_existing_source_name(self):
lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
'-newName', 'vol_with_type', '-o')
commands = [lun_rename_cmd]
results = [SUCCEED]
fake_cli = self.driverSetup(commands, results)
self.driver.manage_existing(
self.testData.test_volume_with_type,
self.testData.test_existing_ref_source_name)
expected = [mock.call(*lun_rename_cmd, poll=False)]
fake_cli.assert_has_calls(expected)
    def test_manage_existing_lun_in_another_pool(self):
        """Managing a LUN that lives in a different pool is rejected.

        The LUN reports a pool name other than the backend's configured
        pool, so manage_existing_get_size must raise
        ManageExistingInvalidReference.
        """
        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
                       '-state', '-userCap', '-owner',
                       '-attachedSnapshot', '-poolName')
        invalid_pool_name = "fake_pool"
        commands = [get_lun_cmd]
        results = [self.testData.LUN_PROPERTY('lun_name',
                                              pool_name=invalid_pool_name)]
        self.configuration.storage_vnx_pool_name = invalid_pool_name
        fake_cli = self.driverSetup(commands, results)
        # mock the command executor
        ex = self.assertRaises(
            exception.ManageExistingInvalidReference,
            self.driver.manage_existing_get_size,
            self.testData.test_volume_with_type,
            self.testData.test_existing_ref)
        # The error message must explain that the LUN is not managed here.
        self.assertTrue(
            re.match(r'.*not managed by the host',
                     ex.msg))
        expected = [mock.call(*get_lun_cmd, poll=True)]
        fake_cli.assert_has_calls(expected)
def test_manage_existing_get_size(self):
get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
'-state', '-userCap', '-owner',
'-attachedSnapshot', '-poolName')
test_size = 2
commands = [get_lun_cmd]
results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
fake_cli = self.driverSetup(commands, results)
get_size = self.driver.manage_existing_get_size(
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
expected = [mock.call(*get_lun_cmd, poll=True)]
assert get_size == test_size
fake_cli.assert_has_calls(expected)
# Test the function with invalid reference.
invaild_ref = {'fake': 'fake_ref'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
self.testData.test_volume_with_type,
invaild_ref)
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(return_value=1))
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "time.time",
        mock.Mock(return_value=123456))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'compressed'}))
    def test_retype_compressed_to_deduplicated(self):
        """Unit test for retype compressed to deduplicated.

        The provisioning change requires a migration: a new deduplicated
        LUN (named with the mocked timestamp 123456) is created and the
        volume is migrated onto it. Run twice to cover the
        ``ignore_pool_full_threshold`` option.
        """
        # NOTE(review): 'storagetype:provsioning' below looks like a typo of
        # 'provisioning' carried in the original test data -- confirm before
        # changing, as the retype path may not read this key at all.
        diff_data = {'encryption': {}, 'qos_specs': {},
                     'extra_specs':
                     {'storagetype:provsioning': ('compressed',
                                                  'deduplicated')}}
        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
                         'deleted': False,
                         'extra_specs': {'storagetype:provisioning':
                                         'deduplicated'},
                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
                          'capabilities':
                          {'location_info': 'unit_test_pool|FNM00124500890',
                           'volume_backend_name': 'pool_backend_1',
                           'storage_protocol': 'iSCSI'}}
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.SNAP_LIST_CMD(),
                    cmd_migrate_verify]
        results = [self.testData.NDU_LIST_RESULT,
                   ('No snap', 1023),
                   output_migrate_verify]
        fake_cli1 = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})
        self.driver.retype(None, self.testData.test_volume3,
                           new_type_data,
                           diff_data,
                           host_test_data)
        expect_cmd1 = [
            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol3-123456'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, None),
                      retry_disable=True,
                      poll=True)]
        fake_cli1.assert_has_calls(expect_cmd1)
        # Second pass with pool-full threshold ignored.
        self.configuration.ignore_pool_full_threshold = True
        fake_cli2 = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})
        self.driver.retype(None, self.testData.test_volume3,
                           new_type_data,
                           diff_data,
                           host_test_data)
        expect_cmd2 = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None,
                ignore_thresholds=True))]
        fake_cli2.assert_has_calls(expect_cmd2)
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(return_value=1))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
        "get_lun_by_name",
        mock.Mock(return_value={'lun_id': 1}))
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "time.time",
        mock.Mock(return_value=123456))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'thin'}))
    def test_retype_thin_to_compressed_auto(self):
        """Unit test for retype thin to compressed and auto tiering.

        Requires a migration onto a new compressed/auto LUN; compression
        must be explicitly enabled on the new LUN before migrating.
        """
        diff_data = {'encryption': {}, 'qos_specs': {},
                     'extra_specs':
                     {'storagetype:provsioning': ('thin',
                                                  'compressed'),
                      'storagetype:tiering': (None, 'auto')}}
        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
                         'deleted': False,
                         'extra_specs': {'storagetype:provisioning':
                                         'compressed',
                                         'storagetype:tiering': 'auto'},
                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
                          'capabilities':
                          {'location_info': 'unit_test_pool|FNM00124500890',
                           'volume_backend_name': 'pool_backend_1',
                           'storage_protocol': 'iSCSI'}}
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.SNAP_LIST_CMD(),
                    cmd_migrate_verify]
        results = [self.testData.NDU_LIST_RESULT,
                   ('No snap', 1023),
                   output_migrate_verify]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})
        self.driver.retype(None, self.testData.test_volume3,
                           new_type_data,
                           diff_data,
                           host_test_data)
        expect_cmd = [
            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol3-123456', 2, 'unit_test_pool', 'compressed', 'auto')),
            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
            mock.call(*self.testData.MIGRATION_CMD(),
                      retry_disable=True,
                      poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(return_value=1))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
        "get_lun_by_name",
        mock.Mock(return_value={'lun_id': 1}))
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "time.time",
        mock.Mock(return_value=123456))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'deduplicated',
                                'storagetype:pool': 'unit_test_pool'}))
    def test_retype_pool_changed_dedup_to_compressed_auto(self):
        """Test retype from dedup to compressed and auto tiering.

        Unit test for retype dedup to compressed and auto tiering
        and pool changed
        """
        diff_data = {'encryption': {}, 'qos_specs': {},
                     'extra_specs':
                     {'storagetype:provsioning': ('deduplicated',
                                                  'compressed'),
                      'storagetype:tiering': (None, 'auto'),
                      'storagetype:pool': ('unit_test_pool',
                                           'unit_test_pool2')}}
        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
                         'deleted': False,
                         'extra_specs': {'storagetype:provisioning':
                                         'compressed',
                                         'storagetype:tiering': 'auto',
                                         'storagetype:pool':
                                         'unit_test_pool2'},
                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
        host_test_data = {'host':
                          'ubuntu-server12@pool_backend_1#unit_test_pool2',
                          'capabilities':
                          {'location_info': 'unit_test_pool2|FNM00124500890',
                           'volume_backend_name': 'pool_backend_1',
                           'storage_protocol': 'iSCSI'}}
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.SNAP_LIST_CMD(),
                    self.testData.MIGRATION_VERIFY_CMD(1)]
        results = [self.testData.NDU_LIST_RESULT,
                   ('No snap', 1023),
                   ('The specified source LUN is not currently migrating', 23)]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})
        self.driver.retype(None, self.testData.test_volume3,
                           new_type_data,
                           diff_data,
                           host_test_data)
        # The new LUN must be created in the *target* pool
        # (unit_test_pool2), then migrated onto and verified.
        expect_cmd = [
            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol3-123456', 2, 'unit_test_pool2', 'compressed', 'auto')),
            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
            mock.call(*self.testData.MIGRATION_CMD(),
                      retry_disable=True,
                      poll=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(return_value=1))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
        "get_lun_by_name",
        mock.Mock(return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'compressed',
                                'storagetype:pool': 'unit_test_pool',
                                'storagetype:tiering': 'auto'}))
    def test_retype_compressed_auto_to_compressed_nomovement(self):
        """Unit test for retype only tiering changed.

        A tiering-only change needs no migration: the driver just issues
        a ``lun -modify`` switching the tiering policy to noMovement.
        """
        diff_data = {'encryption': {}, 'qos_specs': {},
                     'extra_specs':
                     {'storagetype:tiering': ('auto', 'nomovement')}}
        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
                         'deleted': False,
                         'extra_specs': {'storagetype:provisioning':
                                         'compressed',
                                         'storagetype:tiering': 'nomovement',
                                         'storagetype:pool':
                                         'unit_test_pool'},
                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
        host_test_data = {
            'host': 'host@backendsec#unit_test_pool',
            'capabilities': {
                'location_info': 'unit_test_pool|FNM00124500890',
                'volume_backend_name': 'pool_backend_1',
                'storage_protocol': 'iSCSI'}}
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.SNAP_LIST_CMD()]
        results = [self.testData.NDU_LIST_RESULT,
                   ('No snap', 1023)]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})
        self.driver.retype(None, self.testData.test_volume3,
                           new_type_data,
                           diff_data,
                           host_test_data)
        expect_cmd = [
            mock.call('lun', '-modify', '-name', 'vol3', '-o', '-initialTier',
                      'optimizePool', '-tieringPolicy', 'noMovement')]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(return_value=1))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
        "get_lun_by_name",
        mock.Mock(return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'thin',
                                'storagetype:pool': 'unit_test_pool'}))
    def test_retype_compressed_to_thin_cross_array(self):
        """Unit test for retype cross array."""
        # NOTE(review): 'storagetype:provsioning' below is a typo carried
        # in the original fixture; the driver appears to decide from the
        # new type's extra_specs, so it does not change the outcome —
        # confirm before "fixing" it.
        diff_data = {'encryption': {}, 'qos_specs': {},
                     'extra_specs':
                     {'storagetype:provsioning': ('compressed', 'thin')}}
        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
                         'deleted': False,
                         'extra_specs': {'storagetype:provisioning': 'thin',
                                         'storagetype:pool':
                                         'unit_test_pool'},
                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
        # Target host reports array FNM00124500891 while get_array_serial
        # (mocked below) returns FNM00124500890 — a cross-array move.
        host_test_data = {
            'host': 'ubuntu-server12@pool_backend_2#unit_test_pool',
            'capabilities':
            {'location_info': 'unit_test_pool|FNM00124500891',
             'volume_backend_name': 'pool_backend_2',
             'storage_protocol': 'iSCSI'}}
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.SNAP_LIST_CMD()]
        results = [self.testData.NDU_LIST_RESULT,
                   ('No snap', 1023)]
        self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})
        retyped = self.driver.retype(None, self.testData.test_volume3,
                                     new_type_data, diff_data,
                                     host_test_data)
        self.assertFalse(retyped,
                         "Retype should failed due to"
                         " different protocol or array")
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(return_value=1))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
        "get_lun_by_name",
        mock.Mock(return_value={'lun_id': 1}))
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "time.time",
        mock.Mock(return_value=123456))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'thin',
                                'storagetype:tiering': 'auto',
                                'storagetype:pool': 'unit_test_pool'}))
    def test_retype_thin_auto_to_dedup_diff_procotol(self):
        """Unit test for retype different procotol."""
        # Same array, different protocol (iSCSI -> FC), provisioning
        # changes to deduplicated: the driver creates a new LUN and
        # migrates into it.  time.time is mocked above so the temporary
        # LUN name is deterministic ('vol3-123456').
        diff_data = {'encryption': {}, 'qos_specs': {},
                     'extra_specs':
                     {'storagetype:provsioning': ('thin', 'deduplicated'),
                      'storagetype:tiering': ('auto', None)}}
        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
                         'deleted': False,
                         'extra_specs': {'storagetype:provisioning':
                                         'deduplicated',
                                         'storagetype:pool':
                                         'unit_test_pool'},
                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
        host_test_data = {
            'host': 'ubuntu-server12@pool_backend_2#unit_test_pool',
            'capabilities':
            {'location_info': 'unit_test_pool|FNM00124500890',
             'volume_backend_name': 'pool_backend_2',
             'storage_protocol': 'FC'}}
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.SNAP_LIST_CMD(),
                    self.testData.MIGRATION_VERIFY_CMD(1)]
        results = [self.testData.NDU_LIST_RESULT,
                   ('No snap', 1023),
                   ('The specified source LUN is not currently migrating', 23)]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})
        self.driver.retype(None, self.testData.test_volume3,
                           new_type_data,
                           diff_data,
                           host_test_data)
        # snap check -> create dedup target LUN -> migrate -> verify.
        expect_cmd = [
            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
            mock.call(*self.testData.MIGRATION_CMD(),
                      retry_disable=True,
                      poll=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(return_value=1))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
        "get_lun_by_name",
        mock.Mock(return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'thin',
                                'storagetype:tiering': 'auto',
                                'storagetype:pool': 'unit_test_pool'}))
    def test_retype_thin_auto_has_snap_to_thick_highestavailable(self):
        """Unit test for retype volume has snap when need migration."""
        diff_data = {'encryption': {}, 'qos_specs': {},
                     'extra_specs':
                     {'storagetype:provsioning': ('thin', None),
                      'storagetype:tiering': ('auto', 'highestAvailable')}}
        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
                         'deleted': False,
                         'extra_specs': {'storagetype:tiering':
                                         'highestAvailable',
                                         'storagetype:pool':
                                         'unit_test_pool'},
                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
        host_test_data = {
            'host': 'ubuntu-server12@pool_backend_1#unit_test_pool',
            'capabilities':
            {'location_info': 'unit_test_pool|FNM00124500890',
             'volume_backend_name': 'pool_backend_1',
             'storage_protocol': 'iSCSI'}}
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.SNAP_LIST_CMD()]
        # rc 0 ('Has snap'): the volume has snapshots, so the migration
        # required by this retype is impossible and retype must refuse.
        results = [self.testData.NDU_LIST_RESULT,
                   ('Has snap', 0)]
        self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})
        retyped = self.driver.retype(None, self.testData.test_volume3,
                                     new_type_data,
                                     diff_data,
                                     host_test_data)
        self.assertFalse(retyped,
                         "Retype should failed due to"
                         " different protocol or array")
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(return_value=1))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
        "get_lun_by_name",
        mock.Mock(return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'thin',
                                'storagetype:tiering': 'auto',
                                'storagetype:pool': 'unit_test_pool'}))
    def test_retype_thin_auto_to_thin_auto(self):
        """Unit test for retype volume which has no change."""
        # Empty extra_specs diff: retype is expected to be a no-op, so
        # the test only checks that the call completes without error.
        diff_data = {'encryption': {}, 'qos_specs': {},
                     'extra_specs': {}}
        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
                         'deleted': False,
                         'extra_specs': {'storagetype:tiering':
                                         'auto',
                                         'storagetype:provisioning':
                                         'thin'},
                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
        host_test_data = {
            'host': 'ubuntu-server12@pool_backend_1#unit_test_pool',
            'capabilities':
            {'location_info': 'unit_test_pool|FNM00124500890',
             'volume_backend_name': 'pool_backend_1',
             'storage_protocol': 'iSCSI'}}
        commands = [self.testData.NDU_LIST_CMD]
        results = [self.testData.NDU_LIST_RESULT]
        self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})
        self.driver.retype(None, self.testData.test_volume3,
                           new_type_data,
                           diff_data,
                           host_test_data)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper."
"migrate_lun_with_verification",
mock.Mock(return_value=True))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper."
"create_lun_with_advance_feature",
mock.Mock(return_value={'lun_id': '1'}))
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'storagetype:provisioning': 'thin',
'copytype:snap': 'true'}))
def test_retype_copytype_snap_true_to_false(self):
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'copytype:snap': ('true',
'false')}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning': 'thin',
'copytype:snap': 'false'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
output_migrate_verify = (r'The specified source LUN '
'is not currently migrating', 23)
commands = [self.testData.NDU_LIST_CMD,
self.testData.SNAP_LIST_CMD(),
cmd_migrate_verify]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023),
output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
vol = self.testData.test_volume3.copy()
vol['provider_location'] = 'system^FNM11111|type^smp|id^1'
tmp_snap = 'snap-as-vol-%s' % vol['id']
ret = self.driver.retype(None, vol,
new_type_data,
diff_data,
host_test_data)
self.assertTrue(type(ret) == tuple)
self.assertTrue(ret[0])
self.assertTrue(
ret[1]['provider_location'].find('type^lun') > 0)
expect_cmd = [
mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
poll=True)]
fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'fast_cache_enabled': 'True'}))
    def test_create_volume_with_fastcache(self):
        """Test creating volume with fastcache enabled."""
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.POOL_PROPERTY_W_FASTCACHE_CMD,
                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    ]
        results = [self.testData.NDU_LIST_RESULT,
                   self.testData.POOL_PROPERTY_W_FASTCACHE,
                   self.testData.LUN_PROPERTY('vol_with_type', True),
                   ]
        fake_cli = self.driverSetup(commands, results)
        # Minimal LUN info returned by the mocked name lookup below.
        lun_info = {'lun_name': "vol_with_type",
                    'lun_id': 1,
                    'pool': "unit_test_pool",
                    'attached_snapshot': "N/A",
                    'owner': "A",
                    'total_capacity_gb': 1.0,
                    'state': "Ready",
                    'status': 'OK(0x0)',
                    'operation': 'None'
                    }
        cli_helper = self.driver.cli._client
        cli_helper.command_execute = fake_cli
        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
        cli_helper.get_enablers_on_array = mock.Mock(return_value="-FASTCache")
        cli_helper.get_pool_list = mock.Mock(return_value=[{
            'lun_nums': 1000,
            'total_capacity_gb': 10,
            'free_capacity_gb': 5,
            'provisioned_capacity_gb': 8,
            'pool_name': "unit_test_pool",
            'fast_cache_enabled': 'True',
            'state': 'Ready',
            'pool_full_threshold': 70.0}])
        self.driver.update_volume_stats()
        self.driver.create_volume(self.testData.test_volume_with_type)
        # The reported pool stats must carry the FAST Cache flag through.
        pool_stats = self.driver.cli.stats['pools'][0]
        self.assertEqual('True', pool_stats['fast_cache_enabled'])
        expect_cmd = [
            mock.call('connection', '-getport', '-address', '-vlanid',
                      poll=False),
            mock.call('-np', 'lun', '-create', '-capacity',
                      1, '-sq', 'gb', '-poolName',
                      self.testData.test_pool_name,
                      '-name', 'vol_with_type', '-type', 'NonThin')
        ]
        fake_cli.assert_has_calls(expect_cmd)
def test_get_lun_id_provider_location_exists(self):
"""Test function get_lun_id."""
self.driverSetup()
volume_01 = {
'name': 'vol_01',
'size': 1,
'volume_name': 'vol_01',
'id': '1',
'name_id': '1',
'provider_location': 'system^FNM11111|type^lun|id^4',
'project_id': 'project',
'display_name': 'vol_01',
'display_description': 'test volume',
'volume_type_id': None}
self.assertEqual(4, self.driver.cli.get_lun_id(volume_01))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 2}))
def test_get_lun_id_provider_location_has_no_lun_id(self):
"""Test function get_lun_id."""
self.driverSetup()
volume_02 = {
'name': 'vol_02',
'size': 1,
'volume_name': 'vol_02',
'id': '2',
'provider_location': 'system^FNM11111|type^lun|',
'project_id': 'project',
'display_name': 'vol_02',
'display_description': 'test volume',
'volume_type_id': None}
self.assertEqual(2, self.driver.cli.get_lun_id(volume_02))
def test_create_consistency_group(self):
cg_name = self.testData.test_cg['id']
commands = [self.testData.CREATE_CONSISTENCYGROUP_CMD(cg_name),
self.testData.GET_CG_BY_NAME_CMD(cg_name)]
results = [SUCCEED, self.testData.CG_PROPERTY(cg_name)]
fake_cli = self.driverSetup(commands, results)
model_update = self.driver.create_consistencygroup(
None, self.testData.test_cg)
self.assertDictMatch({'status': 'available'}, model_update)
expect_cmd = [
mock.call(
*self.testData.CREATE_CONSISTENCYGROUP_CMD(
cg_name), poll=False),
mock.call(
*self.testData.GET_CG_BY_NAME_CMD(cg_name))]
fake_cli.assert_has_calls(expect_cmd)
    def test_create_consistency_group_retry(self):
        """CG creation polls again when the group is not found at first."""
        cg_name = self.testData.test_cg['id']
        commands = [self.testData.CREATE_CONSISTENCYGROUP_CMD(cg_name),
                    self.testData.GET_CG_BY_NAME_CMD(cg_name)]
        # First GET returns CG_NOT_FOUND, the second the real property —
        # the driver is expected to retry the lookup.
        results = [SUCCEED,
                   [self.testData.CG_NOT_FOUND(),
                    self.testData.CG_PROPERTY(cg_name)]]
        fake_cli = self.driverSetup(commands, results)
        model_update = self.driver.create_consistencygroup(
            None, self.testData.test_cg)
        self.assertDictMatch({'status': 'available'}, model_update)
        expect_cmd = [
            mock.call(
                *self.testData.CREATE_CONSISTENCYGROUP_CMD(
                    cg_name), poll=False),
            mock.call(
                *self.testData.GET_CG_BY_NAME_CMD(cg_name)),
            mock.call(
                *self.testData.GET_CG_BY_NAME_CMD(cg_name))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "cinder.volume.volume_types.get_volume_type_extra_specs",
        mock.Mock(side_effect=[{'storagetype:provisioning': 'thin'},
                               {'storagetype:provisioning': 'compressed'}]))
    def test_create_consistency_group_failed_with_compression(self):
        """CG creation fails when a member type uses compression."""
        # The side_effect above makes the second volume type resolve to
        # 'compressed', which the driver rejects for CG membership.
        self.driverSetup([], [])
        self.assertRaisesRegex(exception.VolumeBackendAPIException,
                               "Failed to create consistency group "
                               "consistencygroup_id "
                               "because VNX consistency group cannot "
                               "accept compressed LUNs as members.",
                               self.driver.create_consistencygroup,
                               None,
                               self.testData.test_cg_with_type)
    def test_delete_consistency_group(self):
        """Deleting a CG destroys the group, then each member LUN."""
        cg_name = self.testData.test_cg['id']
        commands = [self.testData.DELETE_CONSISTENCYGROUP_CMD(cg_name),
                    self.testData.LUN_DELETE_CMD('vol1')]
        results = [SUCCEED, SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        self.driver.db = mock.MagicMock()
        self.driver.db.volume_get_all_by_group.return_value =\
            self.testData.CONSISTENCY_GROUP_VOLUMES()
        self.driver.delete_consistencygroup(None,
                                            self.testData.test_cg)
        # Two LUN deletes: the fixture presumably returns two member
        # volumes that both resolve to the name 'vol1' — verify against
        # CONSISTENCY_GROUP_VOLUMES() if this looks surprising.
        expect_cmd = [
            mock.call(
                *self.testData.DELETE_CONSISTENCYGROUP_CMD(
                    cg_name)),
            mock.call(*self.testData.LUN_DELETE_CMD('vol1')),
            mock.call(*self.testData.LUN_DELETE_CMD('vol1'))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        'cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_create_cgsnapshot(self, get_all_for_cgsnapshot):
        """Creating a cgsnapshot issues a group snap create then a poll."""
        cgsnapshot = self.testData.test_cgsnapshot['id']
        cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
        commands = [self.testData.CREATE_CG_SNAPSHOT(cg_name, cgsnapshot),
                    self.testData.GET_SNAP(cgsnapshot)]
        results = [SUCCEED,
                   SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(
            self.testData.SNAPS_IN_SNAP_GROUP())
        snapshot_obj.consistencygroup_id = cg_name
        get_all_for_cgsnapshot.return_value = [snapshot_obj]
        self.driver.create_cgsnapshot(None, self.testData.test_cgsnapshot)
        expect_cmd = [
            mock.call(
                *self.testData.CREATE_CG_SNAPSHOT(
                    cg_name, cgsnapshot)),
            mock.call(
                *self.testData.GET_SNAP(cgsnapshot))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        'cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_create_cgsnapshot_retry(self, get_all_for_cgsnapshot):
        """cgsnapshot creation polls again while the snap is not visible."""
        cgsnapshot = self.testData.test_cgsnapshot['id']
        cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
        commands = [self.testData.CREATE_CG_SNAPSHOT(cg_name, cgsnapshot),
                    self.testData.GET_SNAP(cgsnapshot)]
        # First GET reports the snap missing, second succeeds — the
        # driver is expected to retry, hence two GET_SNAP calls below.
        results = [SUCCEED,
                   [self.testData.SNAP_NOT_EXIST(), SUCCEED]]
        fake_cli = self.driverSetup(commands, results)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(
            self.testData.SNAPS_IN_SNAP_GROUP())
        snapshot_obj.consistencygroup_id = cg_name
        get_all_for_cgsnapshot.return_value = [snapshot_obj]
        self.driver.create_cgsnapshot(None, self.testData.test_cgsnapshot)
        expect_cmd = [
            mock.call(
                *self.testData.CREATE_CG_SNAPSHOT(
                    cg_name, cgsnapshot)),
            mock.call(
                *self.testData.GET_SNAP(cgsnapshot)),
            mock.call(
                *self.testData.GET_SNAP(cgsnapshot))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        'cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_delete_cgsnapshot(self, get_all_for_cgsnapshot):
        """Deleting a cgsnapshot issues a single snap destroy."""
        snap_name = self.testData.test_cgsnapshot['id']
        commands = [self.testData.DELETE_CG_SNAPSHOT(snap_name)]
        results = [SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(
            self.testData.SNAPS_IN_SNAP_GROUP())
        cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
        snapshot_obj.consistencygroup_id = cg_name
        get_all_for_cgsnapshot.return_value = [snapshot_obj]
        self.driver.delete_cgsnapshot(None,
                                      self.testData.test_cgsnapshot)
        expect_cmd = [
            mock.call(
                *self.testData.DELETE_CG_SNAPSHOT(
                    snap_name))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    def test_add_volume_to_cg(self):
        """Creating a volume in a CG also adds its LUN to the group."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                    self.testData.ADD_LUN_TO_CG_CMD('cg_id', 1),
                    self.testData.GET_CG_BY_NAME_CMD('cg_id')
                    ]
        results = [self.testData.LUN_PROPERTY('vol1', True),
                   SUCCEED,
                   self.testData.CG_PROPERTY('cg_id')]
        fake_cli = self.driverSetup(commands, results)
        self.driver.create_volume(self.testData.test_volume_cg)
        # create LUN -> read its properties -> add it to the CG.
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol1', 1,
                'unit_test_pool',
                None, None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                      poll=False),
            mock.call(*self.testData.ADD_LUN_TO_CG_CMD(
                'cg_id', 1), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    def test_create_cloned_volume_from_consistnecy_group(self):
        """Cloning a CG member snapshots the whole group first."""
        # NOTE(review): 'consistnecy' in the method name is a pre-existing
        # typo; kept so the test id stays stable.
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
        cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
        output_dest = self.testData.LUN_PROPERTY("vol1_dest")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        output_migrate = ("", 0)
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
        commands = [cmd_dest, cmd_dest_p, cmd_migrate,
                    cmd_migrate_verify]
        results = [output_dest, output_dest, output_migrate,
                   output_migrate_verify]
        fake_cli = self.driverSetup(commands, results)
        self.driver.create_cloned_volume(self.testData.test_volume_clone_cg,
                                         self.testData.test_clone_cg)
        tmp_cgsnapshot = 'tmp-cgsnapshot-' + self.testData.test_volume['id']
        # Expected flow: temp group snapshot -> snap mount point ->
        # attach -> dest LUN -> migrate -> verify -> drop temp snapshot.
        expect_cmd = [
            mock.call(
                *self.testData.CREATE_CG_SNAPSHOT(cg_name, tmp_cgsnapshot)),
            mock.call(
                *self.testData.GET_SNAP(tmp_cgsnapshot)),
            mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='vol1',
                                                        source='clone1'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'), poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='vol1', snapName=tmp_cgsnapshot)),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol1_dest', 1, 'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      retry_disable=True,
                      poll=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True),
            mock.call(*self.testData.DELETE_CG_SNAPSHOT(tmp_cgsnapshot))]
        fake_cli.assert_has_calls(expect_cmd)
    def test_create_volume_from_cgsnapshot(self):
        """Create a volume from a member snapshot of a cgsnapshot."""
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        output_dest = self.testData.LUN_PROPERTY("vol2_dest")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        output_migrate = ("", 0)
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
                    cmd_migrate_verify]
        results = [output_dest, output_dest, output_migrate,
                   output_migrate_verify]
        fake_cli = self.driverSetup(commands, results)
        self.driver.create_volume_from_snapshot(
            self.testData.volume_in_cg, self.testData.test_member_cgsnapshot)
        # smp create -> attach cg snap -> dest LUN -> migrate -> verify.
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_MP_CREATE_CMD(
                    name='vol2', source='vol1'),
                poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='vol2', snapName='cgsnapshot_id')),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol2_dest', 1, 'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      retry_disable=True,
                      poll=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
def test_update_consistencygroup(self):
cg_name = self.testData.test_cg['id']
commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name)]
results = [self.testData.CG_PROPERTY(cg_name)]
fake_cli = self.driverSetup(commands, results)
(model_update, add_vols, remove_vols) = (
self.driver.update_consistencygroup(None, self.testData.test_cg,
self.testData.
VOLUMES_NOT_IN_CG(),
self.testData.VOLUMES_IN_CG()))
expect_cmd = [
mock.call(*self.testData.REPLACE_LUNS_IN_CG_CMD(
cg_name, ['4', '5']), poll=False)]
fake_cli.assert_has_calls(expect_cmd)
self.assertEqual('available', model_update['status'])
    def test_update_consistencygroup_remove_all(self):
        """Removing every member results in a remove-LUNs CLI call."""
        cg_name = self.testData.test_cg['id']
        commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name)]
        results = [self.testData.CG_PROPERTY(cg_name)]
        fake_cli = self.driverSetup(commands, results)
        (model_update, add_vols, remove_vols) = (
            self.driver.update_consistencygroup(None, self.testData.test_cg,
                                                None,
                                                self.testData.VOLUMES_IN_CG()))
        expect_cmd = [
            mock.call(*self.testData.REMOVE_LUNS_FROM_CG_CMD(
                cg_name, ['1', '3']), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
        self.assertEqual('available', model_update['status'])
    def test_update_consistencygroup_remove_not_in_cg(self):
        """Removing volumes that are not members leaves the CG intact."""
        cg_name = self.testData.test_cg['id']
        commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name)]
        results = [self.testData.CG_PROPERTY(cg_name)]
        fake_cli = self.driverSetup(commands, results)
        (model_update, add_vols, remove_vols) = (
            self.driver.update_consistencygroup(None, self.testData.test_cg,
                                                None,
                                                self.testData.
                                                VOLUMES_NOT_IN_CG()))
        # The replace call still lists the existing members ('1', '3').
        expect_cmd = [
            mock.call(*self.testData.REPLACE_LUNS_IN_CG_CMD(
                cg_name, ['1', '3']), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
        self.assertEqual('available', model_update['status'])
    def test_update_consistencygroup_error(self):
        """A CLI failure while replacing members raises EMCVnxCLICmdError."""
        cg_name = self.testData.test_cg['id']
        commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name),
                    self.testData.REPLACE_LUNS_IN_CG_CMD(
                        cg_name, ['1', '3'])]
        # CG_REPL_ERROR makes the replace command fail.
        results = [self.testData.CG_PROPERTY(cg_name),
                   self.testData.CG_REPL_ERROR()]
        fake_cli = self.driverSetup(commands, results)
        self.assertRaises(exception.EMCVnxCLICmdError,
                          self.driver.update_consistencygroup,
                          None,
                          self.testData.test_cg,
                          [],
                          self.testData.VOLUMES_NOT_IN_CG())
        expect_cmd = [
            mock.call(*self.testData.REPLACE_LUNS_IN_CG_CMD(
                cg_name, ['1', '3']), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    def test_create_consistencygroup_from_cgsnapshot(self):
        """Create a new CG (two volumes) from an existing cgsnapshot.

        Flow: copy the source cgsnapshot to a temp snapshot, build one
        snap mount point per new volume, attach, create dest LUNs,
        migrate both, then create the CG from the migrated LUN ids and
        drop the temp snapshot.  The final assertEqual checks the FULL
        ordered CLI call list, so any reordering breaks this test.
        """
        output_migrate_verify = ('The specified source LUN '
                                 'is not currently migrating.', 23)
        new_cg = self.testData.test_cg.copy()
        new_cg.update(
            {'id': 'new_cg_id'})
        vol1_in_new_cg = self.testData.test_volume_cg.copy()
        vol1_in_new_cg.update(
            {'name': 'vol1_in_cg',
             'id': '111111',
             'consistencygroup_id': 'new_cg_id',
             'provider_location': None})
        vol2_in_new_cg = self.testData.test_volume_cg.copy()
        vol2_in_new_cg.update(
            {'name': 'vol2_in_cg',
             'id': '222222',
             'consistencygroup_id': 'new_cg_id',
             'provider_location': None})
        src_cgsnap = self.testData.test_cgsnapshot
        snap1_in_src_cgsnap = self.testData.test_member_cgsnapshot.copy()
        snap1_in_src_cgsnap.update(
            {'volume': self.testData.test_volume,
             'volume_name': 'src_vol1'})
        snap2_in_src_cgsnap = self.testData.test_member_cgsnapshot.copy()
        snap2_in_src_cgsnap.update(
            {'volume': self.testData.test_volume2,
             'volume_name': 'src_vol2'})
        copied_snap_name = 'temp_snapshot_for_%s' % new_cg['id']
        td = self.testData
        # commands/results are positionally paired by driverSetup.
        commands = [td.SNAP_COPY_CMD(src_cgsnap['id'], copied_snap_name),
                    td.ALLOW_READWRITE_ON_SNAP_CMD(copied_snap_name),
                    td.SNAP_MP_CREATE_CMD(vol1_in_new_cg['name'],
                                          snap1_in_src_cgsnap['volume_name']),
                    td.SNAP_ATTACH_CMD(vol1_in_new_cg['name'],
                                       copied_snap_name),
                    td.LUN_CREATION_CMD(vol1_in_new_cg['name'] + '_dest',
                                        vol1_in_new_cg['size'],
                                        'unit_test_pool', 'thin', None),
                    td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name'] + '_dest'),
                    td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']),
                    td.MIGRATION_CMD(6231, 1),
                    td.SNAP_MP_CREATE_CMD(vol2_in_new_cg['name'],
                                          snap2_in_src_cgsnap['volume_name']),
                    td.SNAP_ATTACH_CMD(vol2_in_new_cg['name'],
                                       copied_snap_name),
                    td.LUN_CREATION_CMD(vol2_in_new_cg['name'] + '_dest',
                                        vol2_in_new_cg['size'],
                                        'unit_test_pool', 'thin', None),
                    td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name'] + '_dest'),
                    td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']),
                    td.MIGRATION_CMD(6232, 2),
                    td.MIGRATION_VERIFY_CMD(6231),
                    td.MIGRATION_VERIFY_CMD(6232),
                    td.CREATE_CONSISTENCYGROUP_CMD(new_cg['id'], [6231, 6232]),
                    td.DELETE_CG_SNAPSHOT(copied_snap_name)
                    ]
        results = [SUCCEED, SUCCEED, SUCCEED, SUCCEED, SUCCEED,
                   td.LUN_PROPERTY(vol1_in_new_cg['name'] + '_dest',
                                   lunid=1),
                   td.LUN_PROPERTY(vol1_in_new_cg['name'], lunid=6231),
                   SUCCEED, SUCCEED, SUCCEED, SUCCEED,
                   td.LUN_PROPERTY(vol2_in_new_cg['name'] + '_dest',
                                   lunid=2),
                   td.LUN_PROPERTY(vol2_in_new_cg['name'], lunid=6232),
                   SUCCEED, output_migrate_verify, output_migrate_verify,
                   SUCCEED, SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        cg_model_update, volumes_model_update = (
            self.driver.create_consistencygroup_from_src(
                None, new_cg, [vol1_in_new_cg, vol2_in_new_cg],
                cgsnapshot=src_cgsnap, snapshots=[snap1_in_src_cgsnap,
                                                  snap2_in_src_cgsnap],
                source_cg=None, source_vols=None))
        # Each new volume's provider_location must carry its LUN id.
        self.assertEqual(2, len(volumes_model_update))
        self.assertTrue('id^%s' % 6231 in
                        volumes_model_update[0]['provider_location'])
        self.assertTrue('id^%s' % 6232 in
                        volumes_model_update[1]['provider_location'])
        expect_cmd = [
            mock.call(*td.SNAP_COPY_CMD(src_cgsnap['id'], copied_snap_name)),
            mock.call(*td.ALLOW_READWRITE_ON_SNAP_CMD(copied_snap_name)),
            mock.call(*td.SNAP_MP_CREATE_CMD(vol1_in_new_cg['name'],
                                             snap1_in_src_cgsnap[
                                                 'volume_name']),
                      poll=False),
            mock.call(*td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']),
                      poll=True),
            mock.call(*td.SNAP_ATTACH_CMD(vol1_in_new_cg['name'],
                                          copied_snap_name)),
            mock.call(*td.LUN_CREATION_CMD(vol1_in_new_cg['name'] + '_dest',
                                           vol1_in_new_cg['size'],
                                           'unit_test_pool', 'thick', None)),
            mock.call(*td.LUN_PROPERTY_ALL_CMD(
                vol1_in_new_cg['name'] + '_dest'), poll=False),
            mock.call(*td.LUN_PROPERTY_ALL_CMD(
                vol1_in_new_cg['name'] + '_dest'), poll=False),
            mock.call(*td.MIGRATION_CMD(6231, 1),
                      poll=True, retry_disable=True),
            mock.call(*td.SNAP_MP_CREATE_CMD(vol2_in_new_cg['name'],
                                             snap2_in_src_cgsnap[
                                                 'volume_name']),
                      poll=False),
            mock.call(*td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']),
                      poll=True),
            mock.call(*td.SNAP_ATTACH_CMD(vol2_in_new_cg['name'],
                                          copied_snap_name)),
            mock.call(*td.LUN_CREATION_CMD(vol2_in_new_cg['name'] + '_dest',
                                           vol2_in_new_cg['size'],
                                           'unit_test_pool', 'thick', None)),
            mock.call(*td.LUN_PROPERTY_ALL_CMD(
                vol2_in_new_cg['name'] + '_dest'), poll=False),
            mock.call(*td.LUN_PROPERTY_ALL_CMD(
                vol2_in_new_cg['name'] + '_dest'), poll=False),
            mock.call(*td.MIGRATION_CMD(6232, 2),
                      poll=True, retry_disable=True),
            mock.call(*td.MIGRATION_VERIFY_CMD(6232), poll=True),
            mock.call(*td.MIGRATION_VERIFY_CMD(6231), poll=True),
            mock.call(*td.CREATE_CONSISTENCYGROUP_CMD(
                new_cg['id'], [6232, 6231]), poll=True),
            mock.call(*td.GET_CG_BY_NAME_CMD(new_cg['id'])),
            mock.call(*td.DELETE_CG_SNAPSHOT(copied_snap_name))]
        self.assertEqual(expect_cmd, fake_cli.call_args_list)
def test_create_consistencygroup_from_othersource(self):
new_cg = self.testData.test_cg.copy()
new_cg.update(
{'id': 'new_cg_id'})
vol1_in_new_cg = self.testData.test_volume_cg.copy()
vol1_in_new_cg.update(
{'name': 'vol1_in_cg',
'id': '111111',
'consistencygroup_id': 'new_cg_id',
'provider_location': None})
vol2_in_new_cg = self.testData.test_volume_cg.copy()
vol2_in_new_cg.update(
{'name': 'vol2_in_cg',
'id': '222222',
'consistencygroup_id': 'new_cg_id',
'provider_location': None})
self.driverSetup()
self.assertRaises(
exception.InvalidInput,
self.driver.create_consistencygroup_from_src,
new_cg, [vol1_in_new_cg, vol2_in_new_cg],
None, None, None, None)
    def test_create_cg_from_cgsnapshot_migrate_failed(self):
        """A failed member migration rolls back everything created.

        The second volume's MIGRATION_CMD returns FAKE_ERROR_RETURN; the
        driver must raise and clean up dest LUNs, snap mount points and
        the temporary snapshot.
        """
        new_cg = self.testData.test_cg.copy()
        new_cg.update(
            {'id': 'new_cg_id'})
        vol1_in_new_cg = self.testData.test_volume_cg.copy()
        vol1_in_new_cg.update(
            {'name': 'vol1_in_cg',
             'id': '111111',
             'consistencygroup_id': 'new_cg_id',
             'provider_location': None})
        vol2_in_new_cg = self.testData.test_volume_cg.copy()
        vol2_in_new_cg.update(
            {'name': 'vol2_in_cg',
             'id': '222222',
             'consistencygroup_id': 'new_cg_id',
             'provider_location': None})
        src_cgsnap = self.testData.test_cgsnapshot
        snap1_in_src_cgsnap = self.testData.test_member_cgsnapshot.copy()
        snap1_in_src_cgsnap.update(
            {'volume': self.testData.test_volume,
             'volume_name': 'src_vol1'})
        snap2_in_src_cgsnap = self.testData.test_member_cgsnapshot.copy()
        snap2_in_src_cgsnap.update(
            {'volume': self.testData.test_volume2,
             'volume_name': 'src_vol2'})
        copied_snap_name = 'temp_snapshot_for_%s' % new_cg['id']
        td = self.testData
        commands = [td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name'] + '_dest'),
                    td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']),
                    td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name'] + '_dest'),
                    td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']),
                    td.MIGRATION_CMD(6232, 2)]
        results = [td.LUN_PROPERTY(vol1_in_new_cg['name'] + '_dest',
                                   lunid=1),
                   td.LUN_PROPERTY(vol1_in_new_cg['name'], lunid=6231),
                   td.LUN_PROPERTY(vol2_in_new_cg['name'] + '_dest',
                                   lunid=2),
                   td.LUN_PROPERTY(vol2_in_new_cg['name'], lunid=6232),
                   FAKE_ERROR_RETURN]
        fake_cli = self.driverSetup(commands, results)
        self.assertRaisesRegex(exception.VolumeBackendAPIException,
                               'Migrate volume failed',
                               self.driver.create_consistencygroup_from_src,
                               None, new_cg, [vol1_in_new_cg, vol2_in_new_cg],
                               cgsnapshot=src_cgsnap,
                               snapshots=[snap1_in_src_cgsnap,
                                          snap2_in_src_cgsnap],
                               source_cg=None, source_vols=None)
        # Rollback runs in reverse order: vol2's artifacts first, then
        # vol1's, then the temporary snapshot.
        expect_cmd = [
            mock.call(*self.testData.LUN_DELETE_CMD(
                vol2_in_new_cg['name'] + '_dest')),
            mock.call('lun', '-detach', '-name', vol2_in_new_cg['name'], '-o'),
            mock.call(*self.testData.LUN_DELETE_CMD(vol2_in_new_cg['name'])),
            mock.call(*self.testData.LUN_DELETE_CMD(
                vol1_in_new_cg['name'] + '_dest')),
            mock.call('lun', '-detach', '-name', vol1_in_new_cg['name'], '-o'),
            mock.call(*self.testData.LUN_DELETE_CMD(vol1_in_new_cg['name'])),
            mock.call(*td.SNAP_DELETE_CMD(copied_snap_name), poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
    def test_deregister_initiator(self):
        """With itor_auto_dereg on, terminate_connection removes the HBA.

        The helper-level calls are mocked out so the test only checks the
        'port -removeHBA' CLI command issued for the connector's
        initiator.
        """
        fake_cli = self.driverSetup()
        self.driver.cli.destroy_empty_sg = True
        self.driver.cli.itor_auto_dereg = True
        cli_helper = self.driver.cli._client
        data = {'storage_group_name': "fakehost",
                'storage_group_uid': "2F:D4:00:00:00:00:00:"
                "00:00:00:FF:E5:3A:03:FD:6D",
                'lunmap': {1: 16}}
        cli_helper.get_storage_group = mock.Mock(
            return_value=data)
        lun_info = {'lun_name': "unit_test_lun",
                    'lun_id': 1,
                    'pool': "unit_test_pool",
                    'attached_snapshot': "N/A",
                    'owner': "A",
                    'total_capacity_gb': 1.0,
                    'state': "Ready"}
        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
        cli_helper.disconnect_host_from_storage_group = mock.Mock()
        cli_helper.delete_storage_group = mock.Mock()
        self.driver.terminate_connection(self.testData.test_volume,
                                         self.testData.connector)
        expect_cmd = [
            mock.call('port', '-removeHBA', '-hbauid',
                      self.testData.connector['initiator'],
                      '-o')]
        fake_cli.assert_has_calls(expect_cmd)
def test_unmanage(self):
self.driverSetup()
try:
self.driver.unmanage(self.testData.test_volume)
except NotImplementedError:
self.fail('Interface unmanage need to be implemented')
@mock.patch("random.shuffle", mock.Mock())
def test_find_available_iscsi_targets_without_pingnode(self):
self.configuration.iscsi_initiators = None
self.driverSetup()
port_a1 = {'Port WWN': 'fake_iqn_a1',
'SP': 'A',
'Port ID': 1,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_a1'}
port_a2 = {'Port WWN': 'fake_iqn_a2',
'SP': 'A',
'Port ID': 2,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_a2'}
port_b1 = {'Port WWN': 'fake_iqn_b1',
'SP': 'B',
'Port ID': 1,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_b1'}
all_targets = {'A': [port_a1, port_a2],
'B': [port_b1]}
targets = self.driver.cli._client.find_available_iscsi_targets(
'fakehost',
{('A', 2, 0), ('B', 1, 0)},
all_targets)
self.assertTrue(port_a2 in targets)
self.assertTrue(port_b1 in targets)
@mock.patch.object(emc_vnx_cli.CommandLineHelper,
'ping_node')
def test_find_available_iscsi_targets_with_pingnode(self, ping_node):
self.configuration.iscsi_initiators = (
'{"fakehost": ["10.0.0.2"]}')
self.driverSetup()
port_a1 = {'Port WWN': 'fake_iqn_a1',
'SP': 'A',
'Port ID': 1,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_a1'}
port_a2 = {'Port WWN': 'fake_iqn_a2',
'SP': 'A',
'Port ID': 2,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_a2'}
port_b1 = {'Port WWN': 'fake_iqn_b1',
'SP': 'B',
'Port ID': 1,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_b1'}
all_targets = {'A': [port_a1, port_a2],
'B': [port_b1]}
ping_node.side_effect = [False, False, True]
targets = self.driver.cli._client.find_available_iscsi_targets(
'fakehost',
{('A', 2, 0), ('A', 1, 0), ('B', 1, 0)},
all_targets)
self.assertTrue(port_a1 in targets)
self.assertTrue(port_a2 in targets)
self.assertTrue(port_b1 in targets)
@mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
'EMCVnxCliBase.get_lun_owner',
mock.Mock(return_value='A'))
@mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
'CommandLineHelper.get_registered_spport_set',
mock.Mock())
@mock.patch.object(emc_vnx_cli.CommandLineHelper,
'find_available_iscsi_targets')
def test_vnx_get_iscsi_properties(self, find_available_iscsi_targets):
self.driverSetup()
port_a1 = {'Port WWN': 'fake_iqn_a1',
'SP': 'A',
'Port ID': 1,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_a1'}
port_b1 = {'Port WWN': 'fake_iqn_b1',
'SP': 'B',
'Port ID': 1,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_b1'}
find_available_iscsi_targets.return_value = [port_a1, port_b1]
connect_info = self.driver.cli.vnx_get_iscsi_properties(
self.testData.test_volume, self.testData.connector, 1, '')
expected_info = {
'target_discovered': True,
'target_iqns': [
'fake_iqn_a1',
'fake_iqn_b1'],
'target_iqn': 'fake_iqn_a1',
'target_luns': [1, 1],
'target_lun': 1,
'target_portals': [
'fake_ip_a1:3260',
'fake_ip_b1:3260'],
'target_portal': 'fake_ip_a1:3260',
'volume_id': '1'}
self.assertEqual(expected_info, connect_info)
def test_update_migrated_volume(self):
self.driverSetup()
expected_update = {'metadata': {'lun_type': 'lun'}}
model_update = self.driver.update_migrated_volume(
None, self.testData.test_volume,
self.testData.test_volume2, 'available')
self.assertDictMatch(expected_update, model_update)
class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase):
    """Tests for the array-based (multi-pool) EMC VNX iSCSI driver.

    The backend is configured without an explicit pool name so the
    driver reports statistics for every pool on the array.
    """

    def setUp(self):
        super(EMCVNXCLIDArrayBasedDriverTestCase, self).setUp()
        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': None,
             'volume_backend_name': 'namedbackend'})

    def generate_driver(self, conf):
        driver = emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
        return driver

    def test_get_volume_stats(self):
        """Stats include per-pool capability data for every pool."""
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.POOL_GET_ALL_CMD(True)]
        results = [self.testData.NDU_LIST_RESULT,
                   self.testData.POOL_GET_ALL_RESULT(True)]
        self.driverSetup(commands, results)
        stats = self.driver.get_volume_stats(True)

        # assertEqual reports both values on failure, unlike the former
        # assertTrue(a == b, msg) which only printed the message.
        self.assertEqual(VERSION, stats['driver_version'],
                         "driver_version is incorrect")
        self.assertEqual('iSCSI', stats['storage_protocol'],
                         "storage_protocol is not correct")
        self.assertEqual("EMC", stats['vendor_name'],
                         "vendor name is not correct")
        self.assertEqual("namedbackend", stats['volume_backend_name'],
                         "volume backend name is not correct")
        self.assertEqual(2, len(stats['pools']))

        pool_stats1 = stats['pools'][0]
        expected_pool_stats1 = {
            'free_capacity_gb': 3105.303,
            'reserved_percentage': 32,
            'location_info': 'unit_test_pool|fakeSerial',
            'total_capacity_gb': 3281.146,
            'provisioned_capacity_gb': 536.140,
            'compression_support': 'True',
            'deduplication_support': 'True',
            'thin_provisioning_support': True,
            'thick_provisioning_support': True,
            'consistencygroup_support': 'True',
            'pool_name': 'unit_test_pool',
            'max_over_subscription_ratio': 20.0,
            'fast_cache_enabled': 'True',
            'fast_support': 'True'}
        self.assertEqual(expected_pool_stats1, pool_stats1)

        pool_stats2 = stats['pools'][1]
        expected_pool_stats2 = {
            'free_capacity_gb': 3984.768,
            'reserved_percentage': 32,
            'location_info': 'unit_test_pool2|fakeSerial',
            'total_capacity_gb': 4099.992,
            'provisioned_capacity_gb': 636.240,
            'compression_support': 'True',
            'deduplication_support': 'True',
            'thin_provisioning_support': True,
            'thick_provisioning_support': True,
            'consistencygroup_support': 'True',
            'pool_name': 'unit_test_pool2',
            'max_over_subscription_ratio': 20.0,
            'fast_cache_enabled': 'False',
            'fast_support': 'True'}
        self.assertEqual(expected_pool_stats2, pool_stats2)

    def test_get_volume_stats_wo_fastcache(self):
        """Without the relevant licenses all feature flags report False."""
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.POOL_GET_ALL_CMD(False)]
        results = [self.testData.NDU_LIST_RESULT_WO_LICENSE,
                   self.testData.POOL_GET_ALL_RESULT(False)]
        self.driverSetup(commands, results)

        stats = self.driver.get_volume_stats(True)

        self.assertEqual(2, len(stats['pools']))
        pool_stats1 = stats['pools'][0]
        expected_pool_stats1 = {
            'free_capacity_gb': 3105.303,
            'reserved_percentage': 32,
            'location_info': 'unit_test_pool|fakeSerial',
            'total_capacity_gb': 3281.146,
            'provisioned_capacity_gb': 536.140,
            'compression_support': 'False',
            'deduplication_support': 'False',
            'thin_provisioning_support': False,
            'thick_provisioning_support': True,
            'consistencygroup_support': 'False',
            'pool_name': 'unit_test_pool',
            'max_over_subscription_ratio': 20.0,
            'fast_cache_enabled': 'False',
            'fast_support': 'False'}
        self.assertEqual(expected_pool_stats1, pool_stats1)

        pool_stats2 = stats['pools'][1]
        expected_pool_stats2 = {
            'free_capacity_gb': 3984.768,
            'reserved_percentage': 32,
            'location_info': 'unit_test_pool2|fakeSerial',
            'total_capacity_gb': 4099.992,
            'provisioned_capacity_gb': 636.240,
            'compression_support': 'False',
            'deduplication_support': 'False',
            'thin_provisioning_support': False,
            'thick_provisioning_support': True,
            'consistencygroup_support': 'False',
            'pool_name': 'unit_test_pool2',
            'max_over_subscription_ratio': 20.0,
            'fast_cache_enabled': 'False',
            'fast_support': 'False'}
        self.assertEqual(expected_pool_stats2, pool_stats2)

    def test_get_volume_stats_storagepool_states(self):
        """Pools in unusable states must report zero free capacity."""
        commands = [self.testData.POOL_GET_ALL_CMD(False)]
        results = [self.testData.POOL_GET_ALL_STATES_TEST
                   (['Initializing', 'Ready', 'Faulted',
                     'Offline', 'Deleting'])]
        self.driverSetup(commands, results)
        stats = self.driver.get_volume_stats(True)
        # Initializing / Offline / Deleting pools are unusable;
        # Ready / Faulted pools still advertise capacity.
        self.assertEqual(0, stats['pools'][0]['free_capacity_gb'],
                         "free_capacity_gb is incorrect")
        self.assertNotEqual(0, stats['pools'][1]['free_capacity_gb'],
                            "free_capacity_gb is incorrect")
        self.assertNotEqual(0, stats['pools'][2]['free_capacity_gb'],
                            "free_capacity_gb is incorrect")
        self.assertEqual(0, stats['pools'][3]['free_capacity_gb'],
                         "free_capacity_gb is incorrect")
        self.assertEqual(0, stats['pools'][4]['free_capacity_gb'],
                         "free_capacity_gb is incorrect")

    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'deduplicated'}))
    def test_create_volume_deduplicated(self):
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type')]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True)]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        # Case
        self.driver.create_volume(self.testData.test_volume_with_type)

        # Verification
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'deduplicated', None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                      poll=False)]
        fake_cli.assert_has_calls(expect_cmd)

    def test_get_pool(self):
        testVolume = self.testData.test_volume_with_type
        commands = [self.testData.LUN_PROPERTY_POOL_CMD(testVolume['name'])]
        results = [self.testData.LUN_PROPERTY(testVolume['name'], False)]
        fake_cli = self.driverSetup(commands, results)
        pool = self.driver.get_pool(testVolume)
        self.assertEqual('unit_test_pool', pool)
        fake_cli.assert_has_calls(
            [mock.call(*self.testData.LUN_PROPERTY_POOL_CMD(
                testVolume['name']), poll=False)])

    def test_get_target_pool_for_cloned_volme(self):
        """The pool comes from the host entry; no CLI calls are needed."""
        testSrcVolume = self.testData.test_volume
        testNewVolume = self.testData.test_volume2
        fake_cli = self.driverSetup()
        pool = self.driver.cli.get_target_storagepool(testNewVolume,
                                                      testSrcVolume)
        self.assertEqual('unit_test_pool', pool)
        self.assertFalse(fake_cli.called)

    def test_get_target_pool_for_clone_legacy_volme(self):
        """Legacy source volumes require a CLI lookup to find the pool."""
        testSrcVolume = self.testData.test_legacy_volume
        testNewVolume = self.testData.test_volume2
        commands = [self.testData.LUN_PROPERTY_POOL_CMD(testSrcVolume['name'])]
        results = [self.testData.LUN_PROPERTY(testSrcVolume['name'], False)]
        fake_cli = self.driverSetup(commands, results)
        pool = self.driver.cli.get_target_storagepool(testNewVolume,
                                                      testSrcVolume)
        self.assertEqual('unit_test_pool', pool)
        fake_cli.assert_has_calls(
            [mock.call(*self.testData.LUN_PROPERTY_POOL_CMD(
                testSrcVolume['name']), poll=False)])

    def test_manage_existing_get_size(self):
        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
                       '-state', '-userCap', '-owner',
                       '-attachedSnapshot', '-poolName')
        test_size = 2
        commands = [get_lun_cmd]
        results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
        fake_cli = self.driverSetup(commands, results)
        test_volume = self.testData.test_volume2.copy()
        test_volume['host'] = "host@backendsec#unit_test_pool"
        get_size = self.driver.manage_existing_get_size(
            test_volume,
            self.testData.test_existing_ref)
        expected = [mock.call(*get_lun_cmd, poll=True)]
        self.assertEqual(test_size, get_size)
        fake_cli.assert_has_calls(expected)

    def test_manage_existing_get_size_incorrect_pool(self):
        """Test manage_existing function of driver with an invalid pool."""
        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
                       '-state', '-userCap', '-owner',
                       '-attachedSnapshot', '-poolName')
        commands = [get_lun_cmd]
        results = [self.testData.LUN_PROPERTY('lun_name')]
        fake_cli = self.driverSetup(commands, results)
        test_volume = self.testData.test_volume2.copy()
        test_volume['host'] = "host@backendsec#fake_pool"
        ex = self.assertRaises(
            exception.ManageExistingInvalidReference,
            self.driver.manage_existing_get_size,
            test_volume,
            self.testData.test_existing_ref)
        self.assertTrue(
            re.match(r'.*not managed by the host',
                     ex.msg))
        expected = [mock.call(*get_lun_cmd, poll=True)]
        fake_cli.assert_has_calls(expected)

    def test_manage_existing(self):
        lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
                          '-newName', 'vol_with_type', '-o')
        commands = [lun_rename_cmd]
        results = [SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        self.driver.manage_existing(
            self.testData.test_volume_with_type,
            self.testData.test_existing_ref)
        expected = [mock.call(*lun_rename_cmd, poll=False)]
        fake_cli.assert_has_calls(expected)

    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'Compressed',
                                'storagetype:pool': 'unit_test_pool'}))
    def test_create_compression_volume(self):
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)

        self.driver.cli.stats['compression_support'] = 'True'
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        # Case
        self.driver.create_volume(self.testData.test_volume_with_type)
        # Verification
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'compressed', None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=True),
            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
                1))]
        fake_cli.assert_has_calls(expect_cmd)

    def test_get_registered_spport_set(self):
        self.driverSetup()
        spport_set = self.driver.cli._client.get_registered_spport_set(
            'iqn.1993-08.org.debian:01:222', 'fakehost',
            self.testData.STORAGE_GROUP_HAS_MAP_ISCSI('fakehost')[0])
        self.assertEqual({('A', 2, 0), ('A', 0, 0), ('B', 2, 0)}, spport_set)

    def test_validate_iscsi_port(self):
        """Port validation against a 'port -list' style output."""
        self.driverSetup()
        port_list = (
            "SP:  A\n"
            "Port ID:  6\n"
            "Port WWN:  iqn.fake.a6\n"
            "iSCSI Alias:  1111.a6\n"
            "\n"
            "Virtual Port ID:  0\n"
            "VLAN ID:  Disabled\n"
            "\n"
            "SP:  B\n"
            "Port ID:  7\n"
            "Port WWN:  iqn.fake.b7\n"
            "iSCSI Alias:  0235.b7"
            "\n"
            "Virtual Port ID:  0\n"
            "VLAN ID:  Disabled\n"
            "\n"
            "Virtual Port ID:  1\n"
            "VLAN ID:  200\n"
            "\n\n")
        self.assertFalse(self.driver.cli._validate_iscsi_port(
            'A', 5, 0, port_list))
        self.assertTrue(self.driver.cli._validate_iscsi_port(
            'A', 6, 0, port_list))
        self.assertFalse(self.driver.cli._validate_iscsi_port(
            'A', 6, 2, port_list))
        self.assertTrue(self.driver.cli._validate_iscsi_port(
            'B', 7, 1, port_list))
        self.assertTrue(self.driver.cli._validate_iscsi_port(
            'B', 7, 0, port_list))
        self.assertFalse(self.driver.cli._validate_iscsi_port(
            'B', 7, 2, port_list))
class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
    """Tests for the EMC VNX Fibre Channel driver."""

    def generate_driver(self, conf):
        return emc_cli_fc.EMCCLIFCDriver(configuration=conf)

    @mock.patch(
        "oslo_concurrency.processutils.execute",
        mock.Mock(
            return_value=(
                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection_fc_auto_reg(self):
        # Test for auto registration
        test_volume = self.testData.test_volume.copy()
        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]

        fake_cli = self.driverSetup(commands, results)
        self.driver.initialize_connection(
            test_volume,
            self.testData.connector)

        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('port', '-list', '-sp'),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:90'
                                    ':12:34:56', 'A', '0', None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:90'
                                    ':12:34:56', 'B', '2', None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78:90'
                                    ':54:32:16', 'A', '0', None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78:90'
                                    ':54:32:16', 'B', '2', None, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call('port', '-list', '-gname', 'fakehost')
                    ]
        fake_cli.assert_has_calls(expected)

        # Test for manaul registration
        self.configuration.initiator_auto_registration = False

        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_NO_MAP('fakehost')],
                   ('', 0),
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        self.driver.initialize_connection(
            test_volume,
            self.testData.connector)

        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
                              '-gname', 'fakehost', '-o', poll=False),
                    mock.call('port', '-list', '-gname', 'fakehost')
                    ]
        fake_cli.assert_has_calls(expected)

    @mock.patch(
        "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
        "get_device_mapping_from_network",
        mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection_fc_auto_zoning(self):
        # Test for auto zoning
        self.configuration.zoning_mode = 'fabric'
        self.configuration.initiator_auto_registration = False
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
                    self.testData.GETFCPORT_CMD()]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
                   ('', 0),
                   self.testData.FC_PORTS]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.zonemanager_lookup_service =\
            fc_service.FCSanLookupService(configuration=self.configuration)

        conn_info = self.driver.initialize_connection(
            self.testData.test_volume,
            self.testData.connector)

        self.assertEqual(EMCVNXCLIDriverTestData.i_t_map,
                         conn_info['data']['initiator_target_map'])
        self.assertEqual(['1122334455667777'],
                         conn_info['data']['target_wwn'])
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call('storagegroup', '-list', '-gname', 'fakehost',
                              poll=True),
                    mock.call('port', '-list', '-sp')]
        fake_cli.assert_has_calls(expected)

    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection_fc_white_list(self):
        self.configuration.io_port_list = 'a-0,B-2'
        test_volume = self.testData.test_volume.copy()
        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_HAS_MAP_ISCSI('fakehost')],
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        data = self.driver.initialize_connection(
            test_volume,
            self.testData.connector)

        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:'
                                    '90:12:34:56', 'A', 0, None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:'
                                    '90:12:34:56', 'B', 2, None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78'
                                    ':90:54:32:16', 'A', 0, None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78'
                                    ':90:54:32:16', 'B', 2, None, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call('port', '-list', '-gname', 'fakehost')]
        fake_cli.assert_has_calls(expected)
        self.assertEqual(['5006016A0860080F', '5006016008600195'],
                         data['data']['target_wwn'])

    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection_fc_port_registered_wl(self):
        self.configuration.io_port_list = 'a-0,B-2'
        test_volume = self.testData.test_volume.copy()
        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        results = [self.testData.STORAGE_GROUP_ISCSI_FC_HBA('fakehost'),
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        data = self.driver.initialize_connection(
            test_volume,
            self.testData.connector)

        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:90'
                                    ':12:34:56', 'A', 0, None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78:'
                                    '90:54:32:16', 'A', 0, None, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call('port', '-list', '-gname', 'fakehost')]
        fake_cli.assert_has_calls(expected)
        self.assertEqual(['5006016A0860080F', '5006016008600195'],
                         data['data']['target_wwn'])

    @mock.patch(
        "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
        "get_device_mapping_from_network",
        mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
    def test_terminate_connection_remove_zone_false(self):
        self.driver = emc_cli_fc.EMCCLIFCDriver(
            configuration=self.configuration)
        cli_helper = self.driver.cli._client
        data = {'storage_group_name': "fakehost",
                'storage_group_uid': "2F:D4:00:00:00:00:00:"
                "00:00:00:FF:E5:3A:03:FD:6D",
                'lunmap': {1: 16, 2: 88, 3: 47}}
        cli_helper.get_storage_group = mock.Mock(
            return_value=data)
        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
        self.driver.cli.zonemanager_lookup_service =\
            fc_service.FCSanLookupService(configuration=self.configuration)
        connection_info = self.driver.terminate_connection(
            self.testData.test_volume,
            self.testData.connector)
        # Other LUNs remain mapped, so no zone removal data is returned.
        # (Message fixed: the assertion checks that 'data' is empty.)
        self.assertFalse(connection_info['data'],
                         'connection_info data should be empty.')
        cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
            16, self.testData.connector["host"])

    @mock.patch(
        "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
        "get_device_mapping_from_network",
        mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
    def test_terminate_connection_remove_zone_true(self):
        self.driver = emc_cli_fc.EMCCLIFCDriver(
            configuration=self.configuration)
        cli_helper = self.driver.cli._client
        data = {'storage_group_name': "fakehost",
                'storage_group_uid': "2F:D4:00:00:00:00:00:"
                "00:00:00:FF:E5:3A:03:FD:6D",
                'lunmap': {}}
        cli_helper.get_storage_group = mock.Mock(
            return_value=data)
        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
        self.driver.cli.zonemanager_lookup_service =\
            fc_service.FCSanLookupService(configuration=self.configuration)
        connection_info = self.driver.terminate_connection(
            self.testData.test_volume,
            self.testData.connector)
        # assertIn gives a clearer failure message than assertTrue(k in d).
        self.assertIn('initiator_target_map', connection_info['data'],
                      'initiator_target_map should be populated.')
        self.assertEqual(EMCVNXCLIDriverTestData.i_t_map,
                         connection_info['data']['initiator_target_map'])

    def test_get_volume_stats(self):
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.POOL_GET_ALL_CMD(True)]
        results = [self.testData.NDU_LIST_RESULT,
                   self.testData.POOL_GET_ALL_RESULT(True)]
        self.driverSetup(commands, results)
        stats = self.driver.get_volume_stats(True)

        # assertEqual reports both values on failure, unlike the former
        # assertTrue(a == b, msg) which only printed the message.
        self.assertEqual(VERSION, stats['driver_version'],
                         "driver_version is incorrect")
        self.assertEqual('FC', stats['storage_protocol'],
                         "storage_protocol is incorrect")
        self.assertEqual("EMC", stats['vendor_name'],
                         "vendor name is incorrect")
        self.assertEqual("namedbackend", stats['volume_backend_name'],
                         "volume backend name is incorrect")

        pool_stats = stats['pools'][0]
        expected_pool_stats = {
            'free_capacity_gb': 3105.303,
            'reserved_percentage': 32,
            'location_info': 'unit_test_pool|fakeSerial',
            'total_capacity_gb': 3281.146,
            'provisioned_capacity_gb': 536.14,
            'compression_support': 'True',
            'deduplication_support': 'True',
            'thin_provisioning_support': True,
            'thick_provisioning_support': True,
            'max_over_subscription_ratio': 20.0,
            'consistencygroup_support': 'True',
            'pool_name': 'unit_test_pool',
            'fast_cache_enabled': 'True',
            'fast_support': 'True'}
        self.assertEqual(expected_pool_stats, pool_stats)

    def test_get_volume_stats_too_many_luns(self):
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.POOL_GET_ALL_CMD(True),
                    self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
        results = [self.testData.NDU_LIST_RESULT,
                   self.testData.POOL_GET_ALL_RESULT(True),
                   self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
        fake_cli = self.driverSetup(commands, results)

        # With the threshold check enabled and the pool full of LUNs,
        # the pool must advertise zero free capacity.
        self.driver.cli.check_max_pool_luns_threshold = True
        stats = self.driver.get_volume_stats(True)
        pool_stats = stats['pools'][0]
        self.assertEqual(0, pool_stats['free_capacity_gb'],
                         "free_capacity_gb is incorrect")
        expect_cmd = [
            mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
                      poll=False)]
        fake_cli.assert_has_calls(expect_cmd)

        # With the check disabled the real free capacity is reported.
        self.driver.cli.check_max_pool_luns_threshold = False
        stats = self.driver.get_volume_stats(True)
        pool_stats = stats['pools'][0]
        self.assertIsNotNone(stats['driver_version'],
                             "driver_version is incorrect")
        self.assertEqual(3105.303, pool_stats['free_capacity_gb'],
                         "free_capacity_gb is incorrect")

    def test_deregister_initiator(self):
        fake_cli = self.driverSetup()
        self.driver.cli.destroy_empty_sg = True
        self.driver.cli.itor_auto_dereg = True
        cli_helper = self.driver.cli._client
        data = {'storage_group_name': "fakehost",
                'storage_group_uid': "2F:D4:00:00:00:00:00:"
                "00:00:00:FF:E5:3A:03:FD:6D",
                'lunmap': {1: 16}}
        cli_helper.get_storage_group = mock.Mock(
            return_value=data)
        lun_info = {'lun_name': "unit_test_lun",
                    'lun_id': 1,
                    'pool': "unit_test_pool",
                    'attached_snapshot': "N/A",
                    'owner': "A",
                    'total_capacity_gb': 1.0,
                    'state': "Ready"}
        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
        cli_helper.disconnect_host_from_storage_group = mock.Mock()
        cli_helper.delete_storage_group = mock.Mock()
        self.driver.terminate_connection(self.testData.test_volume,
                                         self.testData.connector)
        fc_itor_1 = '22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56'
        fc_itor_2 = '22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16'
        expect_cmd = [
            mock.call('port', '-removeHBA', '-hbauid', fc_itor_1, '-o'),
            mock.call('port', '-removeHBA', '-hbauid', fc_itor_2, '-o')]
        fake_cli.assert_has_calls(expect_cmd)
class EMCVNXCLIToggleSPTestData(object):
    """Test-data helper for the SP-toggle test cases."""

    def FAKE_COMMAND_PREFIX(self, sp_address):
        """Return the expected naviseccli invocation prefix for the
        given storage-processor management address.
        """
        prefix = ['/opt/Navisphere/bin/naviseccli', '-address', sp_address]
        prefix.extend(['-user', 'sysadmin', '-password', 'sysadmin'])
        prefix.extend(['-scope', 'global'])
        return tuple(prefix)
class EMCVNXCLIToggleSPTestCase(test.TestCase):
    """Tests SP fail-over when CLI commands against the active SP fail."""

    def setUp(self):
        super(EMCVNXCLIToggleSPTestCase, self).setUp()
        self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
        self.configuration.san_ip = '10.10.10.10'
        self.configuration.san_secondary_ip = "10.10.10.11"
        self.configuration.storage_vnx_pool_name = 'unit_test_pool'
        self.configuration.san_login = 'sysadmin'
        self.configuration.san_password = 'sysadmin'
        self.configuration.default_timeout = 1
        self.configuration.max_luns_per_storage_group = 10
        self.configuration.destroy_empty_storage_group = 10
        self.configuration.storage_vnx_authentication_type = "global"
        self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
        self.configuration.zoning_mode = None
        self.configuration.storage_vnx_security_file_dir = ""
        self.cli_client = emc_vnx_cli.CommandLineHelper(
            configuration=self.configuration)
        self.test_data = EMCVNXCLIToggleSPTestData()

    def _verify_sp_toggled(self, fake_error_msg):
        """Run a command whose first attempt fails with *fake_error_msg*.

        Verifies the client retries the command against the secondary SP
        and makes that SP the active one.  Factored out of the four
        toggle tests, which differed only in the error message.
        """
        self.cli_client.active_storage_ip = '10.10.10.10'
        FAKE_SUCCESS_RETURN = ('success', 0)
        FAKE_COMMAND = ('list', 'pool')
        SIDE_EFFECTS = [processutils.ProcessExecutionError(
            exit_code=255, stdout=fake_error_msg),
            FAKE_SUCCESS_RETURN]

        with mock.patch('cinder.utils.execute') as mock_utils:
            mock_utils.side_effect = SIDE_EFFECTS
            self.cli_client.command_execute(*FAKE_COMMAND)
            self.assertEqual("10.10.10.11",
                             self.cli_client.active_storage_ip)
            expected = [
                mock.call(
                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
                      + FAKE_COMMAND),
                    check_exit_code=True),
                mock.call(
                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
                      + FAKE_COMMAND),
                    check_exit_code=True)]
            mock_utils.assert_has_calls(expected)

    def test_no_sp_toggle(self):
        """A successful command leaves the active SP unchanged."""
        self.cli_client.active_storage_ip = '10.10.10.10'
        FAKE_SUCCESS_RETURN = ('success', 0)
        FAKE_COMMAND = ('list', 'pool')
        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN]

        with mock.patch('cinder.utils.execute') as mock_utils:
            mock_utils.side_effect = SIDE_EFFECTS
            self.cli_client.command_execute(*FAKE_COMMAND)
            self.assertEqual("10.10.10.10", self.cli_client.active_storage_ip)
            expected = [
                mock.call(*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
                          + FAKE_COMMAND), check_exit_code=True)]
            mock_utils.assert_has_calls(expected)

    def test_toggle_sp_with_server_unavailabe(self):
        FAKE_ERROR_MSG = """\
Error occurred during HTTP request/response from the target: '10.244.213.142'.
Message : HTTP/1.1 503 Service Unavailable"""
        self._verify_sp_toggled(FAKE_ERROR_MSG)

    def test_toggle_sp_with_end_of_data(self):
        FAKE_ERROR_MSG = """\
Error occurred during HTTP request/response from the target: '10.244.213.142'.
Message : End of data stream"""
        self._verify_sp_toggled(FAKE_ERROR_MSG)

    def test_toggle_sp_with_connection_refused(self):
        FAKE_ERROR_MSG = """\
A network error occurred while trying to connect: '10.244.213.142'.
Message : Error occurred because connection refused. \
Unable to establish a secure connection to the Management Server.
"""
        self._verify_sp_toggled(FAKE_ERROR_MSG)

    def test_toggle_sp_with_connection_error(self):
        FAKE_ERROR_MSG = """\
A network error occurred while trying to connect: '192.168.1.56'.
Message : Error occurred because of time out"""
        self._verify_sp_toggled(FAKE_ERROR_MSG)
class EMCVNXCLIBackupTestCase(DriverTestCaseBase):
    """Provides cli-level and client-level mock test."""

    def driverSetup(self):
        """Build an iSCSI driver with a mocked client and a fake snapshot.

        Returns the mocked client so individual tests can assert on it.
        """
        self.context = context.get_admin_context()
        self.driver = self.generate_driver(self.configuration)
        self.driver.cli._client = mock.Mock()
        self.snapshot = fake_snapshot.fake_snapshot_obj(
            self.context, **self.testData.test_snapshot)
        self.snapshot.volume = fake_volume.fake_volume_obj(self.context)
        return self.driver.cli._client

    def generate_driver(self, conf):
        """Build the iSCSI flavor of the driver from *conf*."""
        return emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)

    @patch.object(emc_vnx_cli.EMCVnxCliBase, 'terminate_connection')
    def test_terminate_connection_snapshot(self, terminate_connection):
        """Terminating a snapshot connection detaches its mount point."""
        mocked_client = self.driverSetup()
        smp_name = 'tmp-smp-' + self.snapshot['id']
        self.driver.terminate_connection_snapshot(
            self.snapshot, self.testData.connector)
        terminate_connection.assert_called_once_with(
            {'name': smp_name}, self.testData.connector)
        mocked_client.detach_mount_point.assert_called_once_with(smp_name)

    @patch.object(emc_vnx_cli.EMCVnxCliBase, 'initialize_connection')
    def test_initialize_connection_snapshot(self, initialize_connection):
        """Initializing a snapshot connection attaches the mount point."""
        mocked_client = self.driverSetup()
        smp_name = 'tmp-smp-' + self.snapshot['id']
        self.driver.initialize_connection_snapshot(
            self.snapshot, self.testData.connector)
        mocked_client.attach_mount_point.assert_called_once_with(
            smp_name, self.snapshot['name'])
        initialize_connection.assert_called_once_with(
            {'name': smp_name, 'id': self.snapshot['id']},
            self.testData.connector)

    def test_create_export_snapshot(self):
        """Exporting a snapshot creates a temporary SMP mount point."""
        mocked_client = self.driverSetup()
        smp_name = 'tmp-smp-' + self.snapshot['id']
        self.driver.create_export_snapshot(
            None, self.snapshot, self.testData.connector)
        mocked_client.create_mount_point.assert_called_once_with(
            self.snapshot['volume_name'], smp_name)

    @patch.object(emc_vnx_cli.EMCVnxCliBase, 'delete_volume')
    def test_remove_export_snapshot(self, delete_volume):
        """Removing the export force-deletes the temporary SMP volume."""
        self.driverSetup()
        smp_name = 'tmp-smp-' + self.snapshot['id']
        self.driver.remove_export_snapshot(None, self.snapshot)
        delete_volume.assert_called_once_with(
            {'name': smp_name, 'provider_location': None}, True)
class EMCVNXCLIMultiPoolsTestCase(DriverTestCaseBase):
    """Tests pool discovery when multiple storage pools are configured."""

    def generate_driver(self, conf):
        """Build the iSCSI flavor of the driver from *conf*."""
        driver = emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
        return driver

    def fake_command_execute_for_driver_setup(self, *command, **kwargv):
        """Fake CLI dispatcher used while the driver initializes.

        Reports four Ready pools so tests can select subsets of them;
        any other command simply succeeds.
        """
        if command == ('connection', '-getport', '-address', '-vlanid'):
            return self.testData.ALL_PORTS
        elif command == ('storagepool', '-list', '-state'):
            return self.testData.POOL_GET_STATE_RESULT([
                {'pool_name': self.testData.test_pool_name, 'state': "Ready"},
                {'pool_name': "unit_test_pool2", 'state': "Ready"},
                {'pool_name': "unit_test_pool3", 'state': "Ready"},
                {'pool_name': "unit_text_pool4", 'state': "Ready"}])
        else:
            return SUCCEED

    def test_storage_pool_names_option(self):
        """storage_vnx_pool_names selects exactly the named existing pools."""
        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': "unit_test_pool, unit_test_pool3",
             'volume_backend_name': 'namedbackend'})
        driver = self.generate_driver(self.configuration)
        self.assertEqual(set(["unit_test_pool", "unit_test_pool3"]),
                         driver.cli.storage_pools)

        # A trailing comma must not yield an empty pool name.
        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': "unit_test_pool2,",
             'volume_backend_name': 'namedbackend'})
        driver = self.generate_driver(self.configuration)
        self.assertEqual(set(["unit_test_pool2"]),
                         driver.cli.storage_pools)

        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': "unit_test_pool3",
             'volume_backend_name': 'namedbackend'})
        driver = self.generate_driver(self.configuration)
        self.assertEqual(set(["unit_test_pool3"]),
                         driver.cli.storage_pools)

    def test_configured_pool_does_not_exist(self):
        """Nonexistent pools are dropped; an all-nonexistent config raises."""
        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': "unit_test_pool2, unit_test_pool_none2",
             'volume_backend_name': 'namedbackend'})
        driver = self.generate_driver(self.configuration)
        self.assertEqual(set(["unit_test_pool2"]),
                         driver.cli.storage_pools)

        # BUG FIX: the dict literal was garbled — a misplaced comma merged
        # the second fake pool name with the 'volume_backend_name' key via
        # implicit string concatenation, silently dropping the backend name.
        # The intent is two nonexistent pools plus a backend name.
        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': "unit_test_pool_none1, "
                                       "unit_test_pool_none2",
             'volume_backend_name': 'namedbackend'})
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.generate_driver,
                          self.configuration)

    def test_no_storage_pool_is_configured(self):
        """With no pool option set, no pools are selected at setup time."""
        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': None,
             'volume_backend_name': 'namedbackend'})
        driver = self.generate_driver(self.configuration)
        self.assertEqual(set(),
                         driver.cli.storage_pools)
VNXError = emc_vnx_cli.VNXError
class VNXErrorTest(test.TestCase):
    """Checks VNXError.has_error against captured naviseccli messages."""

    def test_has_error(self):
        out = "The specified snapshot name is already in use. (0x716d8005)"
        self.assertTrue(VNXError.has_error(out))

    def test_has_error_with_specific_error(self):
        out = "The specified snapshot name is already in use. (0x716d8005)"
        # Matches its own error code, but not an unrelated one.
        self.assertTrue(
            VNXError.has_error(out, VNXError.SNAP_NAME_EXISTED))
        self.assertFalse(
            VNXError.has_error(out, VNXError.LUN_ALREADY_EXPANDED))

    def test_has_error_not_found(self):
        out = "Cannot find the consistency group."
        self.assertTrue(VNXError.has_error(out))
        self.assertTrue(
            VNXError.has_error(out, VNXError.GENERAL_NOT_FOUND))

    def test_has_error_not_exist(self):
        # Both "does not exist" and "may not exist" phrasings count.
        self.assertTrue(VNXError.has_error(
            "The specified snapshot does not exist.",
            VNXError.GENERAL_NOT_FOUND))
        self.assertTrue(VNXError.has_error(
            "The (pool lun) may not exist.",
            VNXError.GENERAL_NOT_FOUND))

    def test_has_error_multi_line(self):
        out = """Could not retrieve the specified (pool lun).
 The (pool lun) may not exist."""
        self.assertTrue(
            VNXError.has_error(out, VNXError.GENERAL_NOT_FOUND))

    def test_has_error_regular_string_false(self):
        out = "Cannot unbind LUN because it's contained in a Storage Group."
        self.assertFalse(
            VNXError.has_error(out, VNXError.GENERAL_NOT_FOUND))

    def test_has_error_multi_errors(self):
        out = "Cannot unbind LUN because it's contained in a Storage Group."
        # One matching candidate is enough ...
        self.assertTrue(VNXError.has_error(out,
                                           VNXError.LUN_IN_SG,
                                           VNXError.GENERAL_NOT_FOUND))
        # ... while an all-miss candidate list reports no error.
        self.assertFalse(VNXError.has_error(out,
                                            VNXError.LUN_ALREADY_EXPANDED,
                                            VNXError.LUN_NOT_MIGRATING))
# (removed: stray non-Python dataset-metadata row left over from text extraction)
import os
import re
import mock
from oslo_concurrency import processutils
import six
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.emc import emc_cli_fc
from cinder.volume.drivers.emc import emc_cli_iscsi
from cinder.volume.drivers.emc import emc_vnx_cli
from cinder.zonemanager import fc_san_lookup_service as fc_service
from mock import patch
SUCCEED = ("", 0)  # (stdout, rc) pair of a successful naviseccli call
FAKE_ERROR_RETURN = ("FAKE ERROR", 255)  # generic failed naviseccli call
VERSION = emc_vnx_cli.EMCVnxCliBase.VERSION  # driver version string under test
class EMCVNXCLIDriverTestData(object):
test_volume = {
'status': 'creating',
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'provider_location': 'system^FNM11111|type^lun|id^1|version^05.03.00',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': None
}
test_legacy_volume = {
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'provider_location': 'system^FNM11111|type^lun|id^1',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': None
}
test_volume_clone_cg = {
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': None,
'provider_location': 'system^FNM11111|type^lun|id^1',
}
test_volume_cg = {
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': 'cg_id'
}
test_volume_rw = {
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': None,
'provider_location': 'system^FNM11111|type^lun|id^1|version^05.03.00',
}
test_volume2 = {
'name': 'vol2',
'size': 1,
'volume_name': 'vol2',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol2',
'consistencygroup_id': None,
'display_description': 'test volume',
'volume_type_id': None,
'provider_location': 'system^FNM11111|type^lun|id^1|version^05.03.00',
'volume_metadata': [{'key': 'lun_type', 'value': 'lun'}]}
volume_in_cg = {
'name': 'vol2',
'size': 1,
'volume_name': 'vol2',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'vol1_in_cg',
'provider_location': 'system^FNM11111|type^lun|id^1',
'consistencygroup_id': 'consistencygroup_id',
'display_description': 'test volume',
'volume_type_id': None}
volume2_in_cg = {
'name': 'vol2',
'size': 1,
'volume_name': 'vol2',
'id': '3',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol2_in_cg',
'provider_location': 'system^FNM11111|type^lun|id^3',
'consistencygroup_id': 'consistencygroup_id',
'display_description': 'test volume',
'volume_type_id': None}
test_volume_with_type = {
'name': 'vol_with_type',
'size': 1,
'volume_name': 'vol_with_type',
'id': '1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'thin_vol',
'consistencygroup_id': None,
'display_description': 'vol with type',
'volume_type_id': 'abc1-2320-9013-8813-8941-1374-8112-1231',
'provider_location': 'system^FNM11111|type^lun|id^1'}
test_failed_volume = {
'name': 'failed_vol1',
'size': 1,
'volume_name': 'failed_vol1',
'id': '4',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'failed_vol',
'consistencygroup_id': None,
'display_description': 'test failed volume',
'volume_type_id': None}
test_volume1_in_sg = {
'name': 'vol1_in_sg',
'size': 1,
'volume_name': 'vol1_in_sg',
'id': '4',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'failed_vol',
'display_description': 'Volume 1 in SG',
'volume_type_id': None,
'provider_location': 'system^fakesn|type^lun|id^4|version^05.03.00'}
test_volume2_in_sg = {
'name': 'vol2_in_sg',
'size': 1,
'volume_name': 'vol2_in_sg',
'id': '5',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'failed_vol',
'display_description': 'Volume 2 in SG',
'volume_type_id': None,
'provider_location': 'system^fakesn|type^lun|id^3|version^05.03.00'}
test_snapshot = {
'name': 'snapshot1',
'size': 1,
'id': '4444',
'volume_name': 'vol1',
'volume': test_volume,
'volume_size': 1,
'consistencygroup_id': None,
'cgsnapshot_id': None,
'project_id': 'project'}
test_failed_snapshot = {
'name': 'failed_snapshot',
'size': 1,
'id': '5555',
'volume_name': 'vol-vol1',
'volume': test_volume,
'volume_size': 1,
'project_id': 'project'}
test_clone = {
'name': 'clone1',
'size': 1,
'id': '2',
'volume_name': 'vol1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'clone1',
'consistencygroup_id': None,
'display_description': 'volume created from snapshot',
'volume_type_id': '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
'provider_location': 'system^fakesn|type^lun|id^2|version^05.03.00'}
test_clone_cg = {
'name': 'clone1',
'size': 1,
'id': '2',
'volume_name': 'vol1',
'provider_auth': None,
'host': "host@backendsec#unit_test_pool",
'project_id': 'project',
'display_name': 'clone1',
'consistencygroup_id': 'consistencygroup_id',
'display_description': 'volume created from snapshot',
'volume_type_id': None,
'provider_location': 'system^fakesn|type^lun|id^2|version^05.03.00'}
connector = {
'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'wwpns': ["1234567890123456", "1234567890543216"],
'wwnns': ["2234567890123456", "2234567890543216"],
'host': 'fakehost'}
test_volume3 = {
'migration_status': None, 'availability_zone': 'nova',
'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
'name': 'vol3',
'size': 2,
'status': 'available',
'volume_type_id':
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
'deleted': False,
'host': "host@backendsec#unit_test_pool",
'source_volid': None, 'provider_auth': None,
'display_name': 'vol-test02',
'attach_status': 'detached',
'volume_type': [],
'volume_attachment': [],
'provider_location':
'system^FNM11111|type^lun|id^1|version^05.03.00',
'_name_id': None, 'volume_metadata': []}
test_new_type = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning': 'thin'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
test_diff = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provisioning': ('thick', 'thin')}}
test_host = {'host': 'ubuntu-server12@pool_backend_1#POOL_SAS1',
'capabilities':
{'pool_name': 'POOL_SAS1',
'location_info': 'POOL_SAS1|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
test_volume4 = {'migration_status': None, 'availability_zone': 'nova',
'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
'name': 'vol4',
'size': 2,
'status': 'available',
'volume_type_id':
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
'deleted': False, 'provider_location':
'system^FNM11111|type^lun|id^4',
'host': 'ubuntu-server12@array_backend_1',
'source_volid': None, 'provider_auth': None,
'display_name': 'vol-test02',
'volume_attachment': [],
'attach_status': 'detached',
'volume_type': [],
'_name_id': None, 'volume_metadata': []}
test_volume5 = {'migration_status': None, 'availability_zone': 'nova',
'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25',
'name_id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25',
'name': 'vol5',
'size': 1,
'status': 'available',
'volume_type_id':
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
'deleted': False, 'provider_location':
'system^FNM11111|type^lun|id^5|version^05.02.00',
'host': 'ubuntu-server12@array_backend_1#unit_test_pool',
'source_volid': None, 'provider_auth': None,
'display_name': 'vol-test05',
'volume_attachment': [],
'attach_status': 'detached',
'volume_type': [],
'_name_id': None, 'volume_metadata': []}
test_new_type2 = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:pool': 'POOL_SAS2'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
test_diff2 = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:pool': ('POOL_SAS1', 'POOL_SAS2')}}
test_host2 = {'host': 'ubuntu-server12@array_backend_1',
'capabilities':
{'location_info': '|FNM00124500890',
'volume_backend_name': 'array_backend_1',
'storage_protocol': 'iSCSI'}}
test_cg = {'id': 'consistencygroup_id',
'name': 'group_name',
'status': 'deleting'}
test_cg_with_type = {'id': 'consistencygroup_id',
'name': 'group_name',
'status': 'creating',
'volume_type_id':
'abc1-2320-9013-8813-8941-1374-8112-1231,'
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8,'}
test_cgsnapshot = {
'consistencygroup_id': 'consistencygroup_id',
'id': 'cgsnapshot_id',
'status': 'available'}
test_member_cgsnapshot = {
'name': 'snapshot1',
'size': 1,
'id': 'cgsnapshot_id',
'volume': test_volume,
'volume_name': 'vol1',
'volume_size': 1,
'consistencygroup_id': 'consistencygroup_id',
'cgsnapshot_id': 'cgsnapshot_id',
'project_id': 'project'
}
test_lun_id = 1
test_existing_ref = {'source-id': test_lun_id}
test_existing_ref_source_name = {'source-name': 'vol1'}
test_pool_name = 'unit_test_pool'
device_map = {
'1122334455667788': {
'initiator_port_wwn_list': ['123456789012345', '123456789054321'],
'target_port_wwn_list': ['1122334455667777']}}
i_t_map = {'123456789012345': ['1122334455667777'],
'123456789054321': ['1122334455667777']}
POOL_PROPERTY_CMD = ('storagepool', '-list', '-name', 'unit_test_pool',
'-userCap', '-availableCap',
'-state', '-prcntFullThreshold')
POOL_PROPERTY_W_FASTCACHE_CMD = ('storagepool', '-list', '-name',
'unit_test_pool', '-availableCap',
'-userCap', '-state',
'-subscribedCap',
'-prcntFullThreshold',
'-fastcache')
def POOL_GET_ALL_CMD(self, withfastcache=False):
if withfastcache:
return ('storagepool', '-list', '-availableCap',
'-userCap', '-state', '-subscribedCap',
'-prcntFullThreshold',
'-fastcache')
else:
return ('storagepool', '-list', '-availableCap',
'-userCap', '-state', '-subscribedCap',
'-prcntFullThreshold')
def POOL_GET_ALL_RESULT(self, withfastcache=False):
if withfastcache:
return ("Pool Name: unit_test_pool\n"
"Pool ID: 0\n"
"Percent Full Threshold: 70\n"
"User Capacity (Blocks): 6881061888\n"
"User Capacity (GBs): 3281.146\n"
"Available Capacity (Blocks): 6512292864\n"
"Available Capacity (GBs): 3105.303\n"
"Total Subscribed Capacity (GBs): 536.140\n"
"FAST Cache: Enabled\n"
"State: Ready\n"
"\n"
"Pool Name: unit_test_pool2\n"
"Pool ID: 1\n"
"Percent Full Threshold: 70\n"
"User Capacity (Blocks): 8598306816\n"
"User Capacity (GBs): 4099.992\n"
"Available Capacity (Blocks): 8356663296\n"
"Available Capacity (GBs): 3984.768\n"
"Total Subscribed Capacity (GBs): 636.240\n"
"FAST Cache: Disabled\n"
"State: Ready\n", 0)
else:
return ("Pool Name: unit_test_pool\n"
"Pool ID: 0\n"
"Percent Full Threshold: 70\n"
"User Capacity (Blocks): 6881061888\n"
"User Capacity (GBs): 3281.146\n"
"Available Capacity (Blocks): 6512292864\n"
"Available Capacity (GBs): 3105.303\n"
"Total Subscribed Capacity (GBs): 536.140\n"
"State: Ready\n"
"\n"
"Pool Name: unit_test_pool2\n"
"Pool ID: 1\n"
"Percent Full Threshold: 70\n"
"User Capacity (Blocks): 8598306816\n"
"User Capacity (GBs): 4099.992\n"
"Available Capacity (Blocks): 8356663296\n"
"Available Capacity (GBs): 3984.768\n"
"Total Subscribed Capacity (GBs): 636.240\n"
"State: Ready\n", 0)
def POOL_GET_STATE_RESULT(self, pools):
output = []
for i, po in enumerate(pools):
if i != 0:
output.append("\n")
output.append("Pool Name: %s" % po['pool_name'])
output.append("Pool ID: %s" % i)
output.append("State: %s" % po['state'])
return ("\n".join(output), 0)
def POOL_GET_ALL_STATES_TEST(self, states=['Ready']):
output = ""
for i, stat in enumerate(states):
out = ("Pool Name: Pool_" + str(i) + "\n"
"Pool ID: " + str(i) + "\n"
"Percent Full Threshold: 70\n"
"User Capacity (Blocks): 8598306816\n"
"User Capacity (GBs): 4099.992\n"
"Available Capacity (Blocks): 8356663296\n"
"Available Capacity (GBs): 3984.768\n"
"FAST Cache: Enabled\n"
"State: " + stat + "\n\n")
output += out
return (output, 0)
def SNAP_NOT_EXIST(self):
return ("Could not retrieve the specified (Snapshot).\n "
"The (Snapshot) may not exist", 9)
NDU_LIST_CMD = ('ndu', '-list')
NDU_LIST_RESULT = ("Name of the software package: -Compression " +
"Name of the software package: -Deduplication " +
"Name of the software package: -FAST " +
"Name of the software package: -FASTCache " +
"Name of the software package: -ThinProvisioning "
"Name of the software package: -VNXSnapshots",
0)
NDU_LIST_RESULT_WO_LICENSE = (
"Name of the software package: -Unisphere ",
0)
MIGRATE_PROPERTY_MIGRATING = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: MIGRATING
Percent Complete: 50
Time Remaining: 0 second(s)
"""
MIGRATE_PROPERTY_STOPPED = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: STOPPED - Destination full
Percent Complete: 60
Time Remaining: 0 second(s)
"""
def SNAP_MP_CREATE_CMD(self, name='vol1', source='vol1'):
return ('lun', '-create', '-type', 'snap', '-primaryLunName',
source, '-name', name)
def SNAP_ATTACH_CMD(self, name='vol1', snapName='snapshot1'):
return ('lun', '-attach', '-name', name, '-snapName', snapName)
def SNAP_DELETE_CMD(self, name):
return ('snap', '-destroy', '-id', name, '-o')
def SNAP_CREATE_CMD(self, name):
return ('snap', '-create', '-res', 1, '-name', name,
'-allowReadWrite', 'yes',
'-allowAutoDelete', 'no')
def SNAP_MODIFY_CMD(self, name, rw):
return ('snap', '-modify', '-id', name, '-allowReadWrite', rw,
'-allowAutoDelete', 'yes')
def SNAP_LIST_CMD(self, res_id=1):
cmd = ('snap', '-list', '-res', res_id)
return cmd
def LUN_DELETE_CMD(self, name):
return ('lun', '-destroy', '-name', name, '-forceDetach', '-o')
def LUN_EXTEND_CMD(self, name, newsize):
return ('lun', '-expand', '-name', name, '-capacity', newsize,
'-sq', 'gb', '-o', '-ignoreThresholds')
def LUN_PROPERTY_POOL_CMD(self, lunname):
return ('lun', '-list', '-name', lunname, '-poolName')
def LUN_PROPERTY_ALL_CMD(self, lunname):
return ('lun', '-list', '-name', lunname,
'-state', '-status', '-opDetails', '-userCap', '-owner',
'-attachedSnapshot')
def MIGRATION_CMD(self, src_id=1, dest_id=1):
cmd = ("migrate", "-start", "-source", src_id, "-dest", dest_id,
"-rate", "high", "-o")
return cmd
def MIGRATION_VERIFY_CMD(self, src_id):
return ("migrate", "-list", "-source", src_id)
def MIGRATION_CANCEL_CMD(self, src_id):
return ("migrate", "-cancel", "-source", src_id, '-o')
def GETPORT_CMD(self):
return ("connection", "-getport", "-address", "-vlanid")
def PINGNODE_CMD(self, sp, portid, vportid, ip):
return ("connection", "-pingnode", "-sp", sp, '-portid', portid,
"-vportid", vportid, "-address", ip, '-count', '1')
def GETFCPORT_CMD(self):
return ('port', '-list', '-sp')
def CONNECTHOST_CMD(self, hostname, gname):
return ('storagegroup', '-connecthost',
'-host', hostname, '-gname', gname, '-o')
def ENABLE_COMPRESSION_CMD(self, lun_id):
return ('compression', '-on',
'-l', lun_id, '-ignoreThresholds', '-o')
def STORAGEGROUP_LIST_CMD(self, gname=None):
if gname:
return ('storagegroup', '-list',
'-gname', gname, '-host', '-iscsiAttributes')
else:
return ('storagegroup', '-list')
def STORAGEGROUP_REMOVEHLU_CMD(self, gname, hlu):
return ('storagegroup', '-removehlu',
'-hlu', hlu, '-gname', gname, '-o')
def SNAP_COPY_CMD(self, src_snap, snap_name):
return ('snap', '-copy', '-id', src_snap, '-name', snap_name,
'-ignoreMigrationCheck', '-ignoreDeduplicationCheck')
def ALLOW_READWRITE_ON_SNAP_CMD(self, snap_name):
return ('snap', '-modify', '-id', snap_name,
'-allowReadWrite', 'yes', '-allowAutoDelete', 'yes')
provisioning_values = {
'thin': ['-type', 'Thin'],
'thick': ['-type', 'NonThin'],
'compressed': ['-type', 'Thin'],
'deduplicated': ['-type', 'Thin', '-deduplication', 'on']}
tiering_values = {
'starthighthenauto': [
'-initialTier', 'highestAvailable',
'-tieringPolicy', 'autoTier'],
'auto': [
'-initialTier', 'optimizePool',
'-tieringPolicy', 'autoTier'],
'highestavailable': [
'-initialTier', 'highestAvailable',
'-tieringPolicy', 'highestAvailable'],
'lowestavailable': [
'-initialTier', 'lowestAvailable',
'-tieringPolicy', 'lowestAvailable'],
'nomovement': [
'-initialTier', 'optimizePool',
'-tieringPolicy', 'noMovement']}
def LUN_CREATION_CMD(self, name, size, pool, provisioning, tiering,
ignore_thresholds=False, poll=True):
initial = ['lun', '-create',
'-capacity', size,
'-sq', 'gb',
'-poolName', pool,
'-name', name]
if not poll:
initial = ['-np'] + initial
if provisioning:
initial.extend(self.provisioning_values[provisioning])
else:
initial.extend(self.provisioning_values['thick'])
if tiering:
initial.extend(self.tiering_values[tiering])
if ignore_thresholds:
initial.append('-ignoreThresholds')
return tuple(initial)
def CHECK_FASTCACHE_CMD(self, storage_pool):
return ('storagepool', '-list', '-name',
storage_pool, '-fastcache')
def CREATE_CONSISTENCYGROUP_CMD(self, cg_name, members=None):
create_cmd = ('snap', '-group', '-create',
'-name', cg_name, '-allowSnapAutoDelete', 'no')
if not members:
return create_cmd
else:
return create_cmd + ('-res', ','.join(map(six.text_type,
members)))
def DELETE_CONSISTENCYGROUP_CMD(self, cg_name):
return ('-np', 'snap', '-group', '-destroy',
'-id', cg_name)
def ADD_LUN_TO_CG_CMD(self, cg_name, lun_id):
return ('snap', '-group',
'-addmember', '-id', cg_name, '-res', lun_id)
def CREATE_CG_SNAPSHOT(self, cg_name, snap_name):
return ('-np', 'snap', '-create', '-res', cg_name,
'-resType', 'CG', '-name', snap_name, '-allowReadWrite',
'yes', '-allowAutoDelete', 'no')
def DELETE_CG_SNAPSHOT(self, snap_name):
return ('-np', 'snap', '-destroy', '-id', snap_name, '-o')
def GET_CG_BY_NAME_CMD(self, cg_name):
return ('snap', '-group', '-list', '-id', cg_name)
def GET_SNAP(self, snap_name):
return ('snap', '-list', '-id', snap_name)
def REMOVE_LUNS_FROM_CG_CMD(self, cg_name, remove_ids):
return ('snap', '-group', '-rmmember', '-id', cg_name, '-res',
','.join(remove_ids))
def REPLACE_LUNS_IN_CG_CMD(self, cg_name, new_ids):
return ('snap', '-group', '-replmember', '-id', cg_name, '-res',
','.join(new_ids))
def CONSISTENCY_GROUP_VOLUMES(self):
volumes = []
volumes.append(self.test_volume)
volumes.append(self.test_volume)
return volumes
def SNAPS_IN_SNAP_GROUP(self):
snaps = []
snaps.append(self.test_snapshot)
snaps.append(self.test_snapshot)
return snaps
def VOLUMES_NOT_IN_CG(self):
add_volumes = []
add_volumes.append(self.test_volume4)
add_volumes.append(self.test_volume5)
return add_volumes
def VOLUMES_IN_CG(self):
remove_volumes = []
remove_volumes.append(self.volume_in_cg)
remove_volumes.append(self.volume2_in_cg)
return remove_volumes
def CG_PROPERTY(self, cg_name):
return """
Name: %(cg_name)s
Description:
Allow auto delete: No
Member LUN ID(s): 1, 3
State: Ready
""" % {'cg_name': cg_name}, 0
def CG_NOT_FOUND(self):
return ("Cannot find the consistency group. \n\n", 13)
def CG_REPL_ERROR(self):
return """
The specified LUN is already a member
of another consistency group. (0x716d8045)
""", 71
def LUN_PREP_ERROR(self):
return ("The operation cannot be performed because "
"the LUN is 'Preparing'. Wait for the LUN's "
"Current Operation to complete 'Preparing' "
"and retry the operation. (0x712d8e0e)", 14)
POOL_PROPERTY = (
"Pool Name: unit_test_pool\n"
"Pool ID: 1\n"
"Percent Full Threshold: 70\n"
"User Capacity (Blocks): 6881061888\n"
"User Capacity (GBs): 3281.146\n"
"Available Capacity (Blocks): 6832207872\n"
"Available Capacity (GBs): 3257.851\n"
"State: Ready\n"
"\n", 0)
POOL_PROPERTY_W_FASTCACHE = (
"Pool Name: unit_test_pool\n"
"Pool ID: 1\n"
"Percent Full Threshold: 70\n"
"User Capacity (Blocks): 6881061888\n"
"User Capacity (GBs): 3281.146\n"
"Available Capacity (Blocks): 6832207872\n"
"Available Capacity (GBs): 3257.851\n"
"Total Subscribed Capacity (GBs): 636.240\n"
"FAST Cache: Enabled\n"
"State: Ready\n\n", 0)
ALL_PORTS = ("SP: A\n" +
"Port ID: 4\n" +
"Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a4\n" +
"iSCSI Alias: 0215.a4\n\n" +
"Virtual Port ID: 0\n" +
"VLAN ID: Disabled\n" +
"IP Address: 10.244.214.118\n\n" +
"SP: A\n" +
"Port ID: 5\n" +
"Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a5\n" +
"iSCSI Alias: 0215.a5\n" +
"SP: A\n" +
"Port ID: 0\n" +
"Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a0\n" +
"iSCSI Alias: 0215.a0\n\n" +
"Virtual Port ID: 0\n" +
"VLAN ID: Disabled\n" +
"IP Address: 10.244.214.119\n\n" +
"SP: B\n" +
"Port ID: 2\n" +
"Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.b2\n" +
"iSCSI Alias: 0215.b2\n\n" +
"Virtual Port ID: 0\n" +
"VLAN ID: Disabled\n" +
"IP Address: 10.244.214.120\n\n", 0)
WHITE_LIST_PORTS = ("""SP: A
Port ID: 0
Port WWN: iqn.1992-04.com.emc:cx.fnmxxx.a0
iSCSI Alias: 0235.a7
Virtual Port ID: 0
VLAN ID: Disabled
IP Address: 192.168.3.52
SP: A
Port ID: 9
Port WWN: iqn.1992-04.com.emc:cx.fnmxxx.a9
iSCSI Alias: 0235.a9
SP: A
Port ID: 4
Port WWN: iqn.1992-04.com.emc:cx.fnmxxx.a4
iSCSI Alias: 0235.a4
SP: B
Port ID: 2
Port WWN: iqn.1992-04.com.emc:cx.fnmxxx.b2
iSCSI Alias: 0235.b6
Virtual Port ID: 0
VLAN ID: Disabled
IP Address: 192.168.4.53
""", 0)
iscsi_connection_info = {
'data': {'target_discovered': True,
'target_iqn':
'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
'target_lun': 2,
'target_portal': '10.244.214.118:3260',
'target_iqns': ['iqn.1992-04.com.emc:cx.fnm00124000215.a4'],
'target_luns': [2],
'target_portals': ['10.244.214.118:3260'],
'volume_id': '1'},
'driver_volume_type': 'iscsi'}
iscsi_connection_info_mp = {
'data': {'target_discovered': True,
'target_iqns': [
'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
'iqn.1992-04.com.emc:cx.fnm00124000215.a5'],
'target_iqn': 'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
'target_luns': [2, 2],
'target_lun': 2,
'target_portals': [
'10.244.214.118:3260',
'10.244.214.119:3260'],
'target_portal': '10.244.214.118:3260',
'volume_id': '1'},
'driver_volume_type': 'iscsi'}
PING_OK = ("Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" +
"Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" +
"Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" +
"Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n", 0)
FC_PORTS = ("Information about each SPPORT:\n" +
"\n" +
"SP Name: SP A\n" +
"SP Port ID: 0\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:60:08:60:01:95\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: YES\n" +
"Switch UID: 10:00:00:05:1E:72:EC:A6:" +
"20:46:00:05:1E:72:EC:A6\n" +
"SP Source ID: 272896\n" +
"\n" +
"SP Name: SP B\n" +
"SP Port ID: 4\n" +
"SP UID: iqn.1992-04.com.emc:cx." +
"fnm00124000215.b4\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: Not Applicable\n" +
"\n" +
"SP Name: SP A\n" +
"SP Port ID: 2\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:62:08:60:01:95\n" +
"Link Status: Down\n" +
"Port Status: Online\n" +
"Switch Present: NO\n" +
"\n" +
"SP Name: SP B\n" +
"SP Port ID: 2\n" +
"SP UID: 50:06:01:60:88:60:08:0F:"
"50:06:01:6A:08:60:08:0F\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: YES\n" +
"Switch UID: 10:00:50:EB:1A:03:3F:59:"
"20:11:50:EB:1A:03:3F:59\n" +
"SP Source ID: 69888\n", 0)
FAKEHOST_PORTS = (
"Information about each HBA:\n" +
"\n" +
"HBA UID: 20:00:00:90:FA:53:46:41:12:34:" +
"56:78:90:12:34:56\n" +
"Server Name: fakehost\n" +
"Server IP Address: 10.0.0.2" +
"HBA Model Description:\n" +
"HBA Vendor Description:\n" +
"HBA Device Driver Name:\n" +
"Information about each port of this HBA:\n\n" +
" SP Name: SP A\n" +
" SP Port ID: 0\n" +
" HBA Devicename:\n" +
" Trusted: NO\n" +
" Logged In: YES\n" +
" Defined: YES\n" +
" Initiator Type: 3\n" +
" StorageGroup Name: fakehost\n\n" +
" SP Name: SP A\n" +
" SP Port ID: 2\n" +
" HBA Devicename:\n" +
" Trusted: NO\n" +
" Logged In: YES\n" +
" Defined: YES\n" +
" Initiator Type: 3\n" +
" StorageGroup Name: fakehost\n\n" +
" SP Name: SP B\n" +
" SP Port ID: 2\n" +
" HBA Devicename:\n" +
" Trusted: NO\n" +
" Logged In: YES\n" +
" Defined: YES\n" +
" Initiator Type: 3\n" +
" StorageGroup Name: fakehost\n\n"
"Information about each SPPORT:\n" +
"\n" +
"SP Name: SP A\n" +
"SP Port ID: 0\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:60:08:60:01:95\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: YES\n" +
"Switch UID: 10:00:00:05:1E:72:EC:A6:" +
"20:46:00:05:1E:72:EC:A6\n" +
"SP Source ID: 272896\n" +
"\n" +
"SP Name: SP B\n" +
"SP Port ID: 4\n" +
"SP UID: iqn.1992-04.com.emc:cx." +
"fnm00124000215.b4\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: Not Applicable\n" +
"\n" +
"SP Name: SP A\n" +
"SP Port ID: 2\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:62:08:60:01:95\n" +
"Link Status: Down\n" +
"Port Status: Online\n" +
"Switch Present: NO\n" +
"\n" +
"SP Name: SP B\n" +
"SP Port ID: 2\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:6A:08:60:08:0F\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: YES\n" +
"Switch UID: 10:00:00:05:1E:72:EC:A6:" +
"20:46:00:05:1E:72:EC:A6\n" +
"SP Source ID: 272896\n", 0)
def LUN_PROPERTY(self, name, is_thin=False, has_snap=False, size=1,
state='Ready', faulted='false', operation='None',
lunid=1, pool_name='unit_test_pool'):
return ("""
LOGICAL UNIT NUMBER %(lunid)s
Name: %(name)s
UID: 60:06:01:60:09:20:32:00:13:DF:B4:EF:C2:63:E3:11
Current Owner: SP A
Default Owner: SP A
Allocation Owner: SP A
Attached Snapshot: %(has_snap)s
User Capacity (Blocks): 2101346304
User Capacity (GBs): %(size)d
Consumed Capacity (Blocks): 2149576704
Consumed Capacity (GBs): 1024.998
Pool Name: %(pool_name)s
Current State: %(state)s
Status: OK(0x0)
Is Faulted: %(faulted)s
Is Transitioning: false
Current Operation: %(operation)s
Current Operation State: N/A
Current Operation Status: N/A
Current Operation Percent Completed: 0
Is Thin LUN: %(is_thin)s""" % {
'lunid': lunid,
'name': name,
'has_snap': 'FakeSnap' if has_snap else 'N/A',
'size': size,
'pool_name': pool_name,
'state': state,
'faulted': faulted,
'operation': operation,
'is_thin': 'Yes' if is_thin else 'No'}, 0)
    def STORAGE_GROUP_ISCSI_FC_HBA(self, sgname):
        """Fake 'storagegroup -list' output for a mixed iSCSI/FC group.

        One iSCSI initiator (logged-in with IP/TPGT/ISID detail) plus two
        FC WWN initiators with no iSCSI session data, and a single
        HLU 1 -> ALU 1 mapping.  Exit code is 0.
        """
        return ("""\
        Storage Group Name:    %s
        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
        HBA/SP Pairs:
          HBA UID                                          SP Name     SPPort
          -------                                          -------     ------
          iqn.1993-08.org.debian:01:222                     SP A         4
        Host name:             fakehost
        SPPort:                A-4v0
        Initiator IP:          fakeip
        TPGT:                  3
        ISID:                  fakeid
          22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56   SP B         2
        Host name:             fakehost2
        SPPort:                B-2v0
        Initiator IP:          N/A
        TPGT:                  0
        ISID:                  N/A
          22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16   SP B         2
        Host name:             fakehost2
        SPPort:                B-2v0
        Initiator IP:          N/A
        TPGT:                  0
        ISID:                  N/A
        HLU/ALU Pairs:
          HLU Number     ALU Number
          ----------     ----------
            1               1
        Shareable:             YES""" % sgname, 0)
def STORAGE_GROUP_NO_MAP(self, sgname):
return ("""\
Storage Group Name: %s
Storage Group UID: 27:D2:BE:C1:9B:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
Shareable: YES""" % sgname, 0)
    def STORAGE_GROUP_HAS_MAP(self, sgname):
        """Fake storage-group listing with one iSCSI initiator on SP A
        port 4 and a single HLU 1 -> ALU 1 mapping.  Exit code 0.
        """
        return ("""\
        Storage Group Name:    %s
        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
        HBA/SP Pairs:
          HBA UID                                          SP Name     SPPort
          -------                                          -------     ------
          iqn.1993-08.org.debian:01:222                     SP A         4
        Host name:             fakehost
        SPPort:                A-4v0
        Initiator IP:          fakeip
        TPGT:                  3
        ISID:                  fakeid
        HLU/ALU Pairs:
          HLU Number     ALU Number
          ----------     ----------
            1               1
        Shareable:             YES""" % sgname, 0)
    def STORAGE_GROUP_HAS_MAP_ISCSI(self, sgname):
        """Fake storage-group listing: one iSCSI initiator registered on
        three SP ports (A-2, A-0, B-2), one HLU 1 -> ALU 1 mapping.
        """
        return ("""\
        Storage Group Name:    %s
        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
        HBA/SP Pairs:
          HBA UID                                          SP Name     SPPort
          -------                                          -------     ------
          iqn.1993-08.org.debian:01:222                     SP A         2
        Host name:             fakehost
        SPPort:                A-2v0
        Initiator IP:          fakeip
        TPGT:                  3
        ISID:                  fakeid
          iqn.1993-08.org.debian:01:222                     SP A         0
        Host name:             fakehost
        SPPort:                A-0v0
        Initiator IP:          fakeip
        TPGT:                  3
        ISID:                  fakeid
          iqn.1993-08.org.debian:01:222                     SP B         2
        Host name:             fakehost
        SPPort:                B-2v0
        Initiator IP:          fakeip
        TPGT:                  3
        ISID:                  fakeid
        HLU/ALU Pairs:
          HLU Number     ALU Number
          ----------     ----------
            1               1
        Shareable:             YES""" % sgname, 0)
    def STORAGE_GROUP_HAS_MAP_MP(self, sgname):
        """Fake storage-group listing for multipath: the same iSCSI
        initiator registered on SP A ports 4 and 5, one HLU 1 -> ALU 1.
        """
        return ("""\
        Storage Group Name:    %s
        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
        HBA/SP Pairs:
          HBA UID                                          SP Name     SPPort
          -------                                          -------     ------
          iqn.1993-08.org.debian:01:222                     SP A         4
        Host name:             fakehost
        SPPort:                A-4v0
        Initiator IP:          fakeip
        TPGT:                  3
        ISID:                  fakeid
          iqn.1993-08.org.debian:01:222                     SP A         5
        Host name:             fakehost
        SPPort:                A-5v0
        Initiator IP:          fakeip
        TPGT:                  3
        ISID:                  fakeid
        HLU/ALU Pairs:
          HLU Number     ALU Number
          ----------     ----------
            1               1
        Shareable:             YES""" % sgname, 0)
    def STORAGE_GROUP_HAS_MAP_2(self, sgname):
        """Fake storage-group listing with one iSCSI initiator and two
        HLU/ALU mappings (1 -> 1 and 2 -> 3).  Exit code 0.
        """
        return ("""\
        Storage Group Name:    %s
        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
        HBA/SP Pairs:
          HBA UID                                          SP Name     SPPort
          -------                                          -------     ------
          iqn.1993-08.org.debian:01:222                     SP A         4
        Host name:             fakehost
        SPPort:                A-4v0
        Initiator IP:          fakeip
        TPGT:                  3
        ISID:                  fakeid
        HLU/ALU Pairs:
          HLU Number     ALU Number
          ----------     ----------
            1               1
            2               3
        Shareable:             YES""" % sgname, 0)
def POOL_FEATURE_INFO_POOL_LUNS_CMD(self):
cmd = ('storagepool', '-feature', '-info',
'-maxPoolLUNs', '-numPoolLUNs')
return cmd
def POOL_FEATURE_INFO_POOL_LUNS(self, max, total):
return (('Max. Pool LUNs: %s\n' % max) +
('Total Number of Pool LUNs: %s\n' % total), 0)
    def STORAGE_GROUPS_HAS_MAP(self, sgname1, sgname2):
        """Fake multi-group 'storagegroup -list' output.

        Contains an unrelated group ("irrelative") followed by the two
        named groups; each named group maps ALUs 3 and 4 at different
        HLUs (31/41 vs 32/42) so callers can tell them apart.
        """
        return ("""
        Storage Group Name:    irrelative
        Storage Group UID:     9C:86:4F:30:07:76:E4:11:AC:83:C8:C0:8E:9C:D6:1F
        HBA/SP Pairs:
          HBA UID                                          SP Name     SPPort
          -------                                          -------     ------
          iqn.1993-08.org.debian:01:5741c6307e60            SP A         6
        Host name:             fakehost
        SPPort:                A-6v0
        Initiator IP:          fakeip
        TPGT:                  3
        ISID:                  fakeid
        Storage Group Name:    %(sgname1)s
        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
        HBA/SP Pairs:
          HBA UID                                          SP Name     SPPort
          -------                                          -------     ------
          iqn.1993-08.org.debian:01:222                     SP A         4
        Host name:             fakehost
        SPPort:                A-4v0
        Initiator IP:          fakeip
        TPGT:                  3
        ISID:                  fakeid
        HLU/ALU Pairs:
          HLU Number     ALU Number
          ----------     ----------
            31               3
            41               4
        Shareable:             YES
        Storage Group Name:    %(sgname2)s
        Storage Group UID:     9C:86:4F:30:07:76:E4:11:AC:83:C8:C0:8E:9C:D6:1F
        HBA/SP Pairs:
          HBA UID                                          SP Name     SPPort
          -------                                          -------     ------
          iqn.1993-08.org.debian:01:5741c6307e60            SP A         6
        Host name:             fakehost
        SPPort:                A-6v0
        Initiator IP:          fakeip
        TPGT:                  3
        ISID:                  fakeid
        HLU/ALU Pairs:
          HLU Number     ALU Number
          ----------     ----------
            32               3
            42               4
        Shareable:             YES""" % {'sgname1': sgname1,
                                         'sgname2': sgname2}, 0)
def LUN_DELETE_IN_SG_ERROR(self, up_to_date=True):
if up_to_date:
return ("Cannot unbind LUN "
"because it's contained in a Storage Group",
156)
else:
return ("SP B: Request failed. "
"Host LUN/LUN mapping still exists.",
0)
def set_path_cmd(self, gname, hba, sp, spport, vport=None, ip=None):
if vport is None:
return ('storagegroup', '-setpath', '-gname', gname,
'-hbauid', hba,
'-sp', sp, '-spport', spport,
'-ip', ip, '-host', gname, '-o')
return ('storagegroup', '-setpath', '-gname', gname,
'-hbauid', hba,
'-sp', sp, '-spport', spport, '-spvport', vport,
'-ip', ip, '-host', gname, '-o')
class DriverTestCaseBase(test.TestCase):
    """Common fixture for the VNX driver tests.

    Stubs out the naviseccli command layer and builds a canned driver
    configuration; subclasses provide ``generate_driver`` to pick the
    protocol flavor (iSCSI/FC).
    """
    def setUp(self):
        """Stub CLI execution/polling intervals and build a fake config."""
        super(DriverTestCaseBase, self).setUp()
        # All CLI traffic during driver construction goes through the
        # canned fake below; real naviseccli is never invoked.
        self.stubs.Set(emc_vnx_cli.CommandLineHelper, 'command_execute',
                       self.fake_command_execute_for_driver_setup)
        self.stubs.Set(emc_vnx_cli.CommandLineHelper, 'get_array_serial',
                       mock.Mock(return_value={'array_serial':
                                               'fakeSerial'}))
        self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
        # Shrink the driver's polling intervals so tests run fast.
        self.stubs.Set(emc_vnx_cli, 'INTERVAL_5_SEC', 0.01)
        self.stubs.Set(emc_vnx_cli, 'INTERVAL_30_SEC', 0.01)
        self.configuration = conf.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
        self.configuration.san_ip = '10.0.0.1'
        self.configuration.storage_vnx_pool_name = 'unit_test_pool'
        self.configuration.san_login = 'sysadmin'
        self.configuration.san_password = 'sysadmin'
        self.configuration.initiator_auto_registration = True
        self.configuration.check_max_pool_luns_threshold = False
        self.stubs.Set(self.configuration, 'safe_get',
                       self.fake_safe_get({'storage_vnx_pool_names':
                                           'unit_test_pool',
                                           'volume_backend_name':
                                           'namedbackend'}))
        self.testData = EMCVNXCLIDriverTestData()
        self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
            '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
        self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
        self.configuration.ignore_pool_full_threshold = False
    def driverSetup(self, commands=tuple(), results=tuple()):
        """Create the driver and wire its CLI to a command simulator.

        ``commands[i]`` maps to ``results[i]``; unmatched commands fall
        back to standard canned replies.  Returns the mock so tests can
        assert on the exact CLI call sequence.
        """
        self.driver = self.generate_driver(self.configuration)
        fake_command_execute = self.get_command_execute_simulator(
            commands, results)
        fake_cli = mock.Mock(side_effect=fake_command_execute)
        self.driver.cli._client.command_execute = fake_cli
        return fake_cli
    def generate_driver(self, conf):
        """Subclasses return the protocol-specific driver instance."""
        raise NotImplementedError
    def get_command_execute_simulator(self, commands=tuple(),
                                      results=tuple()):
        """Return a side-effect callable replaying ``results`` per command.

        A list result is consumed one element per invocation of the same
        command (the element is popped), letting a command return
        different outputs on successive calls.
        """
        assert(len(commands) == len(results))
        def fake_command_execute(*args, **kwargv):
            for i in range(len(commands)):
                if args == commands[i]:
                    if isinstance(results[i], list):
                        if len(results[i]) > 0:
                            ret = results[i][0]
                            # Consume the queued reply so the next call
                            # sees the following one.
                            del results[i][0]
                            return ret
                    else:
                        return results[i]
            # Unregistered command: fall through to the standard replies.
            return self.standard_fake_command_execute(*args, **kwargv)
        return fake_command_execute
    def standard_fake_command_execute(self, *args, **kwargv):
        """Default canned replies for common LUN/pool queries; SUCCEED
        for anything unrecognized.
        """
        standard_commands = [
            self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
            self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
            self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
            self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'),
            self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'),
            self.testData.POOL_PROPERTY_CMD]
        standard_results = [
            self.testData.LUN_PROPERTY('vol1'),
            self.testData.LUN_PROPERTY('vol2'),
            self.testData.LUN_PROPERTY('vol2_dest'),
            self.testData.LUN_PROPERTY('vol-vol1'),
            self.testData.LUN_PROPERTY('snapshot1'),
            self.testData.POOL_PROPERTY]
        standard_default = SUCCEED
        for i in range(len(standard_commands)):
            if args == standard_commands[i]:
                return standard_results[i]
        return standard_default
    def fake_command_execute_for_driver_setup(self, *command, **kwargv):
        """CLI fake used only while the driver initializes (port/pool
        discovery); everything else returns SUCCEED.
        """
        if (command == ('connection', '-getport', '-address', '-vlanid') or
                command == ('connection', '-getport', '-vlanid')):
            return self.testData.ALL_PORTS
        elif command == ('storagepool', '-list', '-state'):
            return self.testData.POOL_GET_STATE_RESULT([
                {'pool_name': self.testData.test_pool_name, 'state': "Ready"},
                {'pool_name': "unit_test_pool2", 'state': "Ready"}])
        # Plain 'if' is fine here: the branches above always return.
        if command == self.testData.GETFCPORT_CMD():
            return self.testData.FC_PORTS
        else:
            return SUCCEED
    def fake_safe_get(self, values):
        """Return a safe_get stand-in reading from ``values`` (None when
        the key is absent).
        """
        def _safe_get(key):
            return values.get(key)
        return _safe_get
class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
def generate_driver(self, conf):
return emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    def test_create_destroy_volume_without_extra_spec(self):
        """Create then delete a plain volume; default provisioning is thick.

        Asserts the exact CLI sequence: bind, one non-polling property
        query, then unbind.
        """
        fake_cli = self.driverSetup()
        self.driver.create_volume(self.testData.test_volume)
        self.driver.delete_volume(self.testData.test_volume)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol1', 1,
                'unit_test_pool',
                'thick', None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                      poll=False),
            mock.call(*self.testData.LUN_DELETE_CMD('vol1'))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    def test_create_volume_ignore_thresholds(self):
        """ignore_pool_full_threshold must add ignore_thresholds=True to
        the LUN creation command.
        """
        self.configuration.ignore_pool_full_threshold = True
        fake_cli = self.driverSetup()
        self.driver.create_volume(self.testData.test_volume)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol1', 1,
                'unit_test_pool',
                'thick', None,
                ignore_thresholds=True, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                      poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'compressed'}))
    def test_create_volume_compressed(self):
        """Compressed extra-spec: create the LUN then enable compression.

        The thin property query is polled twice (poll=False then
        poll=True) before the compression command runs on lun id 1.
        """
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        # Pretend all optional enablers are installed on the array.
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        self.driver.create_volume(self.testData.test_volume_with_type)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'compressed', None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=True),
            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
                1))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        'oslo_service.loopingcall.FixedIntervalLoopingCall',
        new=utils.ZeroIntervalLoopingCall)
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'provisioning:type': 'thin',
                                'storagetype:provisioning': 'thick'}))
    def test_create_volume_thin(self):
        """'provisioning:type' wins over the legacy
        'storagetype:provisioning' spec, so a thin LUN is created.
        """
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        self.driver.create_volume(self.testData.test_volume_with_type)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'thin', None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        'oslo_service.loopingcall.FixedIntervalLoopingCall',
        new=utils.ZeroIntervalLoopingCall)
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'provisioning:type': 'thick'}))
    def test_create_volume_thick(self):
        """'provisioning:type': 'thick' creates a thick LUN."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', False),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        self.driver.create_volume(self.testData.test_volume_with_type)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'thick', None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'compressed',
                                'storagetype:tiering': 'HighestAvailable'}))
    def test_create_volume_compressed_tiering_highestavailable(self):
        """Compressed + HighestAvailable tiering: the tiering value is
        lower-cased on the creation command and compression is enabled
        after the LUN exists.
        """
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        self.driver.create_volume(self.testData.test_volume_with_type)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'compressed', 'highestavailable', poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=True),
            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
                1))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'deduplicated'}))
    def test_create_volume_deduplicated(self):
        """Deduplicated extra-spec creates the LUN with the
        'deduplicated' provisioning argument.
        """
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        self.driver.create_volume(self.testData.test_volume_with_type)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'deduplicated', None, poll=False))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:tiering': 'Auto'}))
    def test_create_volume_tiering_auto(self):
        """Tiering-only extra-spec: provisioning stays None and tiering
        is passed lower-cased as 'auto'.
        """
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        self.driver.create_volume(self.testData.test_volume_with_type)
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                None, 'auto', poll=False))]
        fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'storagetype:tiering': 'Auto',
'storagetype:provisioning': 'Deduplicated'}))
def test_create_volume_deduplicated_tiering_auto(self):
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
self.testData.NDU_LIST_RESULT]
self.driverSetup(commands, results)
ex = self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume,
self.testData.test_volume_with_type)
self.assertTrue(
re.match(r".*deduplicated and auto tiering can't be both enabled",
ex.msg))
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'storagetype:provisioning': 'Compressed'}))
def test_create_volume_compressed_no_enabler(self):
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
('No package', 0)]
self.driverSetup(commands, results)
ex = self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume,
self.testData.test_volume_with_type)
self.assertTrue(
re.match(r".*Compression Enabler is not installed",
ex.msg))
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'copytype:snap': 'true'}))
def test_create_volume_snapcopy_in_cg(self):
self.driverSetup()
vol = self.testData.test_volume_with_type.copy()
vol['consistencygroup_id'] = '7450764f-9d24-4c70-ad46-7cd90acd4292'
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume,
vol)
def test_get_volume_stats(self):
commands = [self.testData.NDU_LIST_CMD,
self.testData.POOL_GET_ALL_CMD(True)]
results = [self.testData.NDU_LIST_RESULT,
self.testData.POOL_GET_ALL_RESULT(True)]
self.driverSetup(commands, results)
stats = self.driver.get_volume_stats(True)
self.assertTrue(stats['driver_version'] == VERSION,
"driver_version is incorrect")
self.assertTrue(
stats['storage_protocol'] == 'iSCSI',
"storage_protocol is incorrect")
self.assertTrue(
stats['vendor_name'] == "EMC",
"vendor name is incorrect")
self.assertTrue(
stats['volume_backend_name'] == "namedbackend",
"volume backend name is incorrect")
pool_stats = stats['pools'][0]
expected_pool_stats = {
'free_capacity_gb': 3105.303,
'reserved_percentage': 32,
'location_info': 'unit_test_pool|fakeSerial',
'total_capacity_gb': 3281.146,
'provisioned_capacity_gb': 536.14,
'compression_support': 'True',
'deduplication_support': 'True',
'thin_provisioning_support': True,
'thick_provisioning_support': True,
'max_over_subscription_ratio': 20.0,
'consistencygroup_support': 'True',
'pool_name': 'unit_test_pool',
'fast_cache_enabled': 'True',
'fast_support': 'True'}
self.assertEqual(expected_pool_stats, pool_stats)
def test_get_volume_stats_ignore_threshold(self):
commands = [self.testData.NDU_LIST_CMD,
self.testData.POOL_GET_ALL_CMD(True)]
results = [self.testData.NDU_LIST_RESULT,
self.testData.POOL_GET_ALL_RESULT(True)]
self.driverSetup(commands, results)
self.driver.cli.ignore_pool_full_threshold = True
stats = self.driver.get_volume_stats(True)
pool_stats = stats['pools'][0]
self.assertEqual(2, pool_stats['reserved_percentage'])
def test_get_volume_stats_reserved_percentage_from_conf(self):
commands = [self.testData.NDU_LIST_CMD,
self.testData.POOL_GET_ALL_CMD(True)]
results = [self.testData.NDU_LIST_RESULT,
self.testData.POOL_GET_ALL_RESULT(True)]
self.configuration.reserved_percentage = 22
self.driverSetup(commands, results)
self.driver.cli.ignore_pool_full_threshold = True
stats = self.driver.get_volume_stats(True)
pool_stats = stats['pools'][0]
self.assertEqual(22, pool_stats['reserved_percentage'])
def test_get_volume_stats_too_many_luns(self):
commands = [self.testData.NDU_LIST_CMD,
self.testData.POOL_GET_ALL_CMD(True),
self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
results = [self.testData.NDU_LIST_RESULT,
self.testData.POOL_GET_ALL_RESULT(True),
self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.check_max_pool_luns_threshold = True
stats = self.driver.get_volume_stats(True)
pool_stats = stats['pools'][0]
self.assertTrue(
pool_stats['free_capacity_gb'] == 0,
"free_capacity_gb is incorrect")
expect_cmd = [
mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
poll=False)]
fake_cli.assert_has_calls(expect_cmd)
self.driver.cli.check_max_pool_luns_threshold = False
stats = self.driver.get_volume_stats(True)
pool_stats = stats['pools'][0]
self.assertTrue(stats['driver_version'] is not None,
"driver_version is not returned")
self.assertTrue(
pool_stats['free_capacity_gb'] == 3105.303,
"free_capacity_gb is incorrect")
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            side_effect=[1, 1]))
    def test_volume_migration_timeout(self):
        """Migration still succeeds when the migrate command times out.

        The 'migrate -start' call returns a network-error text with exit
        code 255; the driver then polls the verify command until the
        session disappears (exit code 23) and reports success.
        """
        commands = [self.testData.MIGRATION_CMD(),
                    self.testData.MIGRATION_VERIFY_CMD(1)]
        FAKE_ERROR_MSG = """\
A network error occurred while trying to connect: '10.244.213.142'.
Message : Error occurred because connection refused. \
Unable to establish a secure connection to the Management Server.
"""
        FAKE_ERROR_MSG = FAKE_ERROR_MSG.replace('\n', ' ')
        FAKE_MIGRATE_PROPERTY = """\
Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID:  63950
Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID:  136
Migration Rate:  high
Current State:  MIGRATED
Percent Complete:  100
Time Remaining:  0 second(s)
"""
        # The verify command replies twice with MIGRATED, then with
        # "not currently migrating" (session finished, code 23).
        results = [(FAKE_ERROR_MSG, 255),
                   [(FAKE_MIGRATE_PROPERTY, 0),
                    (FAKE_MIGRATE_PROPERTY, 0),
                    ('The specified source LUN is not currently migrating',
                     23)]]
        fake_cli = self.driverSetup(commands, results)
        fakehost = {'capabilities': {'location_info':
                                     "unit_test_pool2|fakeSerial",
                                     'storage_protocol': 'iSCSI'}}
        ret = self.driver.migrate_volume(None, self.testData.test_volume,
                                         fakehost)[0]
        self.assertTrue(ret)
        # verification
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(1, 1),
                                retry_disable=True,
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(
                    return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            side_effect=[1, 1]))
    def test_volume_migration(self):
        """Happy-path migration: start succeeds, verify first reports
        MIGRATED and then code 23 (session gone); migrate_volume returns
        True.
        """
        commands = [self.testData.MIGRATION_CMD(),
                    self.testData.MIGRATION_VERIFY_CMD(1)]
        FAKE_MIGRATE_PROPERTY = """\
Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID:  63950
Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID:  136
Migration Rate:  high
Current State:  MIGRATED
Percent Complete:  100
Time Remaining:  0 second(s)
"""
        results = [SUCCEED,
                   [(FAKE_MIGRATE_PROPERTY, 0),
                    ('The specified source LUN is not '
                     'currently migrating', 23)]]
        fake_cli = self.driverSetup(commands, results)
        fake_host = {'capabilities': {'location_info':
                                      "unit_test_pool2|fakeSerial",
                                      'storage_protocol': 'iSCSI'}}
        ret = self.driver.migrate_volume(None, self.testData.test_volume,
                                         fake_host)[0]
        self.assertTrue(ret)
        # verification
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
                                retry_disable=True,
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(
                    return_value={'lun_id': 5}))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:tiering': 'Auto'}))
    def test_volume_migration_02(self):
        """Migration of a typed volume (lun id 5) follows the same
        start/verify/finished sequence and succeeds.
        """
        commands = [self.testData.MIGRATION_CMD(5, 5),
                    self.testData.MIGRATION_VERIFY_CMD(5)]
        FAKE_MIGRATE_PROPERTY = """\
Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID:  63950
Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID:  136
Migration Rate:  high
Current State:  MIGRATED
Percent Complete:  100
Time Remaining:  0 second(s)
"""
        results = [SUCCEED,
                   [(FAKE_MIGRATE_PROPERTY, 0),
                    ('The specified source LUN is not currently migrating',
                     23)]]
        fake_cli = self.driverSetup(commands, results)
        fakehost = {'capabilities': {'location_info':
                                     "unit_test_pool2|fakeSerial",
                                     'storage_protocol': 'iSCSI'}}
        ret = self.driver.migrate_volume(None, self.testData.test_volume5,
                                         fakehost)[0]
        self.assertTrue(ret)
        # verification
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(5, 5),
                                retry_disable=True,
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(5),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(5),
                                poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(
                    return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            side_effect=[1, 1]))
    def test_volume_migration_failed(self):
        """When 'migrate -start' itself errors, migrate_volume returns
        False and no verify polling happens.
        """
        commands = [self.testData.MIGRATION_CMD()]
        results = [FAKE_ERROR_RETURN]
        fake_cli = self.driverSetup(commands, results)
        fakehost = {'capabilities': {'location_info':
                                     "unit_test_pool2|fakeSerial",
                                     'storage_protocol': 'iSCSI'}}
        ret = self.driver.migrate_volume(None, self.testData.test_volume,
                                         fakehost)[0]
        self.assertFalse(ret)
        # verification
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
                                retry_disable=True,
                                poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(
                    return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            side_effect=[1, 1]))
    def test_volume_migration_stopped(self):
        """A migration that enters the STOPPED state is cancelled and
        surfaces as a VolumeBackendAPIException.
        """
        commands = [self.testData.MIGRATION_CMD(),
                    self.testData.MIGRATION_VERIFY_CMD(1),
                    self.testData.MIGRATION_CANCEL_CMD(1)]
        # verify: first MIGRATING, then STOPPED (triggers the cancel),
        # finally "not migrating" once the session is torn down.
        results = [SUCCEED, [(self.testData.MIGRATE_PROPERTY_MIGRATING, 0),
                             (self.testData.MIGRATE_PROPERTY_STOPPED, 0),
                             ('The specified source LUN is not '
                              'currently migrating', 23)],
                   SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        fake_host = {'capabilities': {'location_info':
                                      "unit_test_pool2|fakeSerial",
                                      'storage_protocol': 'iSCSI'}}
        self.assertRaisesRegex(exception.VolumeBackendAPIException,
                               "Migration of LUN 1 has been stopped or"
                               " faulted.",
                               self.driver.migrate_volume,
                               None, self.testData.test_volume, fake_host)
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
                                retry_disable=True,
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=False),
                      mock.call(*self.testData.MIGRATION_CANCEL_CMD(1)),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                "CommandLineHelper.create_lun_by_cmd",
                mock.Mock(
                    return_value={'lun_id': 1}))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            side_effect=[1, 1]))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:tiering': 'Auto',
                                'copytype:snap': 'true'}))
    def test_volume_migration_smp(self):
        """Migrating a snap-mount-point (smp) volume converts it to a
        regular LUN: provider_location flips to type^lun and the backing
        temporary snapshot is deleted afterwards.
        """
        commands = [self.testData.MIGRATION_CMD(),
                    self.testData.MIGRATION_VERIFY_CMD(1)]
        FAKE_MIGRATE_PROPERTY = """\
Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID:  63950
Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID:  136
Migration Rate:  high
Current State:  MIGRATED
Percent Complete:  100
Time Remaining:  0 second(s)
"""
        results = [SUCCEED,
                   [(FAKE_MIGRATE_PROPERTY, 0),
                    ('The specified source LUN is not '
                     'currently migrating', 23)]]
        fake_cli = self.driverSetup(commands, results)
        fake_host = {'capabilities': {'location_info':
                                      "unit_test_pool2|fakeSerial",
                                      'storage_protocol': 'iSCSI'}}
        vol = self.testData.test_volume.copy()
        vol['provider_location'] = 'system^FNM11111|type^smp|id^1'
        # Name pattern of the temporary snapshot backing the smp volume.
        tmp_snap = "snap-as-vol-%s" % vol['id']
        ret = self.driver.migrate_volume(None,
                                         vol,
                                         fake_host)
        self.assertTrue(ret[0])
        self.assertTrue(
            ret[1]['provider_location'].find('type^lun') > 0)
        # verification
        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
                                retry_disable=True,
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=True),
                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                                poll=False),
                      mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
                                poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
def test_create_destroy_volume_snapshot(self):
fake_cli = self.driverSetup()
# case
self.driver.create_snapshot(self.testData.test_snapshot)
self.driver.delete_snapshot(self.testData.test_snapshot)
# verification
expect_cmd = [mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1'),
poll=False),
mock.call(*self.testData.SNAP_DELETE_CMD('snapshot1'),
poll=True)]
fake_cli.assert_has_calls(expect_cmd)
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_snapshot_preparing_volume(self):
        """Snapshot creation retries while the source LUN is Preparing.

        The first snap -create fails with the 'LUN not ready' error; the
        driver polls LUN properties through Preparing -> Optimizing ->
        None and then retries the snapshot, which succeeds.
        """
        commands = [self.testData.SNAP_CREATE_CMD('snapshot1'),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol1')]
        results = [[self.testData.LUN_PREP_ERROR(), SUCCEED],
                   [self.testData.LUN_PROPERTY('vol1', size=1,
                                               operation='Preparing'),
                    self.testData.LUN_PROPERTY('vol1', size=1,
                                               operation='Optimizing'),
                    self.testData.LUN_PROPERTY('vol1', size=1,
                                               operation='None')]]
        fake_cli = self.driverSetup(commands, results)
        self.driver.create_snapshot(self.testData.test_snapshot)
        expected = [mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1'),
                              poll=False),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=True),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=False),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=False),
                    mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1'),
                              poll=False)]
        fake_cli.assert_has_calls(expected)
    @mock.patch(
        "oslo_concurrency.processutils.execute",
        mock.Mock(
            return_value=(
                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection(self):
        """iSCSI attach in three modes, each with a fresh driverSetup.

        1) auto initiator registration (driver creates SG and sets paths),
        2) manual registration (driver uses 'storagegroup -connecthost'),
        3) no ping check (self.configuration.iscsi_initiators unset).
        Statement order matters: self.configuration is mutated between the
        sub-cases before each driverSetup call.
        """
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
                   self.testData.PING_OK]
        fake_cli = self.driverSetup(commands, results)
        connection_info = self.driver.initialize_connection(
            self.testData.test_volume,
            self.testData.connector)
        self.assertEqual(self.testData.iscsi_connection_info,
                         connection_info)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222', 'A',
                        4, 0, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222',
                        'A', 0, 0, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', 'iqn.1993-08.org.debian:01:222',
                        'B', 2, 0, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
                                                          '10.0.0.2'))]
        fake_cli.assert_has_calls(expected)

        # Test for manual registration
        self.configuration.initiator_auto_registration = False

        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
        results = [
            [("No group", 83),
             self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
            ('', 0),
            self.testData.PING_OK
        ]
        fake_cli = self.driverSetup(commands, results)
        test_volume_rw = self.testData.test_volume_rw
        connection_info = self.driver.initialize_connection(
            test_volume_rw,
            self.testData.connector)

        self.assertEqual(self.testData.iscsi_connection_info,
                         connection_info)

        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o', poll=False),
                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
                                                          '10.0.0.2'))]
        fake_cli.assert_has_calls(expected)

        # Test No Ping
        self.configuration.iscsi_initiators = None

        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost')]
        results = [
            [("No group", 83),
             self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
            ('', 0)]
        fake_cli = self.driverSetup(commands, results)
        test_volume_rw = self.testData.test_volume_rw.copy()
        test_volume_rw['provider_location'] = 'system^fakesn|type^lun|id^1'
        connection_info = self.driver.initialize_connection(
            test_volume_rw,
            self.testData.connector)

        self.assertEqual(self.testData.iscsi_connection_info,
                         connection_info)

        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o', poll=False)]
        fake_cli.assert_has_calls(expected)
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
                'CommandLineHelper.ping_node',
                mock.Mock(return_value=True))
    @mock.patch('random.shuffle', mock.Mock(return_value=0))
    def test_initialize_connection_multipath(self):
        """Multipath attach returns the multi-portal connection info.

        Two iSCSI targets are injected for SP A and none for SP B; the
        connector is copied with 'multipath' set so the driver returns
        iscsi_connection_info_mp instead of the single-path variant.
        """
        self.configuration.initiator_auto_registration = False
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost')]
        results = [self.testData.STORAGE_GROUP_HAS_MAP_MP('fakehost')]
        fake_cli = self.driverSetup(commands, results)
        # Pre-populate the driver's discovered iSCSI target map directly.
        self.driver.cli.iscsi_targets = {
            'A': [
                {'Port WWN': 'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
                 'SP': 'A',
                 'Port ID': 4,
                 'Virtual Port ID': 0,
                 'IP Address': '10.244.214.118'},
                {'Port WWN': 'iqn.1992-04.com.emc:cx.fnm00124000215.a5',
                 'SP': 'A',
                 'Port ID': 5,
                 'Virtual Port ID': 0,
                 'IP Address': '10.244.214.119'}],
            'B': []}
        test_volume_rw = self.testData.test_volume_rw.copy()
        test_volume_rw['provider_location'] = 'system^fakesn|type^lun|id^1'
        connector_m = dict(self.testData.connector)
        connector_m['multipath'] = True
        connection_info = self.driver.initialize_connection(
            test_volume_rw,
            connector_m)

        self.assertEqual(self.testData.iscsi_connection_info_mp,
                         connection_info)

        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o', poll=False)]
        fake_cli.assert_has_calls(expected)
@mock.patch(
"oslo_concurrency.processutils.execute",
mock.Mock(
return_value=(
"fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(
return_value=3))
@mock.patch('random.randint',
mock.Mock(return_value=0))
def test_initialize_connection_exist(self):
# Test for auto registration
self.configuration.initiator_auto_registration = True
self.configuration.max_luns_per_storage_group = 2
commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
('storagegroup', '-addhlu', '-hlu', 2, '-alu', 3,
'-gname', 'fakehost', '-o'),
self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
results = [[self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost')],
("fakeerror", 23),
self.testData.PING_OK]
fake_cli = self.driverSetup(commands, results)
iscsi_data = self.driver.initialize_connection(
self.testData.test_volume,
self.testData.connector
)
self.assertTrue(iscsi_data['data']['target_lun'] == 2,
"iSCSI initialize connection returned wrong HLU")
expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
poll=False),
mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 3,
'-gname', 'fakehost', '-o',
poll=False),
mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
poll=True),
mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
'10.0.0.2'))]
fake_cli.assert_has_calls(expected)
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection_iscsi_white_list(self):
        """Auto registration only sets paths on white-listed I/O ports.

        io_port_list restricts registration to A-0-0 and B-2-0; the
        expected call list therefore contains exactly one set_path_cmd
        per white-listed port.
        """
        self.configuration.io_port_list = 'a-0-0,B-2-0'
        test_volume = self.testData.test_volume.copy()
        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_HAS_MAP_ISCSI('fakehost')]]
        fake_cli = self.driverSetup(commands, results)
        # Inject the discovered target map matching the white list.
        self.driver.cli.iscsi_targets = {'A': [{'SP': 'A', 'Port ID': 0,
                                                'Virtual Port ID': 0,
                                                'Port WWN': 'fake_iqn',
                                                'IP Address': '192.168.1.1'}],
                                         'B': [{'SP': 'B', 'Port ID': 2,
                                                'Virtual Port ID': 0,
                                                'Port WWN': 'fake_iqn1',
                                                'IP Address': '192.168.1.2'}]}
        self.driver.initialize_connection(
            test_volume,
            self.testData.connector)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call(*self.testData.set_path_cmd(
                              'fakehost', 'iqn.1993-08.org.debian:01:222',
                              'A', 0, 0, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                              'fakehost', 'iqn.1993-08.org.debian:01:222',
                              'B', 2, 0, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False)]
        fake_cli.assert_has_calls(expected)
    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
                'EMCVnxCliBase._build_pool_stats',
                mock.Mock(return_value=None))
    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
                'CommandLineHelper.get_pool',
                mock.Mock(return_value={'total_capacity_gb': 0.0,
                                        'free_capacity_gb': 0.0}))
    def test_update_iscsi_io_ports(self):
        """update_volume_stats refreshes iscsi_targets from white-listed ports.

        After a stats update, the driver's iscsi_targets map must contain
        only ports A-0 and B-2 (per io_port_list), with the WWNs reported
        by the stubbed 'GETPORT' output.
        """
        self.configuration.io_port_list = 'a-0-0,B-2-0'
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.GETPORT_CMD()]
        results = [self.testData.WHITE_LIST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.update_volume_stats()
        expected = [mock.call(*self.testData.GETPORT_CMD(), poll=False)]
        fake_cli.assert_has_calls(expected)
        io_ports = self.driver.cli.iscsi_targets
        self.assertEqual((0, 'iqn.1992-04.com.emc:cx.fnmxxx.a0'),
                         (io_ports['A'][0]['Port ID'],
                          io_ports['A'][0]['Port WWN']))
        self.assertEqual((2, 'iqn.1992-04.com.emc:cx.fnmxxx.b2'),
                         (io_ports['B'][0]['Port ID'],
                          io_ports['B'][0]['Port WWN']))
@mock.patch(
"oslo_concurrency.processutils.execute",
mock.Mock(
return_value=(
"fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(
return_value=4))
@mock.patch('random.randint',
mock.Mock(return_value=0))
def test_initialize_connection_no_hlu_left_1(self):
# Test for auto registration
self.configuration.initiator_auto_registration = True
self.configuration.max_luns_per_storage_group = 2
commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
('storagegroup', '-addhlu', '-hlu', 2, '-alu', 4,
'-gname', 'fakehost', '-o'),
self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
results = [[self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
("", 0),
self.testData.PING_OK]
fake_cli = self.driverSetup(commands, results)
iscsi_data = self.driver.initialize_connection(
self.testData.test_volume,
self.testData.connector)
self.assertTrue(iscsi_data['data']['target_lun'] == 2,
"iSCSI initialize connection returned wrong HLU")
expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
poll=False),
mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
poll=True),
mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 4,
'-gname', 'fakehost', '-o',
poll=False),
mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
u'10.0.0.2'))]
fake_cli.assert_has_calls(expected)
    @mock.patch(
        "oslo_concurrency.processutils.execute",
        mock.Mock(
            return_value=(
                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(
            return_value=4))
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection_no_hlu_left_2(self):
        """Attach raises VolumeBackendAPIException when no HLU ever frees up.

        Both storage-group listings report a full group (HAS_MAP_2), so
        the driver gives up instead of retrying forever.
        """
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        self.configuration.max_luns_per_storage_group = 2
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost')]
        results = [
            [self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost'),
             self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost')]
        ]
        fake_cli = self.driverSetup(commands, results)

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          self.testData.test_volume,
                          self.testData.connector)
        expected = [
            mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                      poll=False),
            mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                      poll=True),
        ]
        fake_cli.assert_has_calls(expected)
    @mock.patch('os.path.exists', return_value=True)
    def test_terminate_connection(self, _mock_exists):
        """Detach removes the volume's HLU (16) from the host's storage group.

        The CLI helper is stubbed at the method level (get_storage_group,
        get_lun_by_name, remove_hlu_from_storagegroup) rather than via
        driverSetup, so only the HLU-removal call is verified.
        """
        self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
            configuration=self.configuration)
        cli_helper = self.driver.cli._client
        # lunmap maps ALU -> HLU; LUN 1 is mapped at HLU 16.
        data = {'storage_group_name': "fakehost",
                'storage_group_uid': "2F:D4:00:00:00:00:00:"
                "00:00:00:FF:E5:3A:03:FD:6D",
                'lunmap': {1: 16, 2: 88, 3: 47}}
        cli_helper.get_storage_group = mock.Mock(
            return_value=data)
        lun_info = {'lun_name': "unit_test_lun",
                    'lun_id': 1,
                    'pool': "unit_test_pool",
                    'attached_snapshot': "N/A",
                    'owner': "A",
                    'total_capacity_gb': 1.0,
                    'state': "Ready"}
        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
        self.driver.terminate_connection(self.testData.test_volume,
                                         self.testData.connector)
        cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
            16, self.testData.connector["host"])
def test_create_volume_cli_failed(self):
commands = [self.testData.LUN_CREATION_CMD(
'failed_vol1', 1, 'unit_test_pool', None, None, poll=False)]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.create_volume,
self.testData.test_failed_volume)
expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD(
'failed_vol1', 1, 'unit_test_pool', None, None, poll=False))]
fake_cli.assert_has_calls(expect_cmd)
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_create_faulted_volume(self):
        """Creating a volume waits out a transient Faulted/Preparing state.

        The LUN listing first reports Faulted + Preparing, then Faulted +
        operation None; the driver polls twice and completes the create
        without raising.
        """
        volume_name = 'faulted_volume'
        cmd_create = self.testData.LUN_CREATION_CMD(
            volume_name, 1, 'unit_test_pool', None, None, poll=False)
        cmd_list_preparing = self.testData.LUN_PROPERTY_ALL_CMD(volume_name)
        commands = [cmd_create, cmd_list_preparing]
        results = [SUCCEED,
                   [self.testData.LUN_PROPERTY(name=volume_name,
                                               state='Faulted',
                                               faulted='true',
                                               operation='Preparing'),
                    self.testData.LUN_PROPERTY(name=volume_name,
                                               state='Faulted',
                                               faulted='true',
                                               operation='None')]]
        fake_cli = self.driverSetup(commands, results)
        faulted_volume = self.testData.test_volume.copy()
        faulted_volume.update({'name': volume_name})
        self.driver.create_volume(faulted_volume)
        # One create followed by two non-polled property listings.
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                volume_name, 1, 'unit_test_pool', None, None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(volume_name),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(volume_name),
                      poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_create_offline_volume(self):
        """A LUN stuck in Offline state after creation raises an exception.

        Unlike the Faulted/Preparing case, Offline is terminal: the driver
        must raise VolumeBackendAPIException with a message naming the
        volume.
        """
        volume_name = 'offline_volume'
        cmd_create = self.testData.LUN_CREATION_CMD(
            volume_name, 1, 'unit_test_pool', None, None, poll=False)
        cmd_list = self.testData.LUN_PROPERTY_ALL_CMD(volume_name)
        commands = [cmd_create, cmd_list]
        results = [SUCCEED,
                   self.testData.LUN_PROPERTY(name=volume_name,
                                              state='Offline',
                                              faulted='true')]
        self.driverSetup(commands, results)
        offline_volume = self.testData.test_volume.copy()
        offline_volume.update({'name': volume_name})
        self.assertRaisesRegex(exception.VolumeBackendAPIException,
                               "Volume %s was created in VNX, but in"
                               " Offline state." % volume_name,
                               self.driver.create_volume,
                               offline_volume)
def test_create_volume_snapshot_failed(self):
commands = [self.testData.SNAP_CREATE_CMD('failed_snapshot')]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
# case
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.create_snapshot,
self.testData.test_failed_snapshot)
# verification
expect_cmd = [
mock.call(
*self.testData.SNAP_CREATE_CMD('failed_snapshot'),
poll=False)]
fake_cli.assert_has_calls(expect_cmd)
    def test_create_volume_from_snapshot(self):
        """Create-from-snapshot uses the SMP + migrate workflow.

        Two passes with fresh driverSetup mocks: the first verifies the
        full mount-point/attach/create-dest/migrate call sequence; the
        second flips ignore_pool_full_threshold and checks that the dest
        LUN creation carries ignore_thresholds=True.
        """
        # set up
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        output_dest = self.testData.LUN_PROPERTY("vol2_dest")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        output_migrate = ("", 0)
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        # Error code 23 here means "migration finished" to the driver.
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
                    cmd_migrate_verify]
        results = [output_dest, output_dest, output_migrate,
                   output_migrate_verify]
        fake_cli1 = self.driverSetup(commands, results)
        self.driver.create_volume_from_snapshot(self.testData.test_volume2,
                                                self.testData.test_snapshot)
        expect_cmd1 = [
            mock.call(
                *self.testData.SNAP_MP_CREATE_CMD(
                    name='vol2', source='vol1'),
                poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='vol2', snapName='snapshot1')),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol2_dest', 1, 'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      retry_disable=True,
                      poll=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True)]
        fake_cli1.assert_has_calls(expect_cmd1)

        self.configuration.ignore_pool_full_threshold = True
        fake_cli2 = self.driverSetup(commands, results)
        self.driver.create_volume_from_snapshot(self.testData.test_volume2,
                                                self.testData.test_snapshot)
        expect_cmd2 = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol2_dest', 1, 'unit_test_pool', None, None,
                ignore_thresholds=True))]
        fake_cli2.assert_has_calls(expect_cmd2)
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'copytype:snap': 'true'}))
    def test_create_volume_from_snapshot_smp(self):
        """With copytype:snap, create-from-snapshot yields an SMP volume.

        The driver copies the snapshot, makes it writable, and attaches it
        to a new mount point instead of migrating to a new LUN; the
        provider_location must therefore contain 'type^smp'.
        """
        fake_cli = self.driverSetup()
        vol = self.driver.create_volume_from_snapshot(
            self.testData.test_volume_with_type,
            self.testData.test_snapshot)
        self.assertTrue(
            vol['provider_location'].find('type^smp') > 0)
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_COPY_CMD(
                    src_snap='snapshot1',
                    snap_name='snap-as-vol-%s' % '1')),
            mock.call(
                *self.testData.SNAP_MODIFY_CMD(
                    name='snap-as-vol-%s' % '1',
                    rw='yes')),
            mock.call(
                *self.testData.SNAP_MP_CREATE_CMD(
                    name='vol_with_type', source='vol1'),
                poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='vol_with_type', snapName='snap-as-vol-%s' % '1'))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_create_volume_from_snapshot_sync_failed(self):
        """A failed migration sync triggers cancel and full cleanup.

        The migration-verify stub first raises a CLI error, so the driver
        cancels the migration, deletes the destination LUN, detaches the
        snapshot and deletes the mount-point LUN, then raises
        VolumeBackendAPIException.
        """
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        output_dest = self.testData.LUN_PROPERTY("vol2_dest")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        cmd_detach_lun = ('lun', '-detach', '-name', 'vol2', '-o')
        output_migrate = ("", 0)
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        # Error code 23 here means "migration finished" to the driver.
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        cmd_migrate_cancel = self.testData.MIGRATION_CANCEL_CMD(1)
        output_migrate_cancel = ("", 0)

        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
                    cmd_migrate_verify, cmd_migrate_cancel]
        results = [output_dest, output_dest, output_migrate,
                   [FAKE_ERROR_RETURN, output_migrate_verify],
                   output_migrate_cancel]

        fake_cli = self.driverSetup(commands, results)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          self.testData.test_volume2,
                          self.testData.test_snapshot)
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_MP_CREATE_CMD(
                    name='vol2', source='vol1'),
                poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='vol2', snapName='snapshot1')),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol2_dest', 1, 'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      retry_disable=True,
                      poll=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True),
            mock.call(*self.testData.MIGRATION_CANCEL_CMD(1)),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=False),
            mock.call(*self.testData.LUN_DELETE_CMD('vol2_dest')),
            mock.call(*cmd_detach_lun),
            mock.call(*self.testData.LUN_DELETE_CMD('vol2'))]
        fake_cli.assert_has_calls(expect_cmd)
    def test_create_vol_from_snap_failed_in_migrate_lun(self):
        """A failure starting the migration itself triggers cleanup.

        The migration start command errors out; the driver deletes the
        destination LUN, detaches the snapshot, deletes the mount-point
        LUN, and raises VolumeBackendAPIException. No cancel is expected
        because the migration never started.
        """
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        output_dest = self.testData.LUN_PROPERTY("vol2_dest")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        cmd_detach_lun = ('lun', '-detach', '-name', 'vol2', '-o')
        commands = [cmd_dest, cmd_migrate]
        results = [output_dest, FAKE_ERROR_RETURN]
        fake_cli = self.driverSetup(commands, results)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          self.testData.test_volume2,
                          self.testData.test_snapshot)
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_MP_CREATE_CMD(
                    name='vol2', source='vol1'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'), poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='vol2', snapName='snapshot1')),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol2_dest', 1, 'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      poll=True,
                      retry_disable=True),
            mock.call(*self.testData.LUN_DELETE_CMD('vol2_dest')),
            mock.call(*cmd_detach_lun),
            mock.call(*self.testData.LUN_DELETE_CMD('vol2'))]
        fake_cli.assert_has_calls(expect_cmd)
    def test_create_cloned_volume(self):
        """Cloning uses a temp snapshot plus the SMP + migrate workflow.

        A temporary snapshot 'tmp-snap-<source id>' is created, attached
        to a mount point named after the clone, migrated into a new LUN,
        and then the temp snapshot is deleted.
        """
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("clone1_dest")
        cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD("clone1_dest")
        output_dest = self.testData.LUN_PROPERTY("clone1_dest")
        cmd_clone = self.testData.LUN_PROPERTY_ALL_CMD("clone1")
        output_clone = self.testData.LUN_PROPERTY("clone1")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        output_migrate = ("", 0)
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        # Error code 23 here means "migration finished" to the driver.
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        commands = [cmd_dest, cmd_dest_p, cmd_clone, cmd_migrate,
                    cmd_migrate_verify]
        results = [output_dest, output_dest, output_clone, output_migrate,
                   output_migrate_verify]
        fake_cli = self.driverSetup(commands, results)

        volume = self.testData.test_volume.copy()
        volume['name'] = 'clone1'

        self.driver.create_cloned_volume(volume, self.testData.test_volume)
        tmp_snap = 'tmp-snap-' + self.testData.test_volume['id']
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_CREATE_CMD(tmp_snap), poll=False),
            mock.call(*self.testData.SNAP_MP_CREATE_CMD(
                name='clone1',
                source='vol1'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('clone1'),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='clone1', snapName=tmp_snap)),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'clone1_dest', 1, 'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('clone1_dest'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('clone1_dest'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      poll=True,
                      retry_disable=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True),
            mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
                      poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'copytype:snap': 'true'}))
    def test_create_cloned_volume_smp(self):
        """With copytype:snap, cloning yields an SMP volume (no migration).

        A persistent 'snap-as-vol-<clone id>' snapshot backs the clone's
        mount point, and provider_location must contain 'type^smp'.
        """
        fake_cli = self.driverSetup()
        vol = self.driver.create_cloned_volume(
            self.testData.test_clone,
            self.testData.test_volume_with_type)
        self.assertTrue(
            vol['provider_location'].find('type^smp') > 0)
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_CREATE_CMD(
                    name='snap-as-vol-%s' % '2'),
                poll=False),
            mock.call(
                *self.testData.SNAP_MP_CREATE_CMD(
                    name='clone1', source='vol_with_type'),
                poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('clone1'),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='clone1', snapName='snap-as-vol-%s' % '2'))]
        fake_cli.assert_has_calls(expect_cmd)
def test_delete_volume_failed(self):
commands = [self.testData.LUN_DELETE_CMD('failed_vol1')]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.delete_volume,
self.testData.test_failed_volume)
expected = [mock.call(*self.testData.LUN_DELETE_CMD('failed_vol1'))]
fake_cli.assert_has_calls(expected)
    def test_delete_volume_in_sg_failed(self):
        """Deleting a LUN still in a storage group fails without force.

        force_delete_lun_in_sg is left at its default, so both flavors of
        the "LUN in storage group" CLI error must propagate as
        EMCVnxCLICmdError.
        """
        commands = [self.testData.LUN_DELETE_CMD('vol1_in_sg'),
                    self.testData.LUN_DELETE_CMD('vol2_in_sg')]
        results = [self.testData.LUN_DELETE_IN_SG_ERROR(),
                   self.testData.LUN_DELETE_IN_SG_ERROR(False)]
        self.driverSetup(commands, results)
        self.assertRaises(exception.EMCVnxCLICmdError,
                          self.driver.delete_volume,
                          self.testData.test_volume1_in_sg)
        self.assertRaises(exception.EMCVnxCLICmdError,
                          self.driver.delete_volume,
                          self.testData.test_volume2_in_sg)
    def test_delete_volume_in_sg_force(self):
        """With force_delete_lun_in_sg, HLUs are removed and delete retried.

        Each delete first fails with a "LUN in storage group" error; the
        driver then lists the storage groups, removes the volume's HLUs
        from each host, and retries the delete successfully.
        """
        commands = [self.testData.LUN_DELETE_CMD('vol1_in_sg'),
                    self.testData.STORAGEGROUP_LIST_CMD(),
                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost1',
                                                             '41'),
                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost1',
                                                             '42'),
                    self.testData.LUN_DELETE_CMD('vol2_in_sg'),
                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost2',
                                                             '31'),
                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost2',
                                                             '32')]
        results = [[self.testData.LUN_DELETE_IN_SG_ERROR(),
                    SUCCEED],
                   self.testData.STORAGE_GROUPS_HAS_MAP('fakehost1',
                                                        'fakehost2'),
                   SUCCEED,
                   SUCCEED,
                   [self.testData.LUN_DELETE_IN_SG_ERROR(False),
                    SUCCEED],
                   SUCCEED,
                   SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.force_delete_lun_in_sg = True
        self.driver.delete_volume(self.testData.test_volume1_in_sg)
        self.driver.delete_volume(self.testData.test_volume2_in_sg)
        # NOTE(review): expected removes HLU '42' from 'fakehost2' and
        # '31' from 'fakehost1', while the stubbed commands above pair
        # '42' with 'fakehost1' and '31' with 'fakehost2' — verify this
        # host/HLU pairing is intentional.
        expected = [mock.call(*self.testData.LUN_DELETE_CMD('vol1_in_sg')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD(),
                              poll=True),
                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
                        'fakehost1', '41'), poll=False),
                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
                        'fakehost2', '42'), poll=False),
                    mock.call(*self.testData.LUN_DELETE_CMD('vol1_in_sg')),
                    mock.call(*self.testData.LUN_DELETE_CMD('vol2_in_sg')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD(),
                              poll=True),
                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
                        'fakehost1', '31'), poll=False),
                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
                        'fakehost2', '32'), poll=False),
                    mock.call(*self.testData.LUN_DELETE_CMD('vol2_in_sg'))]
        fake_cli.assert_has_calls(expected)
def test_delete_volume_smp(self):
fake_cli = self.driverSetup()
vol = self.testData.test_volume_with_type.copy()
vol['provider_location'] = 'system^FNM11111|type^smp|id^1'
tmp_snap = 'snap-as-vol-%s' % vol['id']
self.driver.delete_volume(vol)
expected = [mock.call(*self.testData.LUN_DELETE_CMD(vol['name'])),
mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
poll=True)]
fake_cli.assert_has_calls(expected)
def test_extend_volume(self):
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol1')]
results = [self.testData.LUN_PROPERTY('vol1', size=2)]
fake_cli = self.driverSetup(commands, results)
# case
self.driver.extend_volume(self.testData.test_volume, 2)
expected = [mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2),
poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
poll=False)]
fake_cli.assert_has_calls(expected)
def test_extend_volume_has_snapshot(self):
commands = [self.testData.LUN_EXTEND_CMD('failed_vol1', 2)]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.extend_volume,
self.testData.test_failed_volume,
2)
expected = [mock.call(*self.testData.LUN_EXTEND_CMD('failed_vol1', 2),
poll=False)]
fake_cli.assert_has_calls(expected)
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_extend_volume_failed(self):
        """Extend times out when the LUN never reaches the requested size.

        The LUN listing keeps reporting size 2 after an extend to 3; with
        the client timeout forced to 0 the driver raises
        VolumeBackendAPIException instead of polling forever.
        """
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1')]
        results = [self.testData.LUN_PROPERTY('failed_vol1', size=2)]
        fake_cli = self.driverSetup(commands, results)
        # Force an immediate timeout so the size check fails fast.
        self.driver.cli._client.timeout = 0

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume,
                          self.testData.test_failed_volume,
                          3)
        expected = [
            mock.call(
                *self.testData.LUN_EXTEND_CMD('failed_vol1', 3),
                poll=False),
            mock.call(
                *self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1'),
                poll=False)]
        fake_cli.assert_has_calls(expected)
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                new=utils.ZeroIntervalLoopingCall)
    def test_extend_preparing_volume(self):
        """Extend retries while the LUN is in a 'Preparing' operation.

        The first extend fails with a LUN-preparing error; the driver
        polls the LUN until its operation becomes 'None', retries the
        extend, and finally confirms the new size.
        """
        commands = [self.testData.LUN_EXTEND_CMD('vol1', 2),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol1')]
        results = [[self.testData.LUN_PREP_ERROR(), SUCCEED],
                   [self.testData.LUN_PROPERTY('vol1', size=1,
                                               operation='Preparing'),
                    self.testData.LUN_PROPERTY('vol1', size=1,
                                               operation='Optimizing'),
                    self.testData.LUN_PROPERTY('vol1', size=1,
                                               operation='None'),
                    self.testData.LUN_PROPERTY('vol1', size=2)]]
        fake_cli = self.driverSetup(commands, results)
        self.driver.extend_volume(self.testData.test_volume, 2)
        expected = [mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2),
                              poll=False),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=True),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=False),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=False),
                    mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2),
                              poll=False),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                              poll=False)]
        fake_cli.assert_has_calls(expected)
def test_manage_existing(self):
lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
'-newName', 'vol_with_type', '-o')
commands = [lun_rename_cmd]
results = [SUCCEED]
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
fake_cli = self.driverSetup(commands, results)
self.driver.manage_existing(
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
expected = [mock.call(*lun_rename_cmd, poll=False)]
fake_cli.assert_has_calls(expected)
def test_manage_existing_source_name(self):
lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
'-newName', 'vol_with_type', '-o')
commands = [lun_rename_cmd]
results = [SUCCEED]
fake_cli = self.driverSetup(commands, results)
self.driver.manage_existing(
self.testData.test_volume_with_type,
self.testData.test_existing_ref_source_name)
expected = [mock.call(*lun_rename_cmd, poll=False)]
fake_cli.assert_has_calls(expected)
    def test_manage_existing_lun_in_another_pool(self):
        """A LUN in a pool not managed by this backend cannot be managed.

        The LUN's pool name differs from storage_vnx_pool_name, so
        manage_existing_get_size must raise
        ManageExistingInvalidReference with a "not managed" message.
        """
        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
                       '-state', '-userCap', '-owner',
                       '-attachedSnapshot', '-poolName')
        invalid_pool_name = "fake_pool"
        commands = [get_lun_cmd]
        results = [self.testData.LUN_PROPERTY('lun_name',
                                              pool_name=invalid_pool_name)]
        self.configuration.storage_vnx_pool_name = invalid_pool_name
        fake_cli = self.driverSetup(commands, results)
        # mock the command executor
        ex = self.assertRaises(
            exception.ManageExistingInvalidReference,
            self.driver.manage_existing_get_size,
            self.testData.test_volume_with_type,
            self.testData.test_existing_ref)
        self.assertTrue(
            re.match(r'.*not managed by the host',
                     ex.msg))
        expected = [mock.call(*get_lun_cmd, poll=True)]
        fake_cli.assert_has_calls(expected)
def test_manage_existing_get_size(self):
get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
'-state', '-userCap', '-owner',
'-attachedSnapshot', '-poolName')
test_size = 2
commands = [get_lun_cmd]
results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
fake_cli = self.driverSetup(commands, results)
get_size = self.driver.manage_existing_get_size(
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
expected = [mock.call(*get_lun_cmd, poll=True)]
assert get_size == test_size
fake_cli.assert_has_calls(expected)
# Test the function with invalid reference.
invaild_ref = {'fake': 'fake_ref'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
self.testData.test_volume_with_type,
invaild_ref)
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(return_value=1))
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "time.time",
        mock.Mock(return_value=123456))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'compressed'}))
    def test_retype_compressed_to_deduplicated(self):
        """Retype compressed -> deduplicated triggers a LUN migration.

        A new 'deduplicated' LUN is created and the volume migrated onto
        it; when ignore_pool_full_threshold is enabled the creation
        command must carry ignore_thresholds=True.
        """
        # NOTE: 'storagetype:provsioning' (sic) is the key spelling the
        # driver actually emits in diff data — do not "fix" it here.
        diff_data = {'encryption': {}, 'qos_specs': {},
                     'extra_specs':
                     {'storagetype:provsioning': ('compressed',
                                                  'deduplicated')}}
        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
                         'deleted': False,
                         'extra_specs': {'storagetype:provisioning':
                                         'deduplicated'},
                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
                          'capabilities':
                          {'location_info': 'unit_test_pool|FNM00124500890',
                           'volume_backend_name': 'pool_backend_1',
                           'storage_protocol': 'iSCSI'}}
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.SNAP_LIST_CMD(),
                    cmd_migrate_verify]
        results = [self.testData.NDU_LIST_RESULT,
                   ('No snap', 1023),
                   output_migrate_verify]
        fake_cli1 = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})
        self.driver.retype(None, self.testData.test_volume3,
                           new_type_data,
                           diff_data,
                           host_test_data)
        expect_cmd1 = [
            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol3-123456'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, None),
                      retry_disable=True,
                      poll=True)]
        fake_cli1.assert_has_calls(expect_cmd1)
        # Second pass: same retype, but with the pool-full threshold
        # ignored — LUN creation must include ignore_thresholds=True.
        self.configuration.ignore_pool_full_threshold = True
        fake_cli2 = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})
        self.driver.retype(None, self.testData.test_volume3,
                           new_type_data,
                           diff_data,
                           host_test_data)
        expect_cmd2 = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None,
                ignore_thresholds=True))]
        fake_cli2.assert_has_calls(expect_cmd2)
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(return_value=1))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
        "get_lun_by_name",
        mock.Mock(return_value={'lun_id': 1}))
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "time.time",
        mock.Mock(return_value=123456))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'thin'}))
    def test_retype_thin_auto(self):
        """placeholder"""
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
@mock.patch(
"time.time",
mock.Mock(return_value=123456))
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'storagetype:provisioning': 'deduplicated',
'storagetype:pool': 'unit_test_pool'}))
def test_retype_pool_changed_dedup_to_compressed_auto(self):
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provsioning': ('deduplicated',
'compressed'),
'storagetype:tiering': (None, 'auto'),
'storagetype:pool': ('unit_test_pool',
'unit_test_pool2')}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning':
'compressed',
'storagetype:tiering': 'auto',
'storagetype:pool':
'unit_test_pool2'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {'host':
'ubuntu-server12@pool_backend_1
'capabilities':
{'location_info': 'unit_test_pool2|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
self.testData.SNAP_LIST_CMD(),
self.testData.MIGRATION_VERIFY_CMD(1)]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023),
('The specified source LUN is not currently migrating', 23)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
expect_cmd = [
mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol3-123456', 2, 'unit_test_pool2', 'compressed', 'auto')),
mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
mock.call(*self.testData.MIGRATION_CMD(),
retry_disable=True,
poll=True),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
poll=True)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'storagetype:provisioning': 'compressed',
'storagetype:pool': 'unit_test_pool',
'storagetype:tiering': 'auto'}))
def test_retype_compressed_auto_to_compressed_nomovement(self):
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:tiering': ('auto', 'nomovement')}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning':
'compressed',
'storagetype:tiering': 'nomovement',
'storagetype:pool':
'unit_test_pool'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {
'host': 'host@backendsec
'capabilities': {
'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
self.testData.SNAP_LIST_CMD()]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
expect_cmd = [
mock.call('lun', '-modify', '-name', 'vol3', '-o', '-initialTier',
'optimizePool', '-tieringPolicy', 'noMovement')]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'storagetype:provisioning': 'thin',
'storagetype:pool': 'unit_test_pool'}))
def test_retype_compressed_to_thin_cross_array(self):
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provsioning': ('compressed', 'thin')}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning': 'thin',
'storagetype:pool':
'unit_test_pool'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {
'host': 'ubuntu-server12@pool_backend_2
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500891',
'volume_backend_name': 'pool_backend_2',
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
self.testData.SNAP_LIST_CMD()]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023)]
self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
retyped = self.driver.retype(None, self.testData.test_volume3,
new_type_data, diff_data,
host_test_data)
self.assertFalse(retyped,
"Retype should failed due to"
" different protocol or array")
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
@mock.patch(
"time.time",
mock.Mock(return_value=123456))
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'storagetype:provisioning': 'thin',
'storagetype:tiering': 'auto',
'storagetype:pool': 'unit_test_pool'}))
def test_retype_thin_auto_to_dedup_diff_procotol(self):
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provsioning': ('thin', 'deduplicated'),
'storagetype:tiering': ('auto', None)}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning':
'deduplicated',
'storagetype:pool':
'unit_test_pool'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {
'host': 'ubuntu-server12@pool_backend_2
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_2',
'storage_protocol': 'FC'}}
commands = [self.testData.NDU_LIST_CMD,
self.testData.SNAP_LIST_CMD(),
self.testData.MIGRATION_VERIFY_CMD(1)]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023),
('The specified source LUN is not currently migrating', 23)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
expect_cmd = [
mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
mock.call(*self.testData.MIGRATION_CMD(),
retry_disable=True,
poll=True),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
poll=True)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'storagetype:provisioning': 'thin',
'storagetype:tiering': 'auto',
'storagetype:pool': 'unit_test_pool'}))
def test_retype_thin_auto_has_snap_to_thick_highestavailable(self):
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provsioning': ('thin', None),
'storagetype:tiering': ('auto', 'highestAvailable')}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:tiering':
'highestAvailable',
'storagetype:pool':
'unit_test_pool'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {
'host': 'ubuntu-server12@pool_backend_1
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
self.testData.SNAP_LIST_CMD()]
results = [self.testData.NDU_LIST_RESULT,
('Has snap', 0)]
self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
retyped = self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
self.assertFalse(retyped,
"Retype should failed due to"
" different protocol or array")
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'storagetype:provisioning': 'thin',
'storagetype:tiering': 'auto',
'storagetype:pool': 'unit_test_pool'}))
def test_retype_thin_auto_to_thin_auto(self):
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs': {}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:tiering':
'auto',
'storagetype:provisioning':
'thin'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {
'host': 'ubuntu-server12@pool_backend_1
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD]
results = [self.testData.NDU_LIST_RESULT]
self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper."
"migrate_lun_with_verification",
mock.Mock(return_value=True))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper."
"create_lun_with_advance_feature",
mock.Mock(return_value={'lun_id': '1'}))
@mock.patch(
"cinder.volume.volume_types."
"get_volume_type_extra_specs",
mock.Mock(return_value={'storagetype:provisioning': 'thin',
'copytype:snap': 'true'}))
def test_retype_copytype_snap_true_to_false(self):
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'copytype:snap': ('true',
'false')}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning': 'thin',
'copytype:snap': 'false'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
output_migrate_verify = (r'The specified source LUN '
'is not currently migrating', 23)
commands = [self.testData.NDU_LIST_CMD,
self.testData.SNAP_LIST_CMD(),
cmd_migrate_verify]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023),
output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
vol = self.testData.test_volume3.copy()
vol['provider_location'] = 'system^FNM11111|type^smp|id^1'
tmp_snap = 'snap-as-vol-%s' % vol['id']
ret = self.driver.retype(None, vol,
new_type_data,
diff_data,
host_test_data)
self.assertTrue(type(ret) == tuple)
self.assertTrue(ret[0])
self.assertTrue(
ret[1]['provider_location'].find('type^lun') > 0)
expect_cmd = [
mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
poll=True)]
fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'fast_cache_enabled': 'True'}))
    def test_create_volume_with_fastcache(self):
        """Creating a volume on a FASTCache pool reports the capability.

        After update_volume_stats the pool stats must expose
        fast_cache_enabled='True', and the volume is created as a plain
        NonThin LUN in the configured pool.
        """
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.POOL_PROPERTY_W_FASTCACHE_CMD,
                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    ]
        results = [self.testData.NDU_LIST_RESULT,
                   self.testData.POOL_PROPERTY_W_FASTCACHE,
                   self.testData.LUN_PROPERTY('vol_with_type', True),
                   ]
        fake_cli = self.driverSetup(commands, results)
        lun_info = {'lun_name': "vol_with_type",
                    'lun_id': 1,
                    'pool': "unit_test_pool",
                    'attached_snapshot': "N/A",
                    'owner': "A",
                    'total_capacity_gb': 1.0,
                    'state': "Ready",
                    'status': 'OK(0x0)',
                    'operation': 'None'
                    }
        # Stub the helper layer directly so stats/creation don't depend
        # on additional CLI round-trips.
        cli_helper = self.driver.cli._client
        cli_helper.command_execute = fake_cli
        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
        cli_helper.get_enablers_on_array = mock.Mock(return_value="-FASTCache")
        cli_helper.get_pool_list = mock.Mock(return_value=[{
            'lun_nums': 1000,
            'total_capacity_gb': 10,
            'free_capacity_gb': 5,
            'provisioned_capacity_gb': 8,
            'pool_name': "unit_test_pool",
            'fast_cache_enabled': 'True',
            'state': 'Ready',
            'pool_full_threshold': 70.0}])
        self.driver.update_volume_stats()
        self.driver.create_volume(self.testData.test_volume_with_type)
        pool_stats = self.driver.cli.stats['pools'][0]
        self.assertEqual('True', pool_stats['fast_cache_enabled'])
        expect_cmd = [
            mock.call('connection', '-getport', '-address', '-vlanid',
                      poll=False),
            mock.call('-np', 'lun', '-create', '-capacity',
                      1, '-sq', 'gb', '-poolName',
                      self.testData.test_pool_name,
                      '-name', 'vol_with_type', '-type', 'NonThin')
            ]
        fake_cli.assert_has_calls(expect_cmd)
def test_get_lun_id_provider_location_exists(self):
self.driverSetup()
volume_01 = {
'name': 'vol_01',
'size': 1,
'volume_name': 'vol_01',
'id': '1',
'name_id': '1',
'provider_location': 'system^FNM11111|type^lun|id^4',
'project_id': 'project',
'display_name': 'vol_01',
'display_description': 'test volume',
'volume_type_id': None}
self.assertEqual(4, self.driver.cli.get_lun_id(volume_01))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 2}))
def test_get_lun_id_provider_location_has_no_lun_id(self):
self.driverSetup()
volume_02 = {
'name': 'vol_02',
'size': 1,
'volume_name': 'vol_02',
'id': '2',
'provider_location': 'system^FNM11111|type^lun|',
'project_id': 'project',
'display_name': 'vol_02',
'display_description': 'test volume',
'volume_type_id': None}
self.assertEqual(2, self.driver.cli.get_lun_id(volume_02))
    def test_create_consistency_group(self):
        """CG creation issues a create command plus a verification query."""
        cg_name = self.testData.test_cg['id']
        commands = [self.testData.CREATE_CONSISTENCYGROUP_CMD(cg_name),
                    self.testData.GET_CG_BY_NAME_CMD(cg_name)]
        results = [SUCCEED, self.testData.CG_PROPERTY(cg_name)]
        fake_cli = self.driverSetup(commands, results)
        model_update = self.driver.create_consistencygroup(
            None, self.testData.test_cg)
        self.assertDictMatch({'status': 'available'}, model_update)
        expect_cmd = [
            mock.call(
                *self.testData.CREATE_CONSISTENCYGROUP_CMD(
                    cg_name), poll=False),
            mock.call(
                *self.testData.GET_CG_BY_NAME_CMD(cg_name))]
        fake_cli.assert_has_calls(expect_cmd)
    def test_create_consistency_group_retry(self):
        """CG creation retries the verification query until the CG appears.

        The first GET_CG_BY_NAME reply is CG_NOT_FOUND, so the driver must
        issue the query twice before reporting the CG available.
        """
        cg_name = self.testData.test_cg['id']
        commands = [self.testData.CREATE_CONSISTENCYGROUP_CMD(cg_name),
                    self.testData.GET_CG_BY_NAME_CMD(cg_name)]
        results = [SUCCEED,
                   [self.testData.CG_NOT_FOUND(),
                    self.testData.CG_PROPERTY(cg_name)]]
        fake_cli = self.driverSetup(commands, results)
        model_update = self.driver.create_consistencygroup(
            None, self.testData.test_cg)
        self.assertDictMatch({'status': 'available'}, model_update)
        expect_cmd = [
            mock.call(
                *self.testData.CREATE_CONSISTENCYGROUP_CMD(
                    cg_name), poll=False),
            mock.call(
                *self.testData.GET_CG_BY_NAME_CMD(cg_name)),
            mock.call(
                *self.testData.GET_CG_BY_NAME_CMD(cg_name))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "cinder.volume.volume_types.get_volume_type_extra_specs",
        mock.Mock(side_effect=[{'storagetype:provisioning': 'thin'},
                               {'storagetype:provisioning': 'compressed'}]))
    def test_create_consistency_group_failed_with_compression(self):
        """CG creation is rejected when a member type uses compression.

        The second volume type resolves to 'compressed', which VNX CGs
        cannot contain, so VolumeBackendAPIException is expected.
        """
        self.driverSetup([], [])
        self.assertRaisesRegex(exception.VolumeBackendAPIException,
                               "Failed to create consistency group "
                               "consistencygroup_id "
                               "because VNX consistency group cannot "
                               "accept compressed LUNs as members.",
                               self.driver.create_consistencygroup,
                               None,
                               self.testData.test_cg_with_type)
    def test_delete_consistency_group(self):
        """Deleting a CG destroys the group and then each member LUN."""
        cg_name = self.testData.test_cg['id']
        commands = [self.testData.DELETE_CONSISTENCYGROUP_CMD(cg_name),
                    self.testData.LUN_DELETE_CMD('vol1')]
        results = [SUCCEED, SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        self.driver.db = mock.MagicMock()
        self.driver.db.volume_get_all_by_group.return_value =\
            self.testData.CONSISTENCY_GROUP_VOLUMES()
        self.driver.delete_consistencygroup(None,
                                            self.testData.test_cg)
        # NOTE(review): two identical LUN_DELETE calls — presumably
        # CONSISTENCY_GROUP_VOLUMES() yields two members both named
        # 'vol1'; confirm against the test data helper.
        expect_cmd = [
            mock.call(
                *self.testData.DELETE_CONSISTENCYGROUP_CMD(
                    cg_name)),
            mock.call(*self.testData.LUN_DELETE_CMD('vol1')),
            mock.call(*self.testData.LUN_DELETE_CMD('vol1'))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        'cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_create_cgsnapshot(self, get_all_for_cgsnapshot):
        """cgsnapshot creation issues snap-create then a verifying query."""
        cgsnapshot = self.testData.test_cgsnapshot['id']
        cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
        commands = [self.testData.CREATE_CG_SNAPSHOT(cg_name, cgsnapshot),
                    self.testData.GET_SNAP(cgsnapshot)]
        results = [SUCCEED,
                   SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(
            self.testData.SNAPS_IN_SNAP_GROUP())
        snapshot_obj.consistencygroup_id = cg_name
        get_all_for_cgsnapshot.return_value = [snapshot_obj]
        self.driver.create_cgsnapshot(None, self.testData.test_cgsnapshot)
        expect_cmd = [
            mock.call(
                *self.testData.CREATE_CG_SNAPSHOT(
                    cg_name, cgsnapshot)),
            mock.call(
                *self.testData.GET_SNAP(cgsnapshot))]
        fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        'cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_create_cgsnapshot_retry(self, get_all_for_cgsnapshot):
        """Snapshot verification retries until the new snap is listed.

        The first GET_SNAP reply is SNAP_NOT_EXIST, so the query must be
        issued twice after the create command.
        """
        cgsnapshot = self.testData.test_cgsnapshot['id']
        cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
        commands = [self.testData.CREATE_CG_SNAPSHOT(cg_name, cgsnapshot),
                    self.testData.GET_SNAP(cgsnapshot)]
        results = [SUCCEED,
                   [self.testData.SNAP_NOT_EXIST(), SUCCEED]]
        fake_cli = self.driverSetup(commands, results)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(
            self.testData.SNAPS_IN_SNAP_GROUP())
        snapshot_obj.consistencygroup_id = cg_name
        get_all_for_cgsnapshot.return_value = [snapshot_obj]
        self.driver.create_cgsnapshot(None, self.testData.test_cgsnapshot)
        expect_cmd = [
            mock.call(
                *self.testData.CREATE_CG_SNAPSHOT(
                    cg_name, cgsnapshot)),
            mock.call(
                *self.testData.GET_SNAP(cgsnapshot)),
            mock.call(
                *self.testData.GET_SNAP(cgsnapshot))]
        fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
'cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
def test_delete_cgsnapshot(self, get_all_for_cgsnapshot):
snap_name = self.testData.test_cgsnapshot['id']
commands = [self.testData.DELETE_CG_SNAPSHOT(snap_name)]
results = [SUCCEED]
fake_cli = self.driverSetup(commands, results)
snapshot_obj = fake_snapshot.fake_snapshot_obj(
self.testData.SNAPS_IN_SNAP_GROUP())
cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
snapshot_obj.consistencygroup_id = cg_name
get_all_for_cgsnapshot.return_value = [snapshot_obj]
self.driver.delete_cgsnapshot(None,
self.testData.test_cgsnapshot)
expect_cmd = [
mock.call(
*self.testData.DELETE_CG_SNAPSHOT(
snap_name))]
fake_cli.assert_has_calls(expect_cmd)
    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    def test_add_volume_to_cg(self):
        """Creating a volume in a CG also adds the new LUN to the group."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                    self.testData.ADD_LUN_TO_CG_CMD('cg_id', 1),
                    self.testData.GET_CG_BY_NAME_CMD('cg_id')
                    ]
        results = [self.testData.LUN_PROPERTY('vol1', True),
                   SUCCEED,
                   self.testData.CG_PROPERTY('cg_id')]
        fake_cli = self.driverSetup(commands, results)
        self.driver.create_volume(self.testData.test_volume_cg)
        # LUN creation, then property readback, then the CG add call.
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol1', 1,
                'unit_test_pool',
                None, None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
                      poll=False),
            mock.call(*self.testData.ADD_LUN_TO_CG_CMD(
                'cg_id', 1), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
    def test_create_cloned_volume_from_consistnecy_group(self):
        """Cloning a CG member goes through a temporary CG snapshot.

        Sequence verified: create temp cgsnapshot, mount it on an SMP,
        create the destination LUN, migrate, verify the migration, then
        delete the temp cgsnapshot.
        """
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
        cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
        output_dest = self.testData.LUN_PROPERTY("vol1_dest")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        output_migrate = ("", 0)
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
        commands = [cmd_dest, cmd_dest_p, cmd_migrate,
                    cmd_migrate_verify]
        results = [output_dest, output_dest, output_migrate,
                   output_migrate_verify]
        fake_cli = self.driverSetup(commands, results)
        self.driver.create_cloned_volume(self.testData.test_volume_clone_cg,
                                         self.testData.test_clone_cg)
        tmp_cgsnapshot = 'tmp-cgsnapshot-' + self.testData.test_volume['id']
        expect_cmd = [
            mock.call(
                *self.testData.CREATE_CG_SNAPSHOT(cg_name, tmp_cgsnapshot)),
            mock.call(
                *self.testData.GET_SNAP(tmp_cgsnapshot)),
            mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='vol1',
                                                        source='clone1'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'), poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='vol1', snapName=tmp_cgsnapshot)),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol1_dest', 1, 'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      retry_disable=True,
                      poll=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True),
            mock.call(*self.testData.DELETE_CG_SNAPSHOT(tmp_cgsnapshot))]
        fake_cli.assert_has_calls(expect_cmd)
    def test_create_volume_from_cgsnapshot(self):
        """Create-from-cgsnapshot: SMP + attach, new LUN, then migration."""
        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
        output_dest = self.testData.LUN_PROPERTY("vol2_dest")
        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
        output_migrate = ("", 0)
        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
        output_migrate_verify = (r'The specified source LUN '
                                 'is not currently migrating', 23)
        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
                    cmd_migrate_verify]
        results = [output_dest, output_dest, output_migrate,
                   output_migrate_verify]
        fake_cli = self.driverSetup(commands, results)
        self.driver.create_volume_from_snapshot(
            self.testData.volume_in_cg, self.testData.test_member_cgsnapshot)
        expect_cmd = [
            mock.call(
                *self.testData.SNAP_MP_CREATE_CMD(
                    name='vol2', source='vol1'),
                poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
                      poll=True),
            mock.call(
                *self.testData.SNAP_ATTACH_CMD(
                    name='vol2', snapName='cgsnapshot_id')),
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol2_dest', 1, 'unit_test_pool', None, None)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
                      poll=False),
            mock.call(*self.testData.MIGRATION_CMD(1, 1),
                      retry_disable=True,
                      poll=True),
            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
                      poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
    def test_update_consistencygroup(self):
        """Add/remove members replaces the CG's LUN list in one command."""
        cg_name = self.testData.test_cg['id']
        commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name)]
        results = [self.testData.CG_PROPERTY(cg_name)]
        fake_cli = self.driverSetup(commands, results)
        (model_update, add_vols, remove_vols) = (
            self.driver.update_consistencygroup(None, self.testData.test_cg,
                                                self.testData.
                                                VOLUMES_NOT_IN_CG(),
                                                self.testData.VOLUMES_IN_CG()))
        expect_cmd = [
            mock.call(*self.testData.REPLACE_LUNS_IN_CG_CMD(
                cg_name, ['4', '5']), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
        self.assertEqual('available', model_update['status'])
    def test_update_consistencygroup_remove_all(self):
        """Removing every member uses the remove-LUNs form of the command."""
        cg_name = self.testData.test_cg['id']
        commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name)]
        results = [self.testData.CG_PROPERTY(cg_name)]
        fake_cli = self.driverSetup(commands, results)
        (model_update, add_vols, remove_vols) = (
            self.driver.update_consistencygroup(None, self.testData.test_cg,
                                                None,
                                                self.testData.VOLUMES_IN_CG()))
        expect_cmd = [
            mock.call(*self.testData.REMOVE_LUNS_FROM_CG_CMD(
                cg_name, ['1', '3']), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
        self.assertEqual('available', model_update['status'])
    def test_update_consistencygroup_remove_not_in_cg(self):
        """Removing non-member volumes leaves the CG's LUN list unchanged."""
        cg_name = self.testData.test_cg['id']
        commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name)]
        results = [self.testData.CG_PROPERTY(cg_name)]
        fake_cli = self.driverSetup(commands, results)
        (model_update, add_vols, remove_vols) = (
            self.driver.update_consistencygroup(None, self.testData.test_cg,
                                                None,
                                                self.testData.
                                                VOLUMES_NOT_IN_CG()))
        # The replace command is re-issued with the existing members only.
        expect_cmd = [
            mock.call(*self.testData.REPLACE_LUNS_IN_CG_CMD(
                cg_name, ['1', '3']), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
        self.assertEqual('available', model_update['status'])
    def test_update_consistencygroup_error(self):
        """A failing replace command surfaces as EMCVnxCLICmdError."""
        cg_name = self.testData.test_cg['id']
        commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name),
                    self.testData.REPLACE_LUNS_IN_CG_CMD(
                        cg_name, ['1', '3'])]
        results = [self.testData.CG_PROPERTY(cg_name),
                   self.testData.CG_REPL_ERROR()]
        fake_cli = self.driverSetup(commands, results)
        self.assertRaises(exception.EMCVnxCLICmdError,
                          self.driver.update_consistencygroup,
                          None,
                          self.testData.test_cg,
                          [],
                          self.testData.VOLUMES_NOT_IN_CG())
        expect_cmd = [
            mock.call(*self.testData.REPLACE_LUNS_IN_CG_CMD(
                cg_name, ['1', '3']), poll=False)]
        fake_cli.assert_has_calls(expect_cmd)
def test_create_consistencygroup_from_cgsnapshot(self):
output_migrate_verify = ('The specified source LUN '
'is not currently migrating.', 23)
new_cg = self.testData.test_cg.copy()
new_cg.update(
{'id': 'new_cg_id'})
vol1_in_new_cg = self.testData.test_volume_cg.copy()
vol1_in_new_cg.update(
{'name': 'vol1_in_cg',
'id': '111111',
'consistencygroup_id': 'new_cg_id',
'provider_location': None})
vol2_in_new_cg = self.testData.test_volume_cg.copy()
vol2_in_new_cg.update(
{'name': 'vol2_in_cg',
'id': '222222',
'consistencygroup_id': 'new_cg_id',
'provider_location': None})
src_cgsnap = self.testData.test_cgsnapshot
snap1_in_src_cgsnap = self.testData.test_member_cgsnapshot.copy()
snap1_in_src_cgsnap.update(
{'volume': self.testData.test_volume,
'volume_name': 'src_vol1'})
snap2_in_src_cgsnap = self.testData.test_member_cgsnapshot.copy()
snap2_in_src_cgsnap.update(
{'volume': self.testData.test_volume2,
'volume_name': 'src_vol2'})
copied_snap_name = 'temp_snapshot_for_%s' % new_cg['id']
td = self.testData
commands = [td.SNAP_COPY_CMD(src_cgsnap['id'], copied_snap_name),
td.ALLOW_READWRITE_ON_SNAP_CMD(copied_snap_name),
td.SNAP_MP_CREATE_CMD(vol1_in_new_cg['name'],
snap1_in_src_cgsnap['volume_name']),
td.SNAP_ATTACH_CMD(vol1_in_new_cg['name'],
copied_snap_name),
td.LUN_CREATION_CMD(vol1_in_new_cg['name'] + '_dest',
vol1_in_new_cg['size'],
'unit_test_pool', 'thin', None),
td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name'] + '_dest'),
td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']),
td.MIGRATION_CMD(6231, 1),
td.SNAP_MP_CREATE_CMD(vol2_in_new_cg['name'],
snap2_in_src_cgsnap['volume_name']),
td.SNAP_ATTACH_CMD(vol2_in_new_cg['name'],
copied_snap_name),
td.LUN_CREATION_CMD(vol2_in_new_cg['name'] + '_dest',
vol2_in_new_cg['size'],
'unit_test_pool', 'thin', None),
td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name'] + '_dest'),
td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']),
td.MIGRATION_CMD(6232, 2),
td.MIGRATION_VERIFY_CMD(6231),
td.MIGRATION_VERIFY_CMD(6232),
td.CREATE_CONSISTENCYGROUP_CMD(new_cg['id'], [6231, 6232]),
td.DELETE_CG_SNAPSHOT(copied_snap_name)
]
results = [SUCCEED, SUCCEED, SUCCEED, SUCCEED, SUCCEED,
td.LUN_PROPERTY(vol1_in_new_cg['name'] + '_dest',
lunid=1),
td.LUN_PROPERTY(vol1_in_new_cg['name'], lunid=6231),
SUCCEED, SUCCEED, SUCCEED, SUCCEED,
td.LUN_PROPERTY(vol2_in_new_cg['name'] + '_dest',
lunid=2),
td.LUN_PROPERTY(vol2_in_new_cg['name'], lunid=6232),
SUCCEED, output_migrate_verify, output_migrate_verify,
SUCCEED, SUCCEED]
fake_cli = self.driverSetup(commands, results)
cg_model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
None, new_cg, [vol1_in_new_cg, vol2_in_new_cg],
cgsnapshot=src_cgsnap, snapshots=[snap1_in_src_cgsnap,
snap2_in_src_cgsnap],
source_cg=None, source_vols=None))
self.assertEqual(2, len(volumes_model_update))
self.assertTrue('id^%s' % 6231 in
volumes_model_update[0]['provider_location'])
self.assertTrue('id^%s' % 6232 in
volumes_model_update[1]['provider_location'])
expect_cmd = [
mock.call(*td.SNAP_COPY_CMD(src_cgsnap['id'], copied_snap_name)),
mock.call(*td.ALLOW_READWRITE_ON_SNAP_CMD(copied_snap_name)),
mock.call(*td.SNAP_MP_CREATE_CMD(vol1_in_new_cg['name'],
snap1_in_src_cgsnap['volume_name']),
poll=False),
mock.call(*td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']),
poll=True),
mock.call(*td.SNAP_ATTACH_CMD(vol1_in_new_cg['name'],
copied_snap_name)),
mock.call(*td.LUN_CREATION_CMD(vol1_in_new_cg['name'] + '_dest',
vol1_in_new_cg['size'],
'unit_test_pool', 'thick', None)),
mock.call(*td.LUN_PROPERTY_ALL_CMD(
vol1_in_new_cg['name'] + '_dest'), poll=False),
mock.call(*td.LUN_PROPERTY_ALL_CMD(
vol1_in_new_cg['name'] + '_dest'), poll=False),
mock.call(*td.MIGRATION_CMD(6231, 1),
poll=True, retry_disable=True),
mock.call(*td.SNAP_MP_CREATE_CMD(vol2_in_new_cg['name'],
snap2_in_src_cgsnap['volume_name']),
poll=False),
mock.call(*td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']),
poll=True),
mock.call(*td.SNAP_ATTACH_CMD(vol2_in_new_cg['name'],
copied_snap_name)),
mock.call(*td.LUN_CREATION_CMD(vol2_in_new_cg['name'] + '_dest',
vol2_in_new_cg['size'],
'unit_test_pool', 'thick', None)),
mock.call(*td.LUN_PROPERTY_ALL_CMD(
vol2_in_new_cg['name'] + '_dest'), poll=False),
mock.call(*td.LUN_PROPERTY_ALL_CMD(
vol2_in_new_cg['name'] + '_dest'), poll=False),
mock.call(*td.MIGRATION_CMD(6232, 2),
poll=True, retry_disable=True),
mock.call(*td.MIGRATION_VERIFY_CMD(6232), poll=True),
mock.call(*td.MIGRATION_VERIFY_CMD(6231), poll=True),
mock.call(*td.CREATE_CONSISTENCYGROUP_CMD(
new_cg['id'], [6232, 6231]), poll=True),
mock.call(*td.GET_CG_BY_NAME_CMD(new_cg['id'])),
mock.call(*td.DELETE_CG_SNAPSHOT(copied_snap_name))]
self.assertEqual(expect_cmd, fake_cli.call_args_list)
def test_create_consistencygroup_from_othersource(self):
new_cg = self.testData.test_cg.copy()
new_cg.update(
{'id': 'new_cg_id'})
vol1_in_new_cg = self.testData.test_volume_cg.copy()
vol1_in_new_cg.update(
{'name': 'vol1_in_cg',
'id': '111111',
'consistencygroup_id': 'new_cg_id',
'provider_location': None})
vol2_in_new_cg = self.testData.test_volume_cg.copy()
vol2_in_new_cg.update(
{'name': 'vol2_in_cg',
'id': '222222',
'consistencygroup_id': 'new_cg_id',
'provider_location': None})
self.driverSetup()
self.assertRaises(
exception.InvalidInput,
self.driver.create_consistencygroup_from_src,
new_cg, [vol1_in_new_cg, vol2_in_new_cg],
None, None, None, None)
    def test_create_cg_from_cgsnapshot_migrate_failed(self):
        """A failed LUN migration must roll back all temporary resources.

        MIGRATION_CMD for the second volume returns FAKE_ERROR_RETURN, so
        the driver should raise VolumeBackendAPIException and clean up the
        destination LUNs, snapshot mount points and the temporary snapshot
        copy it had created up to that point.
        """
        new_cg = self.testData.test_cg.copy()
        new_cg.update(
            {'id': 'new_cg_id'})
        vol1_in_new_cg = self.testData.test_volume_cg.copy()
        vol1_in_new_cg.update(
            {'name': 'vol1_in_cg',
             'id': '111111',
             'consistencygroup_id': 'new_cg_id',
             'provider_location': None})
        vol2_in_new_cg = self.testData.test_volume_cg.copy()
        vol2_in_new_cg.update(
            {'name': 'vol2_in_cg',
             'id': '222222',
             'consistencygroup_id': 'new_cg_id',
             'provider_location': None})
        src_cgsnap = self.testData.test_cgsnapshot
        snap1_in_src_cgsnap = self.testData.test_member_cgsnapshot.copy()
        snap1_in_src_cgsnap.update(
            {'volume': self.testData.test_volume,
             'volume_name': 'src_vol1'})
        snap2_in_src_cgsnap = self.testData.test_member_cgsnapshot.copy()
        snap2_in_src_cgsnap.update(
            {'volume': self.testData.test_volume2,
             'volume_name': 'src_vol2'})
        copied_snap_name = 'temp_snapshot_for_%s' % new_cg['id']
        td = self.testData
        commands = [td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name'] + '_dest'),
                    td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']),
                    td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name'] + '_dest'),
                    td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']),
                    td.MIGRATION_CMD(6232, 2)]
        results = [td.LUN_PROPERTY(vol1_in_new_cg['name'] + '_dest',
                                   lunid=1),
                   td.LUN_PROPERTY(vol1_in_new_cg['name'], lunid=6231),
                   td.LUN_PROPERTY(vol2_in_new_cg['name'] + '_dest',
                                   lunid=2),
                   td.LUN_PROPERTY(vol2_in_new_cg['name'], lunid=6232),
                   FAKE_ERROR_RETURN]
        fake_cli = self.driverSetup(commands, results)
        self.assertRaisesRegex(exception.VolumeBackendAPIException,
                               'Migrate volume failed',
                               self.driver.create_consistencygroup_from_src,
                               None, new_cg, [vol1_in_new_cg, vol2_in_new_cg],
                               cgsnapshot=src_cgsnap,
                               snapshots=[snap1_in_src_cgsnap,
                                          snap2_in_src_cgsnap],
                               source_cg=None, source_vols=None)
        # Cleanup runs in reverse creation order: vol2's resources first,
        # then vol1's, and finally the temporary snapshot copy.
        expect_cmd = [
            mock.call(*self.testData.LUN_DELETE_CMD(
                vol2_in_new_cg['name'] + '_dest')),
            mock.call('lun', '-detach', '-name', vol2_in_new_cg['name'], '-o'),
            mock.call(*self.testData.LUN_DELETE_CMD(vol2_in_new_cg['name'])),
            mock.call(*self.testData.LUN_DELETE_CMD(
                vol1_in_new_cg['name'] + '_dest')),
            mock.call('lun', '-detach', '-name', vol1_in_new_cg['name'], '-o'),
            mock.call(*self.testData.LUN_DELETE_CMD(vol1_in_new_cg['name'])),
            mock.call(*td.SNAP_DELETE_CMD(copied_snap_name), poll=True)]
        fake_cli.assert_has_calls(expect_cmd)
    def test_deregister_initiator(self):
        """Destroying the last-used storage group also deregisters the HBA.

        With destroy_empty_sg and itor_auto_dereg enabled, terminating the
        connection for the only mapped LUN should end with a
        'port -removeHBA' call for the connector's initiator.
        """
        fake_cli = self.driverSetup()
        self.driver.cli.destroy_empty_sg = True
        self.driver.cli.itor_auto_dereg = True
        cli_helper = self.driver.cli._client
        # Storage group with a single LUN mapped (ALU 1 -> HLU 16).
        data = {'storage_group_name': "fakehost",
                'storage_group_uid': "2F:D4:00:00:00:00:00:"
                "00:00:00:FF:E5:3A:03:FD:6D",
                'lunmap': {1: 16}}
        cli_helper.get_storage_group = mock.Mock(
            return_value=data)
        lun_info = {'lun_name': "unit_test_lun",
                    'lun_id': 1,
                    'pool': "unit_test_pool",
                    'attached_snapshot': "N/A",
                    'owner': "A",
                    'total_capacity_gb': 1.0,
                    'state': "Ready"}
        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
        # Stub out the helper calls so only the deregistration command
        # reaches the fake CLI.
        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
        cli_helper.disconnect_host_from_storage_group = mock.Mock()
        cli_helper.delete_storage_group = mock.Mock()
        self.driver.terminate_connection(self.testData.test_volume,
                                         self.testData.connector)
        expect_cmd = [
            mock.call('port', '-removeHBA', '-hbauid',
                      self.testData.connector['initiator'],
                      '-o')]
        fake_cli.assert_has_calls(expect_cmd)
    def test_unmanage(self):
        """unmanage() must be implemented (not raise NotImplementedError)."""
        self.driverSetup()
        try:
            self.driver.unmanage(self.testData.test_volume)
        except NotImplementedError:
            self.fail('Interface unmanage need to be implemented')
@mock.patch("random.shuffle", mock.Mock())
def test_find_available_iscsi_targets_without_pingnode(self):
self.configuration.iscsi_initiators = None
self.driverSetup()
port_a1 = {'Port WWN': 'fake_iqn_a1',
'SP': 'A',
'Port ID': 1,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_a1'}
port_a2 = {'Port WWN': 'fake_iqn_a2',
'SP': 'A',
'Port ID': 2,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_a2'}
port_b1 = {'Port WWN': 'fake_iqn_b1',
'SP': 'B',
'Port ID': 1,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_b1'}
all_targets = {'A': [port_a1, port_a2],
'B': [port_b1]}
targets = self.driver.cli._client.find_available_iscsi_targets(
'fakehost',
{('A', 2, 0), ('B', 1, 0)},
all_targets)
self.assertTrue(port_a2 in targets)
self.assertTrue(port_b1 in targets)
@mock.patch.object(emc_vnx_cli.CommandLineHelper,
'ping_node')
def test_find_available_iscsi_targets_with_pingnode(self, ping_node):
self.configuration.iscsi_initiators = (
'{"fakehost": ["10.0.0.2"]}')
self.driverSetup()
port_a1 = {'Port WWN': 'fake_iqn_a1',
'SP': 'A',
'Port ID': 1,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_a1'}
port_a2 = {'Port WWN': 'fake_iqn_a2',
'SP': 'A',
'Port ID': 2,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_a2'}
port_b1 = {'Port WWN': 'fake_iqn_b1',
'SP': 'B',
'Port ID': 1,
'Virtual Port ID': 0,
'IP Address': 'fake_ip_b1'}
all_targets = {'A': [port_a1, port_a2],
'B': [port_b1]}
ping_node.side_effect = [False, False, True]
targets = self.driver.cli._client.find_available_iscsi_targets(
'fakehost',
{('A', 2, 0), ('A', 1, 0), ('B', 1, 0)},
all_targets)
self.assertTrue(port_a1 in targets)
self.assertTrue(port_a2 in targets)
self.assertTrue(port_b1 in targets)
    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
                'EMCVnxCliBase.get_lun_owner',
                mock.Mock(return_value='A'))
    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
                'CommandLineHelper.get_registered_spport_set',
                mock.Mock())
    @mock.patch.object(emc_vnx_cli.CommandLineHelper,
                       'find_available_iscsi_targets')
    def test_vnx_get_iscsi_properties(self, find_available_iscsi_targets):
        """Connection properties list every available target.

        The singular target_iqn/target_portal/target_lun values must be
        taken from the first target in the list (legacy single-path
        fields kept alongside the plural ones).
        """
        self.driverSetup()
        port_a1 = {'Port WWN': 'fake_iqn_a1',
                   'SP': 'A',
                   'Port ID': 1,
                   'Virtual Port ID': 0,
                   'IP Address': 'fake_ip_a1'}
        port_b1 = {'Port WWN': 'fake_iqn_b1',
                   'SP': 'B',
                   'Port ID': 1,
                   'Virtual Port ID': 0,
                   'IP Address': 'fake_ip_b1'}
        find_available_iscsi_targets.return_value = [port_a1, port_b1]
        connect_info = self.driver.cli.vnx_get_iscsi_properties(
            self.testData.test_volume, self.testData.connector, 1, '')
        expected_info = {
            'target_discovered': True,
            'target_iqns': [
                'fake_iqn_a1',
                'fake_iqn_b1'],
            'target_iqn': 'fake_iqn_a1',
            'target_luns': [1, 1],
            'target_lun': 1,
            'target_portals': [
                'fake_ip_a1:3260',
                'fake_ip_b1:3260'],
            'target_portal': 'fake_ip_a1:3260',
            'volume_id': '1'}
        self.assertEqual(expected_info, connect_info)
    def test_update_migrated_volume(self):
        """After migration the model update records the LUN type metadata."""
        self.driverSetup()
        expected_update = {'metadata': {'lun_type': 'lun'}}
        model_update = self.driver.update_migrated_volume(
            None, self.testData.test_volume,
            self.testData.test_volume2, 'available')
        self.assertDictMatch(expected_update, model_update)
class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase):
    """Tests for the iSCSI driver configured without explicit pool names.

    With storage_vnx_pool_names unset, the driver runs array-based:
    every pool reported by the array is exposed in the stats and the
    target pool is derived from the volume's host field.
    """
    def setUp(self):
        super(EMCVNXCLIDArrayBasedDriverTestCase, self).setUp()
        # No pool names configured: the driver manages all array pools.
        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': None,
             'volume_backend_name': 'namedbackend'})

    def generate_driver(self, conf):
        """Build the iSCSI driver instance used by the test base class."""
        driver = emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
        return driver

    def test_get_volume_stats(self):
        """Stats must expose both array pools with full license support."""
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.POOL_GET_ALL_CMD(True)]
        results = [self.testData.NDU_LIST_RESULT,
                   self.testData.POOL_GET_ALL_RESULT(True)]
        self.driverSetup(commands, results)
        stats = self.driver.get_volume_stats(True)
        # assertEqual reports expected vs. actual on failure, unlike
        # assertTrue(a == b) which only prints "False is not true".
        self.assertEqual(VERSION, stats['driver_version'],
                         "driver_version is incorrect")
        self.assertEqual('iSCSI', stats['storage_protocol'],
                         "storage_protocol is not correct")
        self.assertEqual("EMC", stats['vendor_name'],
                         "vendor name is not correct")
        self.assertEqual("namedbackend", stats['volume_backend_name'],
                         "volume backend name is not correct")
        self.assertEqual(2, len(stats['pools']))
        pool_stats1 = stats['pools'][0]
        expected_pool_stats1 = {
            'free_capacity_gb': 3105.303,
            'reserved_percentage': 32,
            'location_info': 'unit_test_pool|fakeSerial',
            'total_capacity_gb': 3281.146,
            'provisioned_capacity_gb': 536.140,
            'compression_support': 'True',
            'deduplication_support': 'True',
            'thin_provisioning_support': True,
            'thick_provisioning_support': True,
            'consistencygroup_support': 'True',
            'pool_name': 'unit_test_pool',
            'max_over_subscription_ratio': 20.0,
            'fast_cache_enabled': 'True',
            'fast_support': 'True'}
        self.assertEqual(expected_pool_stats1, pool_stats1)
        pool_stats2 = stats['pools'][1]
        expected_pool_stats2 = {
            'free_capacity_gb': 3984.768,
            'reserved_percentage': 32,
            'location_info': 'unit_test_pool2|fakeSerial',
            'total_capacity_gb': 4099.992,
            'provisioned_capacity_gb': 636.240,
            'compression_support': 'True',
            'deduplication_support': 'True',
            'thin_provisioning_support': True,
            'thick_provisioning_support': True,
            'consistencygroup_support': 'True',
            'pool_name': 'unit_test_pool2',
            'max_over_subscription_ratio': 20.0,
            'fast_cache_enabled': 'False',
            'fast_support': 'True'}
        self.assertEqual(expected_pool_stats2, pool_stats2)

    def test_get_volume_stats_wo_fastcache(self):
        """Without enabler licenses, pool capabilities fall back to False."""
        commands = [self.testData.NDU_LIST_CMD,
                    self.testData.POOL_GET_ALL_CMD(False)]
        results = [self.testData.NDU_LIST_RESULT_WO_LICENSE,
                   self.testData.POOL_GET_ALL_RESULT(False)]
        self.driverSetup(commands, results)
        stats = self.driver.get_volume_stats(True)
        self.assertEqual(2, len(stats['pools']))
        pool_stats1 = stats['pools'][0]
        expected_pool_stats1 = {
            'free_capacity_gb': 3105.303,
            'reserved_percentage': 32,
            'location_info': 'unit_test_pool|fakeSerial',
            'total_capacity_gb': 3281.146,
            'provisioned_capacity_gb': 536.140,
            'compression_support': 'False',
            'deduplication_support': 'False',
            'thin_provisioning_support': False,
            'thick_provisioning_support': True,
            'consistencygroup_support': 'False',
            'pool_name': 'unit_test_pool',
            'max_over_subscription_ratio': 20.0,
            'fast_cache_enabled': 'False',
            'fast_support': 'False'}
        self.assertEqual(expected_pool_stats1, pool_stats1)
        pool_stats2 = stats['pools'][1]
        expected_pool_stats2 = {
            'free_capacity_gb': 3984.768,
            'reserved_percentage': 32,
            'location_info': 'unit_test_pool2|fakeSerial',
            'total_capacity_gb': 4099.992,
            'provisioned_capacity_gb': 636.240,
            'compression_support': 'False',
            'deduplication_support': 'False',
            'thin_provisioning_support': False,
            'thick_provisioning_support': True,
            'consistencygroup_support': 'False',
            'pool_name': 'unit_test_pool2',
            'max_over_subscription_ratio': 20.0,
            'fast_cache_enabled': 'False',
            'fast_support': 'False'}
        self.assertEqual(expected_pool_stats2, pool_stats2)

    def test_get_volume_stats_storagepool_states(self):
        """Pools in transitional or dead states report zero free capacity."""
        commands = [self.testData.POOL_GET_ALL_CMD(False)]
        results = [self.testData.POOL_GET_ALL_STATES_TEST
                   (['Initializing', 'Ready', 'Faulted',
                     'Offline', 'Deleting'])]
        self.driverSetup(commands, results)
        stats = self.driver.get_volume_stats(True)
        # Initializing/Offline/Deleting pools must show 0 free capacity;
        # Ready and Faulted pools keep their real capacity.
        self.assertEqual(
            0, stats['pools'][0]['free_capacity_gb'],
            "free_capacity_gb is incorrect")
        self.assertNotEqual(
            0, stats['pools'][1]['free_capacity_gb'],
            "free_capacity_gb is incorrect")
        self.assertNotEqual(
            0, stats['pools'][2]['free_capacity_gb'],
            "free_capacity_gb is incorrect")
        self.assertEqual(
            0, stats['pools'][3]['free_capacity_gb'],
            "free_capacity_gb is incorrect")
        self.assertEqual(
            0, stats['pools'][4]['free_capacity_gb'],
            "free_capacity_gb is incorrect")

    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'deduplicated'}))
    def test_create_volume_deduplicated(self):
        """'deduplicated' extra spec maps to a deduplicated LUN creation."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type')]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True)]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        # Case
        self.driver.create_volume(self.testData.test_volume_with_type)
        # Verification
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'deduplicated', None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                      poll=False)]
        fake_cli.assert_has_calls(expect_cmd)

    def test_get_pool(self):
        """get_pool() reads the pool name from the LUN's properties."""
        testVolume = self.testData.test_volume_with_type
        commands = [self.testData.LUN_PROPERTY_POOL_CMD(testVolume['name'])]
        results = [self.testData.LUN_PROPERTY(testVolume['name'], False)]
        fake_cli = self.driverSetup(commands, results)
        pool = self.driver.get_pool(testVolume)
        self.assertEqual('unit_test_pool', pool)
        fake_cli.assert_has_calls(
            [mock.call(*self.testData.LUN_PROPERTY_POOL_CMD(
                testVolume['name']), poll=False)])

    def test_get_target_pool_for_cloned_volme(self):
        """Cloned volume's target pool comes from host info, no CLI call.

        NOTE: 'volme' in the test name is a pre-existing typo, kept so
        the test id stays stable for test runners.
        """
        testSrcVolume = self.testData.test_volume
        testNewVolume = self.testData.test_volume2
        fake_cli = self.driverSetup()
        pool = self.driver.cli.get_target_storagepool(testNewVolume,
                                                      testSrcVolume)
        self.assertEqual('unit_test_pool', pool)
        self.assertFalse(fake_cli.called)

    def test_get_target_pool_for_clone_legacy_volme(self):
        """Legacy source volume (no pool in host) needs a CLI pool lookup."""
        testSrcVolume = self.testData.test_legacy_volume
        testNewVolume = self.testData.test_volume2
        commands = [self.testData.LUN_PROPERTY_POOL_CMD(testSrcVolume['name'])]
        results = [self.testData.LUN_PROPERTY(testSrcVolume['name'], False)]
        fake_cli = self.driverSetup(commands, results)
        pool = self.driver.cli.get_target_storagepool(testNewVolume,
                                                      testSrcVolume)
        self.assertEqual('unit_test_pool', pool)
        fake_cli.assert_has_calls(
            [mock.call(*self.testData.LUN_PROPERTY_POOL_CMD(
                testSrcVolume['name']), poll=False)])

    def test_manage_existing_get_size(self):
        """manage_existing_get_size returns the LUN's size from the array."""
        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
                       '-state', '-userCap', '-owner',
                       '-attachedSnapshot', '-poolName')
        test_size = 2
        commands = [get_lun_cmd]
        results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
        fake_cli = self.driverSetup(commands, results)
        test_volume = self.testData.test_volume2.copy()
        test_volume['host'] = "host@backendsec#unit_test_pool"
        get_size = self.driver.manage_existing_get_size(
            test_volume,
            self.testData.test_existing_ref)
        expected = [mock.call(*get_lun_cmd, poll=True)]
        self.assertEqual(test_size, get_size)
        fake_cli.assert_has_calls(expected)

    def test_manage_existing_get_size_incorrect_pool(self):
        """Managing a LUN from a pool other than the host's is refused."""
        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
                       '-state', '-userCap', '-owner',
                       '-attachedSnapshot', '-poolName')
        commands = [get_lun_cmd]
        results = [self.testData.LUN_PROPERTY('lun_name')]
        fake_cli = self.driverSetup(commands, results)
        test_volume = self.testData.test_volume2.copy()
        # The volume's host points at a pool that does not match the LUN's.
        test_volume['host'] = "host@backendsec#fake_pool"
        ex = self.assertRaises(
            exception.ManageExistingInvalidReference,
            self.driver.manage_existing_get_size,
            test_volume,
            self.testData.test_existing_ref)
        self.assertTrue(
            re.match(r'.*not managed by the host',
                     ex.msg))
        expected = [mock.call(*get_lun_cmd, poll=True)]
        fake_cli.assert_has_calls(expected)

    def test_manage_existing(self):
        """manage_existing renames the array LUN to the Cinder volume name."""
        lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
                          '-newName', 'vol_with_type', '-o')
        commands = [lun_rename_cmd]
        results = [SUCCEED]
        fake_cli = self.driverSetup(commands, results)
        self.driver.manage_existing(
            self.testData.test_volume_with_type,
            self.testData.test_existing_ref)
        expected = [mock.call(*lun_rename_cmd, poll=False)]
        fake_cli.assert_has_calls(expected)

    @mock.patch(
        "eventlet.event.Event.wait",
        mock.Mock(return_value=None))
    @mock.patch(
        "cinder.volume.volume_types."
        "get_volume_type_extra_specs",
        mock.Mock(return_value={'storagetype:provisioning': 'Compressed',
                                'storagetype:pool': 'unit_test_pool'}))
    def test_create_compression_volume(self):
        """'Compressed' extra spec creates the LUN then enables compression."""
        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.LUN_PROPERTY('vol_with_type', True),
                   self.testData.NDU_LIST_RESULT]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.stats['compression_support'] = 'True'
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        # Case
        self.driver.create_volume(self.testData.test_volume_with_type)
        # Verification
        expect_cmd = [
            mock.call(*self.testData.LUN_CREATION_CMD(
                'vol_with_type', 1,
                'unit_test_pool',
                'compressed', None, poll=False)),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=False),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
                'vol_with_type'), poll=True),
            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
                1))]
        fake_cli.assert_has_calls(expect_cmd)

    def test_get_registered_spport_set(self):
        """The storage group output parses into a set of (SP, port, vport)."""
        self.driverSetup()
        spport_set = self.driver.cli._client.get_registered_spport_set(
            'iqn.1993-08.org.debian:01:222', 'fakehost',
            self.testData.STORAGE_GROUP_HAS_MAP_ISCSI('fakehost')[0])
        self.assertEqual({('A', 2, 0), ('A', 0, 0), ('B', 2, 0)}, spport_set)

    def test_validate_iscsi_port(self):
        """(SP, port, vport) combinations validate against the port listing.

        Virtual ports not present under a port entry (e.g. A-6-2) must be
        rejected even when the physical port exists.
        """
        self.driverSetup()
        port_list = (
            "SP: A\n"
            "Port ID: 6\n"
            "Port WWN: iqn.fake.a6\n"
            "iSCSI Alias: 1111.a6\n"
            "\n"
            "Virtual Port ID: 0\n"
            "VLAN ID: Disabled\n"
            "\n"
            "SP: B\n"
            "Port ID: 7\n"
            "Port WWN: iqn.fake.b7\n"
            "iSCSI Alias: 0235.b7"
            "\n"
            "Virtual Port ID: 0\n"
            "VLAN ID: Disabled\n"
            "\n"
            "Virtual Port ID: 1\n"
            "VLAN ID: 200\n"
            "\n\n")
        self.assertFalse(self.driver.cli._validate_iscsi_port(
            'A', 5, 0, port_list))
        self.assertTrue(self.driver.cli._validate_iscsi_port(
            'A', 6, 0, port_list))
        self.assertFalse(self.driver.cli._validate_iscsi_port(
            'A', 6, 2, port_list))
        self.assertTrue(self.driver.cli._validate_iscsi_port(
            'B', 7, 1, port_list))
        self.assertTrue(self.driver.cli._validate_iscsi_port(
            'B', 7, 0, port_list))
        self.assertFalse(self.driver.cli._validate_iscsi_port(
            'B', 7, 2, port_list))
class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
def generate_driver(self, conf):
return emc_cli_fc.EMCCLIFCDriver(configuration=conf)
    @mock.patch(
        "oslo_concurrency.processutils.execute",
        mock.Mock(
            return_value=(
                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection_fc_auto_reg(self):
        """FC attach with and without initiator auto registration.

        First half: auto registration creates the storage group and sets
        a path for every (initiator WWN, SP) combination before adding
        the HLU. Second half: manual registration only connects the host
        to the group, with no set_path calls.
        """
        # Test for auto registration
        test_volume = self.testData.test_volume.copy()
        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        self.driver.initialize_connection(
            test_volume,
            self.testData.connector)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('port', '-list', '-sp'),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:90'
                        ':12:34:56', 'A', '0', None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:90'
                        ':12:34:56', 'B', '2', None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78:90'
                        ':54:32:16', 'A', '0', None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78:90'
                        ':54:32:16', 'B', '2', None, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call('port', '-list', '-gname', 'fakehost')
                    ]
        fake_cli.assert_has_calls(expected)
        # Test for manual registration
        self.configuration.initiator_auto_registration = False
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_NO_MAP('fakehost')],
                   ('', 0),
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        self.driver.initialize_connection(
            test_volume,
            self.testData.connector)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
                              '-gname', 'fakehost', '-o', poll=False),
                    mock.call('port', '-list', '-gname', 'fakehost')
                    ]
        fake_cli.assert_has_calls(expected)
    @mock.patch(
        "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
        "get_device_mapping_from_network",
        mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection_fc_auto_zoning(self):
        """With fabric zoning enabled the zone manager data is populated.

        The connection info must carry the initiator_target_map and the
        target WWNs derived from the mocked SAN device map.
        """
        # Test for auto zoning
        self.configuration.zoning_mode = 'fabric'
        self.configuration.initiator_auto_registration = False
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
                    self.testData.GETFCPORT_CMD()]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
                   ('', 0),
                   self.testData.FC_PORTS]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.zonemanager_lookup_service =\
            fc_service.FCSanLookupService(configuration=self.configuration)
        conn_info = self.driver.initialize_connection(
            self.testData.test_volume,
            self.testData.connector)
        self.assertEqual(EMCVNXCLIDriverTestData.i_t_map,
                         conn_info['data']['initiator_target_map'])
        self.assertEqual(['1122334455667777'],
                         conn_info['data']['target_wwn'])
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call('storagegroup', '-list', '-gname', 'fakehost',
                              poll=True),
                    mock.call('port', '-list', '-sp')]
        fake_cli.assert_has_calls(expected)
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection_fc_white_list(self):
        """Auto registration honors the configured io_port_list white list.

        Only ports A-0 and B-2 may be registered, and the returned
        target_wwn list must contain just the white-listed ports' WWNs.
        """
        self.configuration.io_port_list = 'a-0,B-2'
        test_volume = self.testData.test_volume.copy()
        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_HAS_MAP_ISCSI('fakehost')],
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        data = self.driver.initialize_connection(
            test_volume,
            self.testData.connector)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:'
                        '90:12:34:56', 'A', 0, None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:'
                        '90:12:34:56', 'B', 2, None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78'
                        ':90:54:32:16', 'A', 0, None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78'
                        ':90:54:32:16', 'B', 2, None, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call('port', '-list', '-gname', 'fakehost')]
        fake_cli.assert_has_calls(expected)
        self.assertEqual(['5006016A0860080F', '5006016008600195'],
                         data['data']['target_wwn'])
    @mock.patch('random.randint',
                mock.Mock(return_value=0))
    def test_initialize_connection_fc_port_registered_wl(self):
        """White-listed registration when some HBA paths already exist.

        The storage group already has iSCSI/FC HBA registrations, so only
        the missing A-0 paths are set before the LUN is added.
        """
        self.configuration.io_port_list = 'a-0,B-2'
        test_volume = self.testData.test_volume.copy()
        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
        self.configuration.initiator_auto_registration = True
        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        results = [self.testData.STORAGE_GROUP_ISCSI_FC_HBA('fakehost'),
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        data = self.driver.initialize_connection(
            test_volume,
            self.testData.connector)
        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=False),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:90'
                        ':12:34:56', 'A', 0, None, '10.0.0.2')),
                    mock.call(*self.testData.set_path_cmd(
                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78:'
                        '90:54:32:16', 'A', 0, None, '10.0.0.2')),
                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
                              poll=True),
                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
                              '-gname', 'fakehost', '-o',
                              poll=False),
                    mock.call('port', '-list', '-gname', 'fakehost')]
        fake_cli.assert_has_calls(expected)
        self.assertEqual(['5006016A0860080F', '5006016008600195'],
                         data['data']['target_wwn'])
    @mock.patch(
        "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
        "get_device_mapping_from_network",
        mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
    def test_terminate_connection_remove_zone_false(self):
        """Other LUNs remain mapped, so no zone removal info is returned.

        With entries left in the lunmap, terminate_connection should only
        remove this volume's HLU and return empty connection data (no
        initiator_target_map for the zone manager to tear down).
        """
        self.driver = emc_cli_fc.EMCCLIFCDriver(
            configuration=self.configuration)
        cli_helper = self.driver.cli._client
        data = {'storage_group_name': "fakehost",
                'storage_group_uid': "2F:D4:00:00:00:00:00:"
                "00:00:00:FF:E5:3A:03:FD:6D",
                'lunmap': {1: 16, 2: 88, 3: 47}}
        cli_helper.get_storage_group = mock.Mock(
            return_value=data)
        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
        self.driver.cli.zonemanager_lookup_service =\
            fc_service.FCSanLookupService(configuration=self.configuration)
        connection_info = self.driver.terminate_connection(
            self.testData.test_volume,
            self.testData.connector)
        # NOTE(review): the failure message says "should not be None" but
        # the assertion actually checks that the data dict is empty/falsy.
        self.assertFalse(connection_info['data'],
                         'connection_info data should not be None.')
        cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
            16, self.testData.connector["host"])
@mock.patch(
"cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
"get_device_mapping_from_network",
mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
def test_terminate_connection_remove_zone_true(self):
self.driver = emc_cli_fc.EMCCLIFCDriver(
configuration=self.configuration)
cli_helper = self.driver.cli._client
data = {'storage_group_name': "fakehost",
'storage_group_uid': "2F:D4:00:00:00:00:00:"
"00:00:00:FF:E5:3A:03:FD:6D",
'lunmap': {}}
cli_helper.get_storage_group = mock.Mock(
return_value=data)
cli_helper.remove_hlu_from_storagegroup = mock.Mock()
self.driver.cli.zonemanager_lookup_service =\
fc_service.FCSanLookupService(configuration=self.configuration)
connection_info = self.driver.terminate_connection(
self.testData.test_volume,
self.testData.connector)
self.assertTrue('initiator_target_map' in connection_info['data'],
'initiator_target_map should be populated.')
self.assertEqual(EMCVNXCLIDriverTestData.i_t_map,
connection_info['data']['initiator_target_map'])
def test_get_volume_stats(self):
commands = [self.testData.NDU_LIST_CMD,
self.testData.POOL_GET_ALL_CMD(True)]
results = [self.testData.NDU_LIST_RESULT,
self.testData.POOL_GET_ALL_RESULT(True)]
self.driverSetup(commands, results)
stats = self.driver.get_volume_stats(True)
self.assertTrue(stats['driver_version'] == VERSION,
"driver_version is incorrect")
self.assertTrue(
stats['storage_protocol'] == 'FC',
"storage_protocol is incorrect")
self.assertTrue(
stats['vendor_name'] == "EMC",
"vendor name is incorrect")
self.assertTrue(
stats['volume_backend_name'] == "namedbackend",
"volume backend name is incorrect")
pool_stats = stats['pools'][0]
expected_pool_stats = {
'free_capacity_gb': 3105.303,
'reserved_percentage': 32,
'location_info': 'unit_test_pool|fakeSerial',
'total_capacity_gb': 3281.146,
'provisioned_capacity_gb': 536.14,
'compression_support': 'True',
'deduplication_support': 'True',
'thin_provisioning_support': True,
'thick_provisioning_support': True,
'max_over_subscription_ratio': 20.0,
'consistencygroup_support': 'True',
'pool_name': 'unit_test_pool',
'fast_cache_enabled': 'True',
'fast_support': 'True'}
self.assertEqual(expected_pool_stats, pool_stats)
def test_get_volume_stats_too_many_luns(self):
commands = [self.testData.NDU_LIST_CMD,
self.testData.POOL_GET_ALL_CMD(True),
self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
results = [self.testData.NDU_LIST_RESULT,
self.testData.POOL_GET_ALL_RESULT(True),
self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.check_max_pool_luns_threshold = True
stats = self.driver.get_volume_stats(True)
pool_stats = stats['pools'][0]
self.assertTrue(
pool_stats['free_capacity_gb'] == 0,
"free_capacity_gb is incorrect")
expect_cmd = [
mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
poll=False)]
fake_cli.assert_has_calls(expect_cmd)
self.driver.cli.check_max_pool_luns_threshold = False
stats = self.driver.get_volume_stats(True)
pool_stats = stats['pools'][0]
self.assertTrue(stats['driver_version'] is not None,
"driver_version is incorrect")
self.assertTrue(
pool_stats['free_capacity_gb'] == 3105.303,
"free_capacity_gb is incorrect")
def test_deregister_initiator(self):
fake_cli = self.driverSetup()
self.driver.cli.destroy_empty_sg = True
self.driver.cli.itor_auto_dereg = True
cli_helper = self.driver.cli._client
data = {'storage_group_name': "fakehost",
'storage_group_uid': "2F:D4:00:00:00:00:00:"
"00:00:00:FF:E5:3A:03:FD:6D",
'lunmap': {1: 16}}
cli_helper.get_storage_group = mock.Mock(
return_value=data)
lun_info = {'lun_name': "unit_test_lun",
'lun_id': 1,
'pool': "unit_test_pool",
'attached_snapshot': "N/A",
'owner': "A",
'total_capacity_gb': 1.0,
'state': "Ready"}
cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
cli_helper.remove_hlu_from_storagegroup = mock.Mock()
cli_helper.disconnect_host_from_storage_group = mock.Mock()
cli_helper.delete_storage_group = mock.Mock()
self.driver.terminate_connection(self.testData.test_volume,
self.testData.connector)
fc_itor_1 = '22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56'
fc_itor_2 = '22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16'
expect_cmd = [
mock.call('port', '-removeHBA', '-hbauid', fc_itor_1, '-o'),
mock.call('port', '-removeHBA', '-hbauid', fc_itor_2, '-o')]
fake_cli.assert_has_calls(expect_cmd)
class EMCVNXCLIToggleSPTestData(object):
    """Shared fixtures for the storage-processor toggle test cases."""

    def FAKE_COMMAND_PREFIX(self, sp_address):
        """Return the naviseccli invocation prefix targeting *sp_address*."""
        credentials = ('-user', 'sysadmin', '-password', 'sysadmin',
                       '-scope', 'global')
        return (('/opt/Navisphere/bin/naviseccli', '-address', sp_address)
                + credentials)
class EMCVNXCLIToggleSPTestCase(test.TestCase):
    """Tests for automatic storage-processor (SP) failover in the CLI helper.

    When a naviseccli command against the active SP fails with a
    connection-level error, the helper must retry the same command against
    the secondary SP and make it the new active SP.  The four toggle tests
    only differ in the error message returned by the failing call, so the
    shared mechanics live in :meth:`_test_toggle_sp`.
    """

    def setUp(self):
        super(EMCVNXCLIToggleSPTestCase, self).setUp()
        self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
        self.configuration.san_ip = '10.10.10.10'
        self.configuration.san_secondary_ip = "10.10.10.11"
        self.configuration.storage_vnx_pool_name = 'unit_test_pool'
        self.configuration.san_login = 'sysadmin'
        self.configuration.san_password = 'sysadmin'
        self.configuration.default_timeout = 1
        self.configuration.max_luns_per_storage_group = 10
        self.configuration.destroy_empty_storage_group = 10
        self.configuration.storage_vnx_authentication_type = "global"
        self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
        self.configuration.zoning_mode = None
        self.configuration.storage_vnx_security_file_dir = ""
        self.cli_client = emc_vnx_cli.CommandLineHelper(
            configuration=self.configuration)
        self.test_data = EMCVNXCLIToggleSPTestData()

    def _test_toggle_sp(self, error_msg):
        """Fail the first CLI call with *error_msg* and verify the command
        is retried against the secondary SP, which becomes active."""
        self.cli_client.active_storage_ip = '10.10.10.10'
        FAKE_SUCCESS_RETURN = ('success', 0)
        FAKE_COMMAND = ('list', 'pool')
        SIDE_EFFECTS = [processutils.ProcessExecutionError(
            exit_code=255, stdout=error_msg),
            FAKE_SUCCESS_RETURN]
        with mock.patch('cinder.utils.execute') as mock_utils:
            mock_utils.side_effect = SIDE_EFFECTS
            self.cli_client.command_execute(*FAKE_COMMAND)
            self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
            expected = [
                mock.call(
                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
                        + FAKE_COMMAND),
                    check_exit_code=True),
                mock.call(
                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
                        + FAKE_COMMAND),
                    check_exit_code=True)]
            mock_utils.assert_has_calls(expected)

    def test_no_sp_toggle(self):
        """A successful command must not change the active SP."""
        self.cli_client.active_storage_ip = '10.10.10.10'
        FAKE_SUCCESS_RETURN = ('success', 0)
        FAKE_COMMAND = ('list', 'pool')
        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN]
        with mock.patch('cinder.utils.execute') as mock_utils:
            mock_utils.side_effect = SIDE_EFFECTS
            self.cli_client.command_execute(*FAKE_COMMAND)
            self.assertEqual("10.10.10.10", self.cli_client.active_storage_ip)
            expected = [
                mock.call(*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
                          + FAKE_COMMAND), check_exit_code=True)]
            mock_utils.assert_has_calls(expected)

    # NOTE: method name keeps the historical "unavailabe" typo so external
    # test selection by name keeps working.
    def test_toggle_sp_with_server_unavailabe(self):
        FAKE_ERROR_MSG = """\
Error occurred during HTTP request/response from the target: '10.244.213.142'.
Message : HTTP/1.1 503 Service Unavailable"""
        self._test_toggle_sp(FAKE_ERROR_MSG)

    def test_toggle_sp_with_end_of_data(self):
        FAKE_ERROR_MSG = """\
Error occurred during HTTP request/response from the target: '10.244.213.142'.
Message : End of data stream"""
        self._test_toggle_sp(FAKE_ERROR_MSG)

    def test_toggle_sp_with_connection_refused(self):
        FAKE_ERROR_MSG = """\
A network error occurred while trying to connect: '10.244.213.142'.
Message : Error occurred because connection refused. \
Unable to establish a secure connection to the Management Server.
"""
        self._test_toggle_sp(FAKE_ERROR_MSG)

    def test_toggle_sp_with_connection_error(self):
        FAKE_ERROR_MSG = """\
A network error occurred while trying to connect: '192.168.1.56'.
Message : Error occurred because of time out"""
        self._test_toggle_sp(FAKE_ERROR_MSG)
class EMCVNXCLIBackupTestCase(DriverTestCaseBase):
    """Tests for the temporary snapshot mount points (SMPs) used when
    backing up in-use volumes."""

    def driverSetup(self):
        """Build an iSCSI driver with a fully mocked CLI client plus a fake
        snapshot object, and return the mocked client."""
        self.context = context.get_admin_context()
        self.driver = self.generate_driver(self.configuration)
        self.driver.cli._client = mock.Mock()
        self.snapshot = fake_snapshot.fake_snapshot_obj(
            self.context, **self.testData.test_snapshot)
        self.snapshot.volume = fake_volume.fake_volume_obj(self.context)
        return self.driver.cli._client

    def generate_driver(self, conf):
        return emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)

    @patch.object(emc_vnx_cli.EMCVnxCliBase, 'terminate_connection')
    def test_terminate_connection_snapshot(self, terminate_connection):
        client_mock = self.driverSetup()
        connector = self.testData.connector
        smp = 'tmp-smp-' + self.snapshot['id']
        self.driver.terminate_connection_snapshot(self.snapshot, connector)
        # The driver wraps the SMP in a fake volume dict before delegating.
        terminate_connection.assert_called_once_with(
            {'name': smp}, connector)
        client_mock.detach_mount_point.assert_called_once_with(smp)

    @patch.object(emc_vnx_cli.EMCVnxCliBase, 'initialize_connection')
    def test_initialize_connection_snapshot(self, initialize_connection):
        client_mock = self.driverSetup()
        connector = self.testData.connector
        smp = 'tmp-smp-' + self.snapshot['id']
        self.driver.initialize_connection_snapshot(self.snapshot, connector)
        client_mock.attach_mount_point.assert_called_once_with(
            smp, self.snapshot['name'])
        initialize_connection.assert_called_once_with(
            {'name': smp, 'id': self.snapshot['id']}, connector)

    def test_create_export_snapshot(self):
        client_mock = self.driverSetup()
        connector = self.testData.connector
        smp = 'tmp-smp-' + self.snapshot['id']
        self.driver.create_export_snapshot(None, self.snapshot, connector)
        client_mock.create_mount_point.assert_called_once_with(
            self.snapshot['volume_name'], smp)

    @patch.object(emc_vnx_cli.EMCVnxCliBase, 'delete_volume')
    def test_remove_export_snapshot(self, delete_volume):
        self.driverSetup()
        smp = 'tmp-smp-' + self.snapshot['id']
        self.driver.remove_export_snapshot(None, self.snapshot)
        delete_volume.assert_called_once_with(
            {'name': smp, 'provider_location': None}, True)
class EMCVNXCLIMultiPoolsTestCase(DriverTestCaseBase):
    """Tests for the ``storage_vnx_pool_names`` multi-pool option."""

    def generate_driver(self, conf):
        driver = emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
        return driver

    def fake_command_execute_for_driver_setup(self, *command, **kwargv):
        """Return canned CLI output so driver setup sees four ready pools."""
        if command == ('connection', '-getport', '-address', '-vlanid'):
            return self.testData.ALL_PORTS
        elif command == ('storagepool', '-list', '-state'):
            return self.testData.POOL_GET_STATE_RESULT([
                {'pool_name': self.testData.test_pool_name, 'state': "Ready"},
                {'pool_name': "unit_test_pool2", 'state': "Ready"},
                {'pool_name': "unit_test_pool3", 'state': "Ready"},
                {'pool_name': "unit_text_pool4", 'state': "Ready"}])
        else:
            return SUCCEED

    def test_storage_pool_names_option(self):
        """Comma-separated pool names are parsed into a set; a trailing
        comma and surrounding whitespace are tolerated."""
        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': "unit_test_pool, unit_test_pool3",
             'volume_backend_name': 'namedbackend'})
        driver = self.generate_driver(self.configuration)
        self.assertEqual(set(["unit_test_pool", "unit_test_pool3"]),
                         driver.cli.storage_pools)

        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': "unit_test_pool2,",
             'volume_backend_name': 'namedbackend'})
        driver = self.generate_driver(self.configuration)
        self.assertEqual(set(["unit_test_pool2"]),
                         driver.cli.storage_pools)

        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': "unit_test_pool3",
             'volume_backend_name': 'namedbackend'})
        driver = self.generate_driver(self.configuration)
        self.assertEqual(set(["unit_test_pool3"]),
                         driver.cli.storage_pools)

    def test_configured_pool_does_not_exist(self):
        # Non-existent pools are silently dropped as long as at least one
        # configured pool exists on the array.
        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': "unit_test_pool2, unit_test_pool_none2",
             'volume_backend_name': 'namedbackend'})
        driver = self.generate_driver(self.configuration)
        self.assertEqual(set(["unit_test_pool2"]),
                         driver.cli.storage_pools)

        # If *no* configured pool exists, driver creation must fail.
        # (Fixed: a missing comma previously fused the second pool name
        # with the 'volume_backend_name' key via implicit string
        # concatenation, so only one bad pool name was ever configured.)
        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': "unit_test_pool_none1, "
                                       "unit_test_pool_none2",
             'volume_backend_name': 'namedbackend'})
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.generate_driver,
                          self.configuration)

    def test_no_storage_pool_is_configured(self):
        """With no pool names configured, the driver manages no pools."""
        self.configuration.safe_get = self.fake_safe_get(
            {'storage_vnx_pool_names': None,
             'volume_backend_name': 'namedbackend'})
        driver = self.generate_driver(self.configuration)
        self.assertEqual(set(),
                         driver.cli.storage_pools)
# Convenience alias so the tests below can reference the error-classifying
# helper without the full ``emc_vnx_cli`` prefix.
VNXError = emc_vnx_cli.VNXError
class VNXErrorTest(test.TestCase):
    """Unit tests for ``VNXError.has_error()`` message classification."""

    def test_has_error(self):
        message = "The specified snapshot name is already in use. (0x716d8005)"
        self.assertTrue(VNXError.has_error(message))

    def test_has_error_with_specific_error(self):
        message = "The specified snapshot name is already in use. (0x716d8005)"
        self.assertTrue(
            VNXError.has_error(message, VNXError.SNAP_NAME_EXISTED))
        self.assertFalse(
            VNXError.has_error(message, VNXError.LUN_ALREADY_EXPANDED))

    def test_has_error_not_found(self):
        message = "Cannot find the consistency group."
        self.assertTrue(VNXError.has_error(message))
        self.assertTrue(
            VNXError.has_error(message, VNXError.GENERAL_NOT_FOUND))

    def test_has_error_not_exist(self):
        # Both "does not exist" and "may not exist" phrasings must match.
        for message in ("The specified snapshot does not exist.",
                        "The (pool lun) may not exist."):
            self.assertTrue(
                VNXError.has_error(message, VNXError.GENERAL_NOT_FOUND))

    def test_has_error_multi_line(self):
        message = """Could not retrieve the specified (pool lun).
 The (pool lun) may not exist."""
        self.assertTrue(
            VNXError.has_error(message, VNXError.GENERAL_NOT_FOUND))

    def test_has_error_regular_string_false(self):
        message = "Cannot unbind LUN because it's contained in a Storage Group."
        self.assertFalse(
            VNXError.has_error(message, VNXError.GENERAL_NOT_FOUND))

    def test_has_error_multi_errors(self):
        message = "Cannot unbind LUN because it's contained in a Storage Group."
        # Matches when any of the supplied error kinds applies...
        self.assertTrue(
            VNXError.has_error(message,
                               VNXError.LUN_IN_SG,
                               VNXError.GENERAL_NOT_FOUND))
        # ...and not when none of them does.
        self.assertFalse(
            VNXError.has_error(message,
                               VNXError.LUN_ALREADY_EXPANDED,
                               VNXError.LUN_NOT_MIGRATING))
| true | true |
f7ff545f5f40f2327550448510c2c6e5bd940770 | 380 | py | Python | exps/default/yolox_l_leaky.py | shachargluska/YOLOX | 94caf37f4c7c81dd7305c74b0c71dc8c4813c7a5 | [
"Apache-2.0"
] | null | null | null | exps/default/yolox_l_leaky.py | shachargluska/YOLOX | 94caf37f4c7c81dd7305c74b0c71dc8c4813c7a5 | [
"Apache-2.0"
] | null | null | null | exps/default/yolox_l_leaky.py | shachargluska/YOLOX | 94caf37f4c7c81dd7305c74b0c71dc8c4813c7a5 | [
"Apache-2.0"
] | 1 | 2022-02-28T07:04:39.000Z | 2022-02-28T07:04:39.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import os
from yolox.exp import Exp as MyExp
class Exp(MyExp):
    """YOLOX-L experiment configuration using LeakyReLU activations."""

    def __init__(self):
        super(Exp, self).__init__()
        # YOLOX-L scaling: full depth and width multipliers.
        self.depth = 1.0
        self.width = 1.0
        # Experiment name derived from this file's name (without extension).
        self.exp_name = os.path.basename(
            os.path.realpath(__file__)).split(".")[0]
        self.act = 'lrelu'
| 22.352941 | 82 | 0.607895 |
import os
from yolox.exp import Exp as MyExp
class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
self.depth = 1.0
self.width = 1.0
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
self.act='lrelu'
| true | true |
f7ff547d02fff493b43e2ca2a1a68c050d95678a | 52 | py | Python | auxjad/indicators/__init__.py | gilbertohasnofb/auxjad | 553b7fe97221b6f378a93ade6262f024e3cbc678 | [
"MIT"
] | 6 | 2020-05-18T09:28:29.000Z | 2021-12-22T00:40:54.000Z | auxjad/indicators/__init__.py | gilbertohasnofb/auxjad | 553b7fe97221b6f378a93ade6262f024e3cbc678 | [
"MIT"
] | 1 | 2021-04-21T20:29:38.000Z | 2021-04-22T19:44:54.000Z | auxjad/indicators/__init__.py | gilbertohasnofb/auxjad | 553b7fe97221b6f378a93ade6262f024e3cbc678 | [
"MIT"
] | 1 | 2021-04-21T18:54:46.000Z | 2021-04-21T18:54:46.000Z | """
indicators
==========
Auxjad's indicators.
"""
| 7.428571 | 20 | 0.519231 | true | true | |
f7ff5588f53a6a514c35a01b6b7b964de646f8a6 | 77,588 | py | Python | xbrl_to_json.py | scoofy/xbrl-to-json | 8783425c55fdc069055440a7149d9a3fdf517141 | [
"Apache-2.0"
] | 14 | 2019-02-13T03:13:00.000Z | 2022-02-12T19:28:39.000Z | xbrl_to_json.py | scoofy/xbrl-to-json | 8783425c55fdc069055440a7149d9a3fdf517141 | [
"Apache-2.0"
] | 1 | 2019-05-01T10:16:30.000Z | 2019-07-19T22:12:08.000Z | xbrl_to_json.py | scoofy/xbrl-to-json | 8783425c55fdc069055440a7149d9a3fdf517141 | [
"Apache-2.0"
] | 2 | 2019-09-23T17:48:57.000Z | 2020-06-01T15:25:18.000Z | import sys, os, shutil, logging, datetime, json, time, copy, re, random
import urllib.request
import bs4, anytree, anytree.exporter, anytree.importer
import xml.etree.ElementTree as ET
import pprint as pp
logging.basicConfig(format=' ---- %(filename)s|%(lineno)d ----\n%(message)s', level=logging.INFO)
# Clark-notation namespaces whose elements are structural XBRL plumbing
# rather than reportable facts; they are skipped during fact extraction.
clarks_to_ignore = ['http://www.w3.org/2001/XMLSchema',
                    'http://www.xbrl.org/2003/instance',
                    'http://www.xbrl.org/2003/linkbase',
                    'http://xbrl.org/2006/xbrldi',
                    ]
# Collected unitRef attribute values (appears to be populated at runtime
# by code outside this chunk).
unit_ref_list = []
# NOTE(review): 60*60*24*7*30 is 30 *weeks* (~210 days), not one month —
# confirm whether callers rely on this exact value before changing it.
MONTH_IN_SECONDS = 60.0 * 60 * 24 * 7 * 30
# SEC form types that denote annual filings (domestic and foreign issuers).
ANNUAL_FORM_TYPES = ["10-K", "20-F", "40-F"]
# Element-name prefixes treated as meaningful fact namespaces; a filer's
# own ticker is appended at runtime for company-specific ("custom") tags.
PREFIXES_THAT_MATTER = ["us-gaap", "dei", "srt", "country", "stpr", "custom"]
# EDGAR state-of-incorporation codes for US states/territories.
US_COUNTRY_CODES = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DE", "DC", "FL", "GA", "HI", "ID", "IL",
                    "IN", "IA", "KS", "KY", "LA", "ME", "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE",
                    "NV", "NH", "NJ","NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC", "SD",
                    "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY", "X1", ]
# EDGAR codes for Canadian provinces/territories.
CANADA_COUNTRY_CODES = ["A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "A8", "A9", "B0", "Z4",]
def main_xbrl_to_json_converter(ticker, cik, date, folder_path, sic=None, country_code=None, delete_files_after_import=False):
    """Convert a downloaded XBRL filing into a single JSON-backed fact tree.

    Tries to load a previously converted JSON file first; otherwise parses
    every ``.xml``/``.xsd`` file in *folder_path*, merges them into one
    fact-centric anytree structure, and writes it out as JSON.

    :param ticker: stock ticker symbol; names the tree root and output files
    :param cik: SEC CIK number (unused in this function; kept for callers)
    :param date: filing date used to form the cached JSON filename
    :param folder_path: directory containing the filing's XBRL files
    :param sic: optional SIC industry code to embed in the tree
    :param country_code: optional country/state code to embed in the tree
    :param delete_files_after_import: if True, remove the source folder and
        any ``*.json_render.txt`` sibling after a successful conversion
    :return: the root anytree Node of the converted filing
    """
    root_node_dict = {}
    potential_json_filename = return_xbrl_to_json_converted_filename_with_date(folder_path, ticker, date)
    # logging.info(potential_json_filename)
    ''' first we try to import the json files'''
    try:
        root_json = import_json(potential_json_filename)
        root_node = convert_dict_to_node_tree(root_json)
    except Exception as e:
        # Any failure (missing file, bad JSON) just falls through to a
        # fresh conversion below.
        logging.error(e)
        root_node = None
    ''' if we don't have the root node, we need to get it from the actual files '''
    if not root_node:
        logging.info("json file does not already exist, creating one...")
        list_of_filenames_in_directory = os.listdir(folder_path)
        ''' we're going to iterate through the relevant xml/xsd files '''
        for filename in list_of_filenames_in_directory:
            if filename.endswith(".xml") or filename.endswith(".xsd"):
                xbrl_filename = os.path.join(folder_path, filename)
                logging.info("processing xbrl file: {}".format(xbrl_filename))
                ''' here we generate the root node '''
                # NOTE(review): `testing_write_file` is a module-level flag
                # defined outside this chunk — confirm it exists at runtime.
                root_node = xbrl_to_json_processor(xbrl_filename, ticker, write_file=testing_write_file, write_txt_file=testing_write_file)
                logging.info("done")
                root_node_dict[filename] = root_node
        ''' here we build up the whole tree '''
        fact_tree_root = fact_centric_xbrl_processor(root_node_dict, ticker, sic, country_code)
        write_txt_file = not delete_files_after_import # if we're deleting files, lets not save a render.txt file
        ''' here get the root of the whole tree '''
        root_node = xbrl_to_json_processor(potential_json_filename, ticker, root_node=fact_tree_root, write_file=True, write_txt_file=write_txt_file)
        ''' this is an important ^^^ function '''
    if delete_files_after_import:
        # Clean up the raw filing directory and the optional render file.
        if os.path.isdir(folder_path):
            shutil.rmtree(folder_path)
        potential_txt_file = "{}.json_render.txt".format(folder_path)
        if os.path.isfile(potential_txt_file):
            os.remove(potential_txt_file)
    return root_node
def return_refernce_node(node, fact_tree_root, other_tree_root, ticker):
    """Return (creating if needed) the grouping "reference" node for *node*.

    Fact-like nodes (those with an xlink href or a non-ignored namespace)
    are grouped under a reference node attached to *fact_tree_root*, keyed
    by their locator with any known taxonomy prefix (us-gaap_, dei_, the
    ticker, ...) stripped.  Everything else is treated as contextual XBRL
    plumbing and is parked under *other_tree_root* for later pairing.

    Note: despite the name, the returned node is not a semantic parent —
    it acts as a folder that collects related items.
    """
    local_prefixes_that_matter = PREFIXES_THAT_MATTER + [ticker.lower()]
    reference_node = None
    locator = None
    href = node.attrib.get("{http://www.w3.org/1999/xlink}href")
    if href:
        locator = return_xlink_locator(node)
    else:
        if node.clark not in clarks_to_ignore:
            locator = node.suffix
    if locator:
        ''' if a locator is present, this should be a fact item.
        here, with the prefixes_that_matter we're looking at locators that start with
        us-gaap_ or similar. This is, unfortunately, pretty common.
        It seems to creep in, and it'd be nice to avoid it in the refernces.
        '''
        locator_prefix = None
        modified_locator = None
        ''' check for the prefixes '''
        for prefix in local_prefixes_that_matter:
            if locator.startswith("{}_".format(prefix)):
                locator_prefix = prefix
                modified_locator = locator.replace("{}_".format(prefix), "")
        ''' next we look for a potential existing parent/reference node by name '''
        if modified_locator:
            reference_node = anytree.search.find_by_attr(fact_tree_root, modified_locator)
        else:
            reference_node = anytree.search.find_by_attr(fact_tree_root, locator)
        ''' if there is not a parent/reference node, we make one '''
        if not reference_node:
            ''' so all that follows is to try and mimic the structure of the base fact file with prefixes '''
            if modified_locator:
                if locator_prefix:
                    reference_node = anytree.Node(modified_locator,
                                                  parent=fact_tree_root,
                                                  prefix=locator_prefix,
                                                  suffix=modified_locator)
                else:
                    reference_node = anytree.Node(modified_locator,
                                                  parent=fact_tree_root,
                                                  suffix=modified_locator)
            else:
                reference_node = anytree.Node(locator,
                                              parent=fact_tree_root,
                                              suffix=locator)
            ''' i'm going to try and associate the type of fact with the
            reference node
            '''
            # anytree Nodes only have the attributes they were created with,
            # so attribute access below is probed via try/except.
            try:
                ''' maybe a prefix already exists'''
                existing_prefix = reference_node.prefix
            except:
                ''' it doesn't so lets try and find one '''
                node_prefix=None
                ''' we want the node to have a fact
                (because we are looking for fact prefix from the main .xml file)
                '''
                try:
                    node_fact = node.fact
                except:
                    'probably not a fact node'
                    node_fact = ""
                if node_fact != "":
                    try:
                        node_prefix = node.prefix
                    except:
                        node_prefix = None
                if node_prefix:
                    reference_node.prefix = node_prefix
        return reference_node
    else:
        ''' this is a contextual item
        we will put this item in to the "other tree root" tree and deal with it later
        '''
        xbrli_node = anytree.search.find_by_attr(other_tree_root, "{{{}}}{}".format(node.clark, node.suffix))
        if not xbrli_node:
            xbrli_node = anytree.Node("{{{}}}{}".format(node.clark, node.suffix),
                                      parent=other_tree_root,
                                      suffix=node.suffix)
        return xbrli_node
def fact_centric_xbrl_processor(root_node_dict, ticker, sic, country_code, sort_trash_for_debugging=False):
    """Merge the per-file XBRL trees in *root_node_dict* into one fact tree.

    Processes the primary instance document first, then the .xsd and the
    label/definition/calculation/presentation linkbases, grouping every
    node under a per-fact reference node rooted at *ticker*.  Leftover
    contextual nodes are paired to facts in several passes, context refs
    are collapsed into a dict, and duplicates are discarded into a trash
    tree.  Returns the fact tree root.

    :param root_node_dict: {filename: per-file anytree root}
    :param ticker: name for the fact tree root
    :param sic: SIC code passed through to the context dict step
    :param country_code: country code passed through to the context dict step
    :param sort_trash_for_debugging: also dedupe the trash tree (slow)
    """
    fact_tree_root = anytree.Node(ticker)
    other_tree_root = anytree.Node('xbrli')
    trash_tree_root = anytree.Node('unsorted_trash')
    parent_child_tuple_list = []
    '''Here i'm going to attempt to order the files, to facilitate normal order:'''
    extension_order_after_primary_file_list = [".xsd", "_lab.xml", "_def.xml", "_cal.xml", "_pre.xml"]
    ordered_filename_list = []
    for extention in extension_order_after_primary_file_list:
        for filename in root_node_dict.keys():
            if filename.endswith(extention):
                ordered_filename_list.append(filename)
    '''now add the primary file:'''
    for filename in root_node_dict.keys():
        if filename not in ordered_filename_list:
            ordered_filename_list.insert(0, filename)
    '''here, we're just looking to see if a top level fact reference exists
    (could be made more efficient in the future, but limited)
    '''
    logging.info("Start initial sorting:")
    start_time = time.time()
    for filename in ordered_filename_list:
        logging.info(filename)
        ''' grab the root node '''
        root_node = root_node_dict.get(filename)
        ''' iterate through and look for suffixes, if one is missing, there's a major error '''
        for node in anytree.PreOrderIter(root_node):
            try:
                suffix = node.suffix
            except:
                # Every node created upstream must carry a 'suffix'; a
                # missing one means the import stage is broken, so abort.
                logging.error("there is a problem with this node... it has no 'suffix' attribute")
                #logging.info(pp.pformat(vars(node)))
                sys.exit()
            ''' we create a refernce node if it doesn't exist,
            why: we're trying to prevent dublicates
            now let's pair it with that node
            '''
            reference_node = return_refernce_node(node, fact_tree_root, other_tree_root, ticker)
            parent_child_tuple_list.append((reference_node, node))
        ''' we iterate through, check for duplicates'''
        for reference_node, child in parent_child_tuple_list:
            ''' now lets unite all these nodes together '''
            unique = True
            ''' here we go through to check for duplicates '''
            for existing_child in reference_node.children:
                if vars(child) == vars(existing_child):
                    '''this prevents lots of redundant nodes'''
                    unique = False
                if unique == False:
                    break
            if unique == True:
                ''' if we have a unique parent child relationship, we map it
                that is we attach the parent/reference node as the parent of our node
                '''
                child.parent = reference_node
            else:
                ''' here, our node is a duplicate, and so we throw it out '''
                child.parent = trash_tree_root
    ''' at this point we should have a basic tree structure with the base as the ticker'''
    print_root_node_lengths(fact_tree_root, other_tree_root, trash_tree_root)
    logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    ''' now let's need to pair more of the other refernces with our facts'''
    logging.info("Start deep sorting:")
    start_time = time.time()
    ''' make a quick sort dict '''
    fact_tree_children_dict = {node.suffix: node for node in fact_tree_root.children}
    ''' got to the other other_tree_root and try to pair the stuff left over '''
    # NOTE(review): reparenting nodes while iterating PreOrderIter mutates
    # the tree mid-traversal; this appears intentional but is fragile.
    for node in anytree.PreOrderIter(other_tree_root):
        replacement_parent = return_new_parent(node, fact_tree_children_dict)
        if replacement_parent:
            node.parent = replacement_parent
    print_root_node_lengths(fact_tree_root, other_tree_root, trash_tree_root)
    logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    #fact_tree_children_dict = {node.suffix: node for node in fact_tree_root.children}
    logging.info("Start deep sorting second pass:")
    start_time = time.time()
    for node in anytree.PreOrderIter(other_tree_root):
        replacement_parent = return_new_parent_round_two(node, fact_tree_children_dict)
        if replacement_parent:
            node.parent = replacement_parent
    print_root_node_lengths(fact_tree_root, other_tree_root, trash_tree_root)
    logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    logging.info("Start contextRef sorting:")
    start_time = time.time()
    for node in anytree.PreOrderIter(fact_tree_root):
        replacement_parent = return_new_parent_for_Axis_contextRefs(node)
        if replacement_parent:
            node.parent = replacement_parent
    print_root_node_lengths(fact_tree_root, other_tree_root, trash_tree_root)
    logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    logging.info("Create context refs dict:")
    start_time = time.time()
    convert_context_refs_into_id_keyed_dict(fact_tree_root, other_tree_root, trash_tree_root, sic, country_code)
    logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    if sort_trash_for_debugging:
        logging.info("Sort trash file:")
        start_time = time.time()
        trash_tree_root = keep_trash_sorted(trash_tree_root)
        logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    logging.info("Saving text files")
    start_time = time.time()
    # the following are for testing:
    # NOTE(review): `testing` is a module-level flag defined outside this
    # chunk — confirm it exists at runtime.
    if testing:
        fact_tree_root_filename = ticker + "_facts"
        root_node_to_rendertree_text_file(fact_tree_root, fact_tree_root_filename)
        other_tree_root_filename = ticker + "_xbrli"
        root_node_to_rendertree_text_file(other_tree_root, other_tree_root_filename)
        trash_filename = ticker + "_trash"
        root_node_to_rendertree_text_file(trash_tree_root, trash_filename)
    logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    return fact_tree_root
def convert_context_refs_into_id_keyed_dict(fact_tree_root, other_tree_root, trash_tree_root, sic, country_code):
    """Collapse XBRL <context> period nodes into one id-keyed dict.

    Scans the leftover children of *other_tree_root* for the ``context``
    element and the period elements (startDate/endDate/instant/forever),
    builds ``{context_id: "date"}`` for instants and
    ``{context_id: "start:end"}`` for durations, moves all consumed nodes
    to *trash_tree_root*, and attaches the resulting dict plus the SIC
    code and country code as child nodes of *fact_tree_root*.
    """
    context_node = None
    period_node_list = []
    for child in list(other_tree_root.children):
        if child.suffix == "context":
            context_node = child
        elif child.suffix in ["startDate", "endDate", "instant", "forever"]:
            period_node_list.append(child)
    context_dict = {}
    for period_node in period_node_list:
        for node in anytree.PreOrderIter(period_node):
            # Nodes without a parent_id (e.g. the period grouping node
            # itself) carry no context information; skip them.
            # (Narrowed from a bare ``except:`` — attribute access on an
            # anytree Node can only fail with AttributeError here.)
            try:
                existing_entry = context_dict.get(node.parent_id)
            except AttributeError:
                continue
            if node.parent.suffix == "measure":
                continue
            if existing_entry is None:
                context_dict[node.parent_id] = node.fact
            else:  # second date for this id -> combine start/end into a range
                if node.suffix == "startDate":
                    new_entry = node.fact + ":" + existing_entry
                elif node.suffix == "endDate":
                    new_entry = existing_entry + ":" + node.fact
                elif node.suffix == "instant":
                    # An 'instant' context has exactly one date; a second
                    # entry for the same id indicates corrupt input.
                    logging.error("This should not happen. Examine this code error")
                    sys.exit()
                context_dict[node.parent_id] = new_entry
            node.parent = trash_tree_root
    for node in anytree.PreOrderIter(context_node):
        node.parent = trash_tree_root
    # Attach results to the fact tree (anytree links via parent=; the
    # created Node objects themselves need not be kept).
    anytree.Node("context_dict", parent=fact_tree_root, attrib=context_dict)
    anytree.Node("sic", parent=fact_tree_root, attrib=sic)
    anytree.Node("country_code", parent=fact_tree_root, attrib=country_code)
def keep_trash_sorted(trash_tree_root):
    """Rebuild the trash tree with duplicate nodes nested under their twin.

    Each non-root node from *trash_tree_root* is moved into a new 'trash'
    tree: the first occurrence becomes a direct child of the new root, and
    any node whose attributes compare equal (via ``vars()``) to an already
    placed node is attached *under* that node instead.  Used only for
    debugging (see ``sort_trash_for_debugging``).  Returns the new root.
    """
    sorted_trash_tree_root = anytree.Node('trash')
    # NOTE(review): reparenting nodes while PreOrderIter is walking the
    # same tree mutates it mid-traversal — appears to work, but fragile.
    for node in anytree.PreOrderIter(trash_tree_root):
        success = False
        if node.parent:
            for sorted_node in anytree.PreOrderIter(sorted_trash_tree_root):
                if sorted_node.parent:
                    if vars(node) == vars(sorted_node):
                        success = True
                        node.parent = sorted_node
                        break
            if not success:
                node.parent = sorted_trash_tree_root
    # Log what the (now mostly emptied) original trash tree looks like.
    logging.info("old trash tree")
    logging.info(anytree.RenderTree(trash_tree_root))
    return sorted_trash_tree_root
def print_root_node_lengths(fact_tree_root, other_tree_root, trash_tree_root):
    """Log the total node counts of the three working trees."""
    counts = [sum(1 for _ in anytree.PreOrderIter(root))
              for root in (fact_tree_root, other_tree_root, trash_tree_root)]
    logging.info("facts:\t{}\tother:\t{}\ttrash:\t{}".format(*counts))
def return_new_parent(node, fact_tree_children_dict):
    """Find a node's rightful parent among the fact tree's top-level children.

    Tries a cascade of matching strategies and returns the first hit, or
    None when nothing matches:
      1. the node's recorded parent_id,
      2. its 'dimension' attribute (bare local name, then ':'->'_' form),
      3. its xlink:label (direct match, then recursive subtree search),
      5. its xlink:from / xlink:to arc attributes ('from' side only),
      4. the last path segment of its xlink:role.
    The numbering mirrors the original step comments; "step 4" deliberately
    runs last.  Each lookup is wrapped in try/except because not every node
    carries a parent_id or an attrib mapping.
    """
    # step 1: recursion
    try:
        parent_id = node.parent_id
    except:
        parent_id = None
    if parent_id:
        parent = fact_tree_children_dict.get(parent_id)
        if parent:
            return parent
    # step 2: start check dimension
    try:
        dimension = node.attrib.get("dimension")
    except:
        dimension = None
    if dimension:
        # try the bare local name first ("us-gaap:FooAxis" -> "FooAxis")
        dimension_parent_id = dimension.split(":")[-1]
        parent = fact_tree_children_dict.get(dimension_parent_id)
        if parent:
            return parent
        # then the underscore form ("us-gaap:FooAxis" -> "us-gaap_FooAxis")
        dimension_underscore = dimension.replace(":", "_")
        parent = fact_tree_children_dict.get(dimension_underscore)
        if parent:
            return parent
    # step 3 check label
    try:
        label = node.attrib.get("{http://www.w3.org/1999/xlink}label")
    except:
        label = None
    if label:
        for suffix, tree_node in fact_tree_children_dict.items():
            if suffix in label:
                try:
                    parent_label = tree_node.attrib.get("{http://www.w3.org/1999/xlink}label")
                except:
                    parent_label = None
                if parent_label:
                    if label == parent_label:
                        return tree_node
                parent = recursive_label_node_getter(tree_node, label)
                if parent:
                    return parent
    # step 5: from and to attributes
    try:
        from_attrib = node.attrib.get("{http://www.w3.org/1999/xlink}from")
        to_attrib = node.attrib.get("{http://www.w3.org/1999/xlink}to")
    except:
        from_attrib = None
        to_attrib = None
    if from_attrib and to_attrib:
        ''' i decided not to make copies, but instead, just leave them in the from attributes'''
        # to attribute (make copy)
        '''
        for suffix, tree_node in fact_tree_children_dict.items():
            if suffix in to_attrib:
                try:
                    parent_label = tree_node.attrib.get("{http://www.w3.org/1999/xlink}label")
                except:
                    parent_label = None
                if parent_label:
                    if to_attrib == parent_label:
                        to_node = copy.copy(node)
                        to_node.parent = tree_node
                        break
                to_parent = recursive_label_node_getter(tree_node, to_attrib)
                if to_parent:
                    to_node = copy.copy(node)
                    to_node.parent = tree_node
                    break
        '''
        # from attribute (return node)
        for suffix, tree_node in fact_tree_children_dict.items():
            if suffix in from_attrib:
                try:
                    parent_label = tree_node.attrib.get("{http://www.w3.org/1999/xlink}label")
                except:
                    parent_label = None
                if parent_label:
                    if from_attrib == parent_label:
                        return tree_node
                parent = recursive_label_node_getter(tree_node, from_attrib)
                if parent:
                    return parent
    # step 4 roles
    try:
        role = node.attrib.get("{http://www.w3.org/1999/xlink}role")
    except:
        role = None
    if role:
        parent = fact_tree_children_dict.get(role.split("/")[-1])
        if parent:
            return parent
def return_new_parent_round_two(node, fact_tree_children_dict):
    """Second-pass reparenting lookup.

    Checks the node's 'name', xlink:from, and 'id' attributes in that order;
    for each candidate value, returns the top-level child whose suffix equals
    it, or (when the suffix is merely contained in it) the first descendant
    found by id and then by xlink:label.  Returns None when nothing matches.
    """
    for attr_key in ("name", "{http://www.w3.org/1999/xlink}from", "id"):
        try:
            candidate = node.attrib.get(attr_key)
        except:
            candidate = None
        if not candidate:
            continue
        for suffix, tree_node in fact_tree_children_dict.items():
            if suffix == candidate:
                return tree_node
            if suffix in candidate:
                # fall back to a recursive search of this child's subtree,
                # matching by id first and label second
                for getter in (recursive_node_id_getter, recursive_label_node_getter):
                    match = getter(tree_node, candidate)
                    if match:
                        return match
def return_new_parent_for_Axis_contextRefs(node):
    """Give a node whose contextRef names an Axis/Member dimension a
    dedicated sub-parent under its current parent.

    Returns the existing sub-parent node when one with the same suffix is
    already present, a freshly created one (flagged axis=True) otherwise,
    or None when the contextRef has no Axis/Member segments.
    """
    try:
        context_ref = node.attrib.get('contextRef')
    except:
        return
    if context_ref is None:
        return
    segments = context_ref.split("_")
    if len(segments) == 1:
        return
    if "Axis" not in context_ref and "Member" not in context_ref:
        return
    for segment in segments:
        if not (segment.endswith("Axis") or segment.endswith("Member")):
            continue
        current_parent = node.parent
        # reuse a pre-existing sub-parent with this suffix when present
        for sibling in current_parent.children:
            if sibling.suffix == segment:
                return sibling
        # no pre-existing sub-parent: create one flagged as an axis node
        return anytree.Node(segment,
                            parent = current_parent,
                            suffix = segment,
                            axis = True
                            )
def recursive_node_id_getter(node, original_id):
    """Depth-first search for a descendant (or node itself) whose attrib
    'id' equals original_id; returns that node or None."""
    try:
        candidate = node.attrib.get("id")
    except:
        candidate = None
    if candidate and candidate == original_id:
        return node
    for child in node.children:
        found = recursive_node_id_getter(child, original_id)
        if found:
            return found
def recursive_label_node_getter(node, original_label):
    """Depth-first search for a node whose xlink:label equals original_label,
    also accepting a locator label whose 'loc_' prefix rewrites to the wanted
    'lab_' form.  Returns the matching node or None."""
    label = None
    try:
        label = node.attrib.get("{http://www.w3.org/1999/xlink}label")
    except:
        pass
    if label and original_label in (label, label.replace("loc_", "lab_")):
        return node
    for child in node.children:
        hit = recursive_label_node_getter(child, original_label)
        if hit:
            return hit
def other_tree_node_replacement(attribute_list, fact_tree_root_children):
    """Find a replacement parent among fact_tree_root_children whose suffix
    matches one of the candidate attribute strings.

    Four passes, most to least exact:
      1. exact suffix match,
      2. ':' -> '_' normalization ("us-gaap:Revenues" -> "us-gaap_Revenues"),
      3. bare local name after the prefix ("us-gaap:Revenues" -> "Revenues"),
      4. last underscore-separated token.
    Returns the first matching child, or None.

    Bug fix: the fallback passes previously compared only against the loop
    variable left over from the first pass (i.e. the last child); they now
    check every child.
    """
    children = list(fact_tree_root_children)
    # pass 1: exact suffix match
    for child in children:
        for attribute in attribute_list:
            if attribute == child.suffix:
                return child
    # pass 2: colon-to-underscore normalization
    for child in children:
        for attribute in attribute_list:
            if attribute.replace(":", "_") == child.suffix:
                return child
    # pass 3: bare local name after the namespace prefix
    for child in children:
        for attribute in attribute_list:
            if attribute.split(":")[-1] == child.suffix:
                return child
    # pass 4: last underscore-separated token
    for child in children:
        for attribute in attribute_list:
            if attribute.split("_")[-1] == child.suffix:
                return child
    return None
def xbrl_to_json_processor(xbrl_filename, ticker, root_node=None, write_file=False, write_txt_file=False):
    """Convert an XBRL file (or an already-built anytree root) and optionally
    persist it as JSON and/or a rendered text file.

    Either xbrl_filename or root_node must be provided; when root_node is
    missing, the file is parsed into a tree first.  Returns the tree root.

    Bug fix: the derived ".json" filename was previously only computed when
    write_file was set, so write_txt_file=True on its own raised NameError.
    """
    if not (xbrl_filename or root_node):
        logging.error("You must include a either a filename or root_node")
        sys.exit()
    if not root_node:
        root_node = process_xbrl_file_to_tree(xbrl_filename, ticker)
    flat_file_dict = convert_tree_to_dict(root_node)
    # derive the output basename once so both writers can use it
    should_be_json_filename = xbrl_filename
    if should_be_json_filename and not should_be_json_filename.endswith(".json"):
        should_be_json_filename = should_be_json_filename + ".json"
    if write_file:
        write_dict_as_json(flat_file_dict, should_be_json_filename)
    if write_txt_file:
        root_node_to_rendertree_text_file(root_node, should_be_json_filename)
    return root_node
def custom_render_tree(root_node):
    """Render the tree like anytree.RenderTree, additionally printing each
    node's fact and attrib (when present) on their own prefixed lines."""
    chunks = [""]
    for prefix, _, node in anytree.RenderTree(root_node):
        fact = ""
        attrib = ""
        try:
            # one combined lookup: if fact is missing, attrib stays "" too
            fact = node.fact
            attrib = node.attrib
        except:
            pass
        fact_line = "\n{}{}".format(prefix, fact) if fact else ""
        attrib_line = "\n{}{}".format(prefix, attrib) if attrib else ""
        chunks.append("{}{}{}{}\n".format(prefix, node.name, fact_line, attrib_line))
    return "\n".join(chunks)
def root_node_to_rendertree_text_file(root_node, xbrl_filename, custom=False):
    """Write a rendered view of root_node to '<xbrl_filename>_render.txt',
    using custom_render_tree when custom is set."""
    with open('{}_render.txt'.format(xbrl_filename), 'w') as outfile:
        rendered = custom_render_tree(root_node) if custom else str(anytree.RenderTree(root_node))
        outfile.write(rendered)
def recursive_iter(xbrl_element, reversed_ns, ticker, parent=None, node_order=0):
    """Recursively convert an ElementTree element (and its subtree) into
    anytree Nodes.

    Returns a flat list of the created nodes in document order; the first
    entry is the node for xbrl_element itself.  A node with a non-empty
    fact whose parent context exposes an "id" (directly, or via the
    grandparent when the parent is a "period" wrapper) also records that id
    as parent_id so the fact can later be matched to its context.

    NOTE(review): node_order is computed per sub-tag but the corresponding
    Node kwarg is commented out, so ordering currently relies on document
    order only.
    """
    elements = []
    clark, prefix, suffix = xbrl_clark_prefix_and_suffix(xbrl_element, reversed_ns)
    fact = xbrl_element.text
    if isinstance(fact, str):
        fact = fact.strip()
    if fact is None:
        fact = ""
    attrib = xbrl_element.attrib
    parent_id = None
    if fact:
        try:
            parent_id = parent.attrib.get("id")
            if parent_id is None:
                if parent.suffix == "period":
                    grandparent = parent.parent
                    # use parent_id for simpler code
                    parent_id = grandparent.attrib.get("id")
        except:
            pass
    if parent_id and fact:
        node_element = anytree.Node(suffix,
                                    parent = parent,
                                    parent_id = parent_id,
                                    #node_order= node_order,
                                    clark = clark,
                                    prefix = prefix,
                                    suffix = suffix,
                                    fact = fact,
                                    attrib = attrib,
                                    )
    else:
        node_element = anytree.Node(suffix,
                                    parent = parent,
                                    #node_order= node_order,
                                    clark = clark,
                                    prefix = prefix,
                                    suffix = suffix,
                                    fact = fact,
                                    attrib = attrib,
                                    )
    elements.append(node_element)
    # track how many times each sub-tag has appeared so repeated tags get
    # increasing node_order values
    subtag_count_dict = {}
    for element in xbrl_element:
        count = subtag_count_dict.get(element.tag)
        if count is None:
            subtag_count_dict[element.tag] = 1
            count = 0
        else:
            subtag_count_dict[element.tag] = count + 1
        sub_elements = recursive_iter(element,
                                      reversed_ns,
                                      ticker,
                                      parent=node_element,
                                      node_order=count,
                                      )
        for element_sub2 in sub_elements:
            elements.append(element_sub2)
    return elements
def process_xbrl_file_to_tree(xbrl_filename, ticker):
    """Parse an XBRL file and return the root of its anytree representation."""
    logging.info(xbrl_filename)
    tree, ns, root = extract_xbrl_tree_namespace_and_root(xbrl_filename)
    # invert the prefix -> URI namespace map so nodes can look up prefixes
    clark_to_prefix = {uri: prefix for prefix, uri in ns.items()}
    all_nodes = recursive_iter(root, clark_to_prefix, ticker)
    # recursive_iter returns nodes in document order; the first is the root
    return all_nodes[0]
def convert_tree_to_dict(root_node):
    """Serialize an anytree tree into a plain (JSON-compatible) dict."""
    serialized = anytree.exporter.JsonExporter(indent=2, sort_keys=True).export(root_node)
    return json.loads(serialized)
def convert_dict_to_node_tree(dict_to_convert):
    """Rebuild an anytree tree from a dict produced by convert_tree_to_dict."""
    return anytree.importer.JsonImporter().import_(json.dumps(dict_to_convert))
#### utils ####
def extract_xbrl_tree_namespace_and_root(xbrl_filename):
    """Parse an XML/XBRL file and return [tree, {prefix: uri}, root].

    Returns [None, None, None] (after logging the error) when the file
    cannot be read or parsed.
    """
    namespaces = {}
    try:
        for _event, (prefix, uri) in ET.iterparse(xbrl_filename, ['start-ns']):
            if prefix:  # skip the default (unprefixed) namespace
                namespaces[prefix] = uri
    except Exception as parse_error:
        logging.error(parse_error)
        return [None, None, None]
    tree = ET.parse(xbrl_filename)
    return [tree, namespaces, tree.getroot()]
def xbrl_clark_prefix_and_suffix(xbrl_element, reversed_ns):
    """Split an element's Clark-notation tag '{uri}local' into
    [uri, prefix, local], resolving the prefix via reversed_ns ({uri: prefix})."""
    namespace_uri, local_name = xbrl_element.tag[1:].split("}")
    return [namespace_uri, reversed_ns.get(namespace_uri), local_name]
def xbrl_ns_clark(xbrl_element):
    """Return the namespace URI portion of the element's Clark-notation tag
    (the text between '{' and '}')."""
    uri_with_brace = xbrl_element.tag.split("}")[0]
    return uri_with_brace[1:]
def xbrl_ns_prefix(xbrl_element, ns):
    """Return the prefix mapped to this element's namespace URI in ns
    ({prefix: uri}); raises IndexError when the URI is absent."""
    clark = xbrl_ns_clark(xbrl_element)
    return [prefix for prefix, uri in ns.items() if uri == clark][0]
def xbrl_ns_suffix(xbrl_element):
    """Return the local name (the text after '}') of the element's Clark tag."""
    clark_parts = xbrl_element.tag.split("}")
    return clark_parts[1]
def return_xlink_locator(element_with_href):
    """Return the fragment identifier of the element's xlink:href (the part
    after '#'), or the whole href when there is no fragment."""
    href = element_with_href.attrib.get("{http://www.w3.org/1999/xlink}href")
    if "#" in href:
        return href.split("#")[-1]
    return href
def import_json(json_filename):
    """Load and return the JSON content of json_filename."""
    logging.info("importing: {}".format(json_filename))
    with open(json_filename) as inputfile:
        return json.load(inputfile)
def write_dict_as_json(dict_to_write, json_filename):
    """Serialize dict_to_write to json_filename with a 2-space indent."""
    logging.info("writing: {}".format(json_filename))
    serialized = json.dumps(dict_to_write, indent=2)
    with open(json_filename, 'w') as outfile:
        outfile.write(serialized)
def form_type_conversion(form_type, country_code, us_codes=US_COUNTRY_CODES, ca_codes=CANADA_COUNTRY_CODES):
    """Map a requested form type to the country-appropriate SEC form.

    Annual filings become "10-K" (US state codes), "40-F" (Canadian codes)
    or "20-F" (all other foreign private issuers).  Any other form type
    returns None.

    NOTE(review): a "10-Q" with a US country_code also falls through and
    returns None implicitly -- possibly intended to return "10-Q"; verify
    against the caller (parse_sec_results_page) before changing it.
    """
    logging.info(country_code)
    if form_type == "10-Q":
        if country_code not in us_codes:
            return None
    elif form_type in ANNUAL_FORM_TYPES:
        if country_code in us_codes:
            return "10-K"
        elif country_code in ca_codes:
            return "40-F"
        else:
            return "20-F"
    else:
        return None
def folder_path_form_type_conversion(ticker, form_type):
    """Resolve the on-disk data folder for (ticker, form_type).

    When the requested folder already holds files it is returned as-is.
    For annual form types with no data, the three country variants
    (10-K, 40-F, 20-F) are probed in that order and the first populated one
    wins.  Returns (folder_path, form_type, make_folder) where make_folder
    indicates the caller must create the directory.
    """
    folder_path = return_xbrl_data_formatted_folder_path(ticker, form_type)
    if does_file_exist_in_dir(folder_path):
        return folder_path, form_type, False
    if form_type not in ANNUAL_FORM_TYPES:
        return folder_path, form_type, True
    # annual filing: check the three country-specific variants in order
    for candidate_form in ("10-K", "40-F", "20-F"):
        candidate_path = return_xbrl_data_formatted_folder_path(ticker, candidate_form)
        if does_file_exist_in_dir(candidate_path):
            return candidate_path, candidate_form, False
    return folder_path, form_type, True
def does_file_exist_in_dir(path):
    """Return True if `path` is a directory containing at least one regular
    file, False if it contains none, and None when the directory cannot be
    listed (missing path, bad path type, permission error, ...).
    """
    try:
        return any(os.path.isfile(os.path.join(path, entry)) for entry in os.listdir(path))
    except (TypeError, OSError):
        # narrowed from a bare except: these are the failures listdir/join
        # can actually raise for a bad or missing path
        return None
#### xbrl from sec ####
def return_url_request_data(url, values_dict=None, secure=False, sleep=1):
    """Fetch `url` and return the response body decoded as UTF-8.

    values_dict: optional form fields; when non-empty the request is sent as
        a POST with the urlencoded body, otherwise a plain GET is issued.
    secure: prefix bare URLs with https:// instead of http:// (URLs that
        already carry a scheme are left untouched).
    sleep: throttle delay in seconds applied before every request.

    Fix: the mutable default argument ({}) was replaced with a None sentinel.
    """
    time.sleep(sleep)
    if values_dict is None:
        values_dict = {}
    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9"}
    # only prepend a scheme when the caller didn't supply one
    if "http://" in url or "https://" in url:
        url_prefix = ""
    elif secure:
        url_prefix = "https://"
    else:
        url_prefix = "http://"
    url = url_prefix + url
    data = urllib.parse.urlencode(values_dict).encode('utf-8')
    # an empty encoded body (b"") is falsy, so an empty dict still means GET
    if data:
        request = urllib.request.Request(url, data, headers=headers)
    else:
        request = urllib.request.Request(url, headers=headers)
    response = urllib.request.urlopen(request)
    return response.read().decode('utf-8')
def sec_xbrl_single_stock(cik, form_type):
    """Query SEC EDGAR's browse-edgar endpoint for a company's filings of the
    given form type and return the raw HTML response."""
    query = {
        "action": "getcompany",
        "CIK": cik,
        "type": form_type,
    }
    return return_url_request_data("https://www.sec.gov/cgi-bin/browse-edgar", query, secure=True)
def parse_sec_results_page(sec_response_data, cik, form_type, date="most recent", previous_error=False):
    """Scrape an EDGAR results page for the filing-documents URL.

    Extracts the SIC code and country/state code from the company header,
    converts form_type to the country-appropriate one (re-querying EDGAR if
    it changed), then locates the "Documents" button for either the most
    recent filing or a specific YYYYMMDD date.  Returns
    (documents_url, sic, country_code, form_type).

    NOTE(review): ten_k_soup is only bound when the *incoming* form_type is
    "10-K"; the fallback branch below can reference it unbound for other
    converted form types -- confirm before relying on that path.
    """
    soup = bs4.BeautifulSoup(sec_response_data, 'html.parser')
    if form_type == "10-K":
        ten_k_soup = soup
    #logging.info(soup.prettify())
    identInfo = soup.find_all("p", class_="identInfo")[0]
    linked_data = identInfo.find_all("a")
    previous_item = None
    sic = None
    country_code = None
    for item in linked_data:
        # e.g.
        # /cgi-bin/browse-edgar?action=getcompany&SIC=7370&owner=exclude&count=40
        # /cgi-bin/browse-edgar?action=getcompany&State=F4&owner=exclude&count=40
        href = item.get("href")
        if "&SIC=" in href:
            sic = item.text
        elif "&State=" in href:
            country_code = item.text
    # adjust the form type for the company's country and refetch if needed
    new_form_type = form_type_conversion(form_type, country_code)
    if new_form_type != form_type:
        logging.info("{} vs {}".format(new_form_type, form_type))
        form_type = new_form_type
        logging.info("-"*2000)
        logging.info(form_type)
        new_response_data = sec_xbrl_single_stock(cik, form_type)
        soup = bs4.BeautifulSoup(new_response_data, 'html.parser')
    if not sic and country_code:
        raise(Exception)
    table_list = soup.find_all("table", {"summary": "Results"})
    #logging.info(len(table_list))
    if not len(table_list) == 1:
        logging.error("something's up here")
    table = table_list[0]
    logging.info(table)
    document_button_list = table.find_all("a", {"id":"documentsbutton"})
    logging.info(document_button_list)
    if not document_button_list:
        # converted form type yielded nothing: retry with the original 10-K page
        if form_type not in ["10-K", "10-Q"]:
            logging.warning("something else is up here")
            soup = ten_k_soup
            table_list = soup.find_all("table", {"summary": "Results"})
            #logging.info(len(table_list))
            if not len(table_list) == 1:
                logging.error("something's up here")
            table = table_list[0]
            logging.info(table)
            document_button_list = table.find_all("a", {"id":"documentsbutton"})
            logging.info(document_button_list)
            if document_button_list:
                form_type = "10-K"
    if not date:
        date = "most recent"
    if date == "most recent":
        relevant_a_tag = table.find("a", {"id":"documentsbutton"})
        if previous_error:
            # after a failed attempt, prefer filings with interactive (XBRL) data
            relevant_interactive_tag = table.find("a", {"id":"interactiveDataBtn"})
            tag_parent = relevant_interactive_tag.parent
            relevant_a_tag = tag_parent.find("a", {"id":"documentsbutton"})
    else:
        year = date[:4]
        month = date[4:6]
        day = date[6:]
        logging.info("{}-{}-{}".format(year, month, day))
        relevant_td = table.find("td", string="{}-{}-{}".format(year, month, day))
        relevant_td_parent = None
        if not relevant_td:
            relevant_interactive_tags = table.find_all("a", {"id":"interactiveDataBtn"})
            tag_parents = [tag.parent.parent for tag in relevant_interactive_tags]
            if tag_parents:
                # i'm going to get clever here, and count backwards through the months
                # starting with the listed month, to find the nearest previous entry
                # if the month is correct, it should work the first time
                # if you encounter an error here, that's what's happening
                for i in reversed(range(int(month))):
                    month_str = str(i+1).zfill(2)
                    date_str = "{}-{}".format(year, month_str)
                    for parent in tag_parents:
                        if date_str in parent.text:
                            for child in parent.children:
                                if child.string:
                                    if date_str in child.string:
                                        relevant_td = child
                                if relevant_td:
                                    break
                        if relevant_td:
                            break
                    if relevant_td:
                        break
        relevant_td_parent = relevant_td.parent
        relevant_a_tag = relevant_td_parent.find("a", {"id":"documentsbutton"})
    logging.info(relevant_a_tag)
    relevant_a_href = relevant_a_tag['href']
    sec_url = "https://www.sec.gov"
    relevant_xbrl_url = sec_url + relevant_a_href
    return relevant_xbrl_url, sic, country_code, form_type
def write_xbrl_file(file_name, sec_response_data):
    """Write the downloaded document text to file_name."""
    with open(file_name, "w") as outfile:
        outfile.write(sec_response_data)
def return_xbrl_data_formatted_folder_path(ticker, form_type):
    """Return the conventional data folder path XBRL_Data/<ticker>/<form_type>."""
    return os.path.join("XBRL_Data", ticker, form_type)
def return_most_recent_xbrl_to_json_converted_filename(folder_path, ticker):
    """Return the full path of the newest '<ticker>-<date>.json' in folder_path."""
    newest = find_most_recent_filename_from_date(folder_path, ticker)
    return os.path.join(folder_path, newest)
def return_xbrl_to_json_converted_filename_with_date(folder_path, ticker, date):
    """Build the JSON filename for a given ticker/date inside folder_path.

    When folder_path itself already ends with '<ticker>-<date>', the whole
    folder_path is reused as the basename; otherwise '<ticker>-<date>' is.
    """
    ticker_date = "{}-{}".format(ticker, date)
    base = folder_path if folder_path.endswith(ticker_date) else ticker_date
    return os.path.join(folder_path, base + ".json")
def find_most_recent_filename_from_date(folder_path, ticker):
    """Scan folder_path for '<ticker>-<YYYYMMDD>.json' files and return the
    filename carrying the latest date, or None when nothing matches."""
    date_pattern = re.compile(ticker.lower() + r"-[0-9]{8}")
    best_date = 0
    best_filename = None
    for candidate in os.listdir(folder_path):
        if not candidate.endswith(".json"):
            continue
        if ticker.lower() not in candidate:
            continue
        if not date_pattern.search(candidate):
            continue
        # "<ticker>-<YYYYMMDD>.json" -> integer date for comparison
        file_date = int(candidate.replace(".json", "").split("-")[1])
        if file_date > best_date:
            best_date = file_date
            best_filename = candidate
    return best_filename
def get_xbrl_files_and_return_folder_name(ticker, xbrl_data_page_response_data, form_type, url_in_case_of_error=None):
    """Download every data file linked on a filing's "Data Files" table into
    XBRL_Data/<ticker>/<form_type>/<folder_name>/.

    The folder name and YYYYMMDD data_date are derived from the first
    filename.  Returns (full_folders_name, data_date), or the bare string
    "Error: No Table" when the page has no data-files table.

    NOTE(review): when the destination folder already exists and a file from
    it is present on disk, the function returns early without checking the
    remaining files; and if the table has no <a> tags at all, the final
    return references an unbound loop variable -- verify both paths.
    """
    soup = bs4.BeautifulSoup(xbrl_data_page_response_data, 'html.parser')
    table_list = soup.find_all("table", {"summary": "Data Files"})
    if not len(table_list) == 1:
        logging.error("something's up here")
    #logging.info(pp.pformat(table_list))
    if not table_list:
        logging.error("Likely refering to a sec page without XBRL, manually check the url")
        logging.error(url_in_case_of_error)
        return "Error: No Table"
    table = table_list[0]
    a_tag_list = table.find_all("a")
    sec_url = "https://www.sec.gov"
    folder_name = None
    data_date = None
    for a in a_tag_list:
        href = a["href"]
        file_name = a.text
        if not folder_name:
            # derive the folder name (ticker-YYYYMMDD) from the first file
            if "_" not in file_name:
                folder_name = file_name.split(".")[0]
            else:
                # grmn-20181229_def.xml
                folder_name = file_name.split("_")[0]
            data_date = folder_name.split("-")[1]
            # logging.info(ticker, form_type, folder_name, file_name)
        full_file_name = os.path.join("XBRL_Data", ticker, form_type, folder_name, file_name)
        full_folders_name = os.path.join("XBRL_Data", ticker, form_type, folder_name)
        if not os.path.exists(full_folders_name):
            os.makedirs(full_folders_name)
        else:
            if os.path.exists(full_file_name):
                logging.info("Data for {} already exists".format(ticker))
                return full_folders_name, data_date
        full_url = sec_url + href
        response_data = return_url_request_data(full_url)
        write_xbrl_file(full_file_name, response_data)
    return full_folders_name, data_date
def full_sec_xbrl_folder_download(ticker, cik, form_type, date="most recent", previous_error=False):
    """Download a filing's full XBRL file set from SEC EDGAR.

    Orchestrates: EDGAR query -> results-page parse -> data-page fetch ->
    per-file download.  On a "No Table" page it retries once with
    previous_error=True (which changes how the results page is parsed) and
    then gives up.  Returns (folder_name, data_date, sic, country_code,
    form_type), or None on the unrecoverable error path.

    Bug fix: the callee signals failure by returning the single string
    "Error: No Table"; unpacking that directly into two names raised
    ValueError before the sentinel check could run.
    """
    response_data = sec_xbrl_single_stock(cik, form_type)
    logging.info("sec response_data gathered")
    relevant_xbrl_url, sic, country_code, form_type = parse_sec_results_page(response_data, cik, form_type, date=date, previous_error=previous_error)
    logging.info("precise url found")
    xbrl_data_page_response_data = return_url_request_data(relevant_xbrl_url)
    logging.info("xbrl data downloaded")
    download_result = get_xbrl_files_and_return_folder_name(ticker, xbrl_data_page_response_data, form_type, url_in_case_of_error=relevant_xbrl_url)
    if download_result == "Error: No Table":
        if not previous_error:
            return full_sec_xbrl_folder_download(ticker, cik, form_type, date=date, previous_error=True)
        else:
            logging.error("error loop here")
            return
    folder_name, data_date = download_result
    logging.info("xbrl files created")
    return folder_name, data_date, sic, country_code, form_type
#### main ####
def main_download_and_convert(ticker, cik, form_type, year=None, month=None, day=None, force_download=False, delete_files_after_import=False):
    """Top-level entry: return the converted XBRL tree for a ticker/form.

    Resolution order (unless force_download):
      1. a specific local folder when year/month/day are all given,
      2. the most recent local folder, if it is still "fresh" (younger than
         12 months for annual forms, 3 months for 10-Q),
      3. otherwise a fresh download from SEC EDGAR.
    In every case the facts dict JSON is (re)generated when missing.
    Returns the anytree root produced by main_xbrl_to_json_converter.
    """
    given_date = None
    if year and (month and day):
        try:
            year = str(year).zfill(4)
            month = str(month).zfill(2)
            day = str(day).zfill(2)
            given_date = "{}{}{}".format(year, month, day)
        except:
            logging.error("invalid year/month/date input")
    # start by converting to path name
    folder_path = return_xbrl_data_formatted_folder_path(ticker, form_type)
    #logging.info(folder_path)
    if not does_file_exist_in_dir(folder_path):
        folder_path, form_type, make_folder = folder_path_form_type_conversion(ticker, form_type)
        if make_folder:
            os.makedirs(folder_path, exist_ok=True)
    #logging.info(folder_path)
    # if we are going to force a download attempt, the following can all be skipped
    if not force_download:
        # if we have a specific date we're looking for, this will do that
        if given_date:
            try:
                folder_name = "{}-{}".format(ticker.lower(), given_date)
                full_path = os.path.join(folder_path, folder_name)
                if os.path.exists(full_path):
                    xbrl_tree_root = main_xbrl_to_json_converter(ticker, cik, given_date, full_path, delete_files_after_import=delete_files_after_import)
                    if not os.path.exists("{}_facts_dict.json".format(full_path)):
                        convert_root_node_facts_to_fact_dict(xbrl_tree_root, ticker, full_path)
                    return xbrl_tree_root
            except Exception as e:
                logging.warning(e)
                logging.info("probably no date given")
                pass
        # if we have no date enterend (the standard case) and there *are* files
        # then we will check the last month
        # if there are no files from the last month, we will attempt to download from the SEC
        else:
            pattern = re.compile(ticker.lower() + r"-[0-9]{8}")
            most_recent_folder_date = 0
            folder_ymd_tuple = None
            for filename in os.listdir(folder_path):
                #logging.info(filename)
                #logging.info(pattern.search(filename))
                if filename.endswith(".json") and "facts_dict" not in filename:
                    if ticker.lower() in filename:
                        if pattern.search(filename):
                            ticker_hyphen_date = filename.replace(".json", "")
                            folder_date = ticker_hyphen_date.split("-")[1]
                            #logging.info("{} {}".format(folder_date, most_recent_folder_date))
                            if int(folder_date) > most_recent_folder_date:
                                most_recent_folder_date = int(folder_date)
                                folder_ymd_tuple = (ticker_hyphen_date, str(most_recent_folder_date)[:4], str(most_recent_folder_date)[4:6], str(most_recent_folder_date)[6:])
            if folder_ymd_tuple:
                #logging.info("one line below")
                #logging.info(pp.pformat(folder_ymd_tuple))
                most_recent_folder_time = time.strptime("{}:{}:{}".format(folder_ymd_tuple[1], folder_ymd_tuple[2], folder_ymd_tuple[3]), "%Y:%m:%d")
                most_recent_folder_time = time.mktime(most_recent_folder_time)
                now = float(time.time())
                period_seconds = 0
                if form_type in ANNUAL_FORM_TYPES:
                    period_seconds = MONTH_IN_SECONDS * 12
                elif form_type == "10-Q":
                    period_seconds = MONTH_IN_SECONDS * 3
                if now < (most_recent_folder_time + period_seconds): # if the folder is less than expected period for the next form
                    full_path = os.path.join(folder_path, folder_ymd_tuple[0])
                    xbrl_tree_root = main_xbrl_to_json_converter(ticker, cik, most_recent_folder_date, full_path, delete_files_after_import=delete_files_after_import)
                    if not os.path.exists("{}_facts_dict.json".format(full_path)):
                        convert_root_node_facts_to_fact_dict(xbrl_tree_root, ticker, full_path)
                    #logging.warning("remove this redundancy")
                    #convert_root_node_facts_to_fact_dict(xbrl_tree_root, ticker, full_path)
                    return xbrl_tree_root
    # no usable local data (or force_download): fetch from SEC EDGAR
    folder_name, data_date, sic, country_code, form_type = full_sec_xbrl_folder_download(ticker, cik, form_type, date=given_date)
    xbrl_tree_root = main_xbrl_to_json_converter(ticker, cik, data_date, folder_name, sic, country_code, delete_files_after_import=delete_files_after_import)
    logging.info(folder_name)
    convert_root_node_facts_to_fact_dict(xbrl_tree_root, ticker, folder_name)
    return xbrl_tree_root
#### extract fact_dict data from tree ####
def convert_root_node_facts_to_fact_dict(root_node, ticker, folder_name):
    """Flatten the fact tree into {ticker: {suffix: {date: fact, ...}}} and
    write it to '<folder_name>_facts_dict.json'.

    Walks every node: facts without a contextRef are collected as label
    dicts or plain lists; facts with a contextRef are keyed by the date
    resolved through the tree's context_dict; axis/member facts are nested
    recursively; on duplicate (suffix, date) entries the more precise fact
    (per the "decimals" attribute) wins.  Finally, label entries are moved
    onto the suffix dicts they describe.

    NOTE(review): unit_ref_list is not defined in this function's scope --
    presumably a module-level accumulator; verify it exists, otherwise the
    non-USD branch raises NameError.
    """
    local_prefixes_that_matter = PREFIXES_THAT_MATTER + [ticker.lower()]
    dict_to_return = {}
    item_depth = 1
    context_dict = anytree.findall_by_attr(root_node, "context_dict", maxlevel=2)[0].attrib
    #print(context_dict)
    for node in anytree.PreOrderIter(root_node):
        # every depth-1 node opens a (possibly empty) bucket for its suffix
        if node.depth == item_depth:
            try:
                suffix = node.suffix
                dict_to_return[suffix] = {}
            except:
                #logging.info("failed at suffix")
                continue
        try:
            fact = node.fact
        except:
            continue
        if fact in ['', None]:
            #logging.info("no fact")
            continue
        try:
            context_ref = node.attrib.get("contextRef")
        except Exception as e:
            logging.info("failed at context_ref")
            logging.info(fact)
            continue
        if context_ref is None:
            # no date, cannot use set with json, so use list
            # if label, make label dict
            node_dict = dict_to_return.get(node.suffix)
            if node_dict is None:
                dict_to_return[node.suffix] = {}
                node_dict = dict_to_return.get(node.suffix)
            label_ref = node.attrib.get('{http://www.w3.org/1999/xlink}label')
            label_role = node.attrib.get('{http://www.w3.org/1999/xlink}role')
            if label_ref and label_role:
                label_role_short = label_role.replace("http://www.xbrl.org/2003/role/", "")
                label_dict = node_dict.get(label_ref)
                if not label_dict:
                    node_dict[label_ref] = {label_role_short: fact}
                else:
                    node_dict[label_ref][label_role_short] = fact
            else: #not label
                dict_list = node_dict.get('list')
                if dict_list is None:
                    node_dict['list'] = [fact]
                else:
                    if fact not in dict_list:
                        #print(dict_list)
                        #print(fact)
                        dict_list.append(fact)
                        dict_list.sort()
            #logging.info("failed at context_ref is None")
            #logging.info(fact)
            continue
        date = context_dict.get(context_ref)
        if not date:
            logging.warning(node)
            logging.info("failed at date, what is fact?")
            continue
        # non-USD monetary facts get the currency appended to the date key
        unit_ref = node.attrib.get("unitRef")
        if unit_ref:
            if "iso4217" in unit_ref:
                for usd in ["usd", "USD"]:
                    if usd not in unit_ref:
                        date = "{}_{}".format(date, unit_ref)
                        if unit_ref not in unit_ref_list:
                            unit_ref_list.append(unit_ref)
        # check for axis numbers
        try:
            axis = node.parent.axis
        except:
            axis = False
        node_dict = dict_to_return.get(node.suffix)
        if not node_dict:
            node_dict = {}
        #logging.info("Axis = {}".format(axis))
        if axis: # we need a complicated axis-member relationship (similar date, but subcatigory)
            # our context_ref here will have all the parts listed
            for prefix in local_prefixes_that_matter:
                context_ref = context_ref.replace("_{}".format(prefix), "")
            context_split = context_ref.split("_")
            #logging.info(context_split)
            context_split = [x for x in context_split if x != '']
            #logging.info(context_split)
            #logging.info("{}:{} = {}".format(node.suffix, context_split, fact))
            failed=False
            #logging.info(context_split)
            formatted_context_split = []
            for index, item in enumerate(context_split):
                #logging.info(index, item)
                if index == 0:
                    # here we skip the first contextref bit because it's just the date
                    formatted_context_split.append(item)
                    continue
                formatted_context_split.append(item)
            context_split = formatted_context_split
            #logging.info(context_split)
            if len(context_split) == 1:
                pass
            else:
                #something is broken here, not getting all dates and facts for members
                #logging.info(context_split)
                node_dict = recursive_set_axis_member_dict(node,
                                                           node_dict,
                                                           context_split[1],
                                                           context_split,
                                                           date,
                                                           fact,)
            continue
        previous_entry = dict_to_return[node.suffix].get(date)
        entry_dict = dict_to_return.get(node.suffix)
        if previous_entry is not None:
            if previous_entry != fact:
                #logging.info("date: {}\nprevious entry: {}\ncurrent fact: {}\nfailed at previous_entry != node.fact\nPrevious Entry for: {}|{}|{}|{}".format(date, previous_entry, fact, node.suffix, date, node.fact, previous_entry))
                # conflicting fact for the same date: keep the more precise one
                node_decimals = node.attrib.get("decimals")
                existing_attrib = entry_dict.get("{}_attrib".format(date))
                existing_decimals = existing_attrib.get("decimals")
                #logging.info("Check precision: {}|{}".format(node_decimals, existing_decimals))
                if existing_decimals and node_decimals:
                    if existing_decimals > node_decimals:
                        #logging.info("Ignoring less precise data.")
                        continue
                    elif node_decimals > existing_decimals:
                        #logging.info("Replace with fact with better precision.")
                        entry_dict[date] = fact
                        entry_dict["{}_attrib".format(date)] = node.attrib
                    elif node_decimals == existing_decimals:
                        #logging.info("Same decimals: taking longer str")
                        if len(fact) > len(entry_dict.get(date)):
                            entry_dict[date] = fact
                            entry_dict["{}_attrib".format(date)] = node.attrib
                    else:
                        raise(Exception("{}|{}".format(node_decimals, existing_decimals)))
                else:
                    #logging.info("weird data to list")
                    #logging.info("\n")
                    #logging.info("-"*50)
                    #logging.info(pp.pformat(previous_entry))
                    #logging.info("-"*50)
                    #logging.info(pp.pformat(fact))
                    #logging.info("-"*50)
                    #logging.info(pp.pformat(entry_dict))
                    #logging.info("-"*50)
                    #logging.info(pp.pformat(node_dict))
                    #logging.info("-"*50)
                    #logging.info(pp.pformat(existing_attrib))
                    #logging.info(pp.pformat(node.attrib))
                    #logging.info("\n")
                    # no decimals to compare: accumulate conflicting values in a list
                    if isinstance(previous_entry, list):
                        previous_entry.append(fact)
                    else:
                        entry_dict[date] = [previous_entry, fact]
            else:
                #logging.warning("Duplicate Entry for: {}|{}|{}|{}".format(node.suffix, date, node.fact, previous_entry))
                pass
        else: # previous entry is none
            entry_dict[date] = fact
            entry_dict["{}_attrib".format(date)] = node.attrib
    # sort labels
    label_dict = dict_to_return.get("label")
    # here i'm going to look at the labels dict,
    # then i'm going to look through the suffixes
    # if the suffix matches the label id str, add the labels to that dict
    for id_str, labels_to_move in label_dict.items():
        for suffix_str, suffix_dict in dict_to_return.items():
            if "_{}_".format(suffix_str) in id_str: # like "_Taxes_" in "lab_us-gaap_Taxes_IDCHARSBLAHBLAH"
                suffix_dict["label"] = labels_to_move
                break
    # remove big label dict
    dict_to_return.pop("label", None)
    dict_to_return = {ticker: dict_to_return}
    json_filename = "{}_facts_dict.json".format(folder_name)
    write_dict_as_json(dict_to_return, json_filename)
def recursive_set_axis_member_dict(node, node_dict, axis_or_member, axis_member_list, date, fact, axis_or_member_index=1):
    """Nest a fact into node_dict along its Axis/Member path.

    axis_member_list is the split contextRef (index 0 is the date part);
    each recursion step descends one Axis/Member level, and the leaf level
    stores {date: fact, "<date>_attrib": node.attrib}, preferring the more
    precise duplicate (per the "decimals" attribute).  Returns node_dict.

    NOTE(review): the precision-replacement branches write node_dict[date]
    at the *current* level rather than node_dict[axis_or_member][date] --
    looks like a wrong-nesting bug; confirm against expected output before
    changing.
    """
    if node_dict is None:
        ## this just means it's a new axis member dict
        #logging.info(axis_or_member)
        #logging.info(axis_member_list)
        pass
    axis_or_member_entry = node_dict.get(axis_or_member)
    if axis_or_member_entry is None:
        node_dict[axis_or_member] = {}
    #logging.info(axis_or_member_index, axis_member_list)
    if axis_or_member_index < len(axis_member_list)-1: # we aren't done with recursion
        axis_or_member_dict = node_dict.get(axis_or_member)
        node_dict[axis_or_member] = recursive_set_axis_member_dict(
            node,
            axis_or_member_dict,
            axis_member_list[axis_or_member_index + 1],
            axis_member_list,
            date,
            fact,
            axis_or_member_index = axis_or_member_index + 1)
    else:
        # leaf level: store (or reconcile) the dated fact here
        axis_or_member_entry = node_dict.get(axis_or_member)
        attrib = node.attrib
        if not axis_or_member_entry:
            node_dict[axis_or_member] = {date: fact, "{}_attrib".format(date): attrib}
        else:
            previous_fact = node_dict.get(axis_or_member).get(date)
            if previous_fact is not None:
                if previous_fact != fact:
                    #logging.info("fact: {}".format(fact))
                    #logging.info("existing: {}".format(previous_fact))
                    #logging.info("node_dict.get(axis_or_member):")
                    #logging.info(pp.pformat(node_dict.get(axis_or_member)))
                    #logging.info("not get the date_attrib")
                    #logging.info(pp.pformat(node_dict.get(axis_or_member).get("{}_attrib".format(date))))
                    node_decimals = node.attrib.get("decimals")
                    previous_attrib = node_dict.get(axis_or_member).get("{}_attrib".format(date))
                    existing_decimals = previous_attrib.get("decimals")
                    #logging.info("Check precision: {}|{}".format(node_decimals, existing_decimals))
                    if existing_decimals > node_decimals:
                        'ignore this'
                        #logging.info("Ignoring less precise data.")
                    elif node_decimals > existing_decimals:
                        #logging.info("Replace with fact with better precision.")
                        node_dict[date] = fact
                        node_dict["{}_attrib".format(date)] = node.attrib
                    elif node_decimals == existing_decimals:
                        #logging.info("Same decimals: taking longer str")
                        #logging.info(fact)
                        #logging.info(node_dict)
                        #logging.info(date)
                        #logging.info(node_dict.get(axis_or_member))
                        #logging.info(type(node_dict.get(date)))
                        if len(fact) > len(previous_fact):
                            node_dict[date] = fact
                            node_dict["{}_attrib".format(date)] = node.attrib
                    else:
                        logging.info("##### {} = {} vs {}".format(axis_or_member, previous_fact, fact))
                        logging.info(node_dict.get(axis_or_member))
                        raise(Exception("{}|{}".format(node_decimals, existing_decimals)))
                else:
                    'duplicate'
                    #logging.info("Fact is dublicate")
            else:
                node_dict[axis_or_member] = {date: fact, "{}_attrib".format(date): attrib}
    return node_dict
def return_existing_facts_dict(ticker, form_type, date=None):
    """Return a previously saved facts dict for ticker/form_type.

    When date (YYYYMMDD string) is given and the dated file exists, load it;
    otherwise fall back to the most recent facts dict on disk.
    """
    if date:
        filename = os.path.join("XBRL_Data", ticker.lower(), form_type, "{}-{}_facts_dict.json".format(ticker.lower(), date))
        # bug fix: was os.path.exits (no such function -> AttributeError)
        if os.path.exists(filename):
            return import_json(filename)
    # bug fix: was called with no arguments, but the helper requires
    # (ticker, form_type); also used as fallback when the dated file is absent
    return return_most_recent_facts_dict(ticker, form_type)
def return_most_recent_facts_dict(ticker, form_type):
    """Load the newest '<ticker>-<YYYYMMDD>_facts_dict.json' for this form type.

    Scans the ticker's form-type folder, picks the file with the latest date
    embedded in its name, and returns its parsed JSON contents.
    Raises TypeError (most_recent stays None) when no matching file exists.
    """
    folder_name = os.path.join("XBRL_Data", ticker.lower(), form_type)
    most_recent = None
    if not does_file_exist_in_dir(folder_name):
        # fall back to an alternate folder/form-type spelling
        # NOTE(review): this rebinds form_type, which is reused in the final
        # path join below -- confirm that is intended
        folder_name, form_type, make_folder = folder_path_form_type_conversion(ticker.lower(), form_type)
    for filename in os.listdir(folder_name):
        if "_facts_dict.json" in filename:
            # filenames look like '<ticker>-<YYYYMMDD>_facts_dict.json'
            ticker_date = filename.replace("_facts_dict.json", "")
            date = ticker_date.split("-")[1]
            date_time_obj = datetime.datetime.strptime(date, "%Y%m%d")
            if most_recent is None:
                most_recent = (date_time_obj, filename)
            else:
                if date_time_obj > most_recent[0]:
                    most_recent = (date_time_obj, filename)
    file_path = os.path.join("XBRL_Data", ticker.lower(), form_type, most_recent[1])
    return import_json(file_path)
#### extract xbrl data from tree ####
def get_data_node(root_node, attribute_name, date=None, subcategory=None):
    """Find the unique top-level node named attribute_name; when date is given,
    descend to the sub-node whose contextRef maps to that date (optionally
    restricted to a '_'-suffixed subcategory ref).

    Returns the matching node, or None when nothing matches; exits the
    process on ambiguous date matches.
    """
    if date is not None:
        # context_dict maps contextRef id -> date string
        context_dict = anytree.findall_by_attr(root_node, "context_dict", maxlevel=2)[0].attrib
    node_tuple = anytree.findall_by_attr(root_node, attribute_name, maxlevel=2)
    if node_tuple:
        if len(node_tuple) != 1:
            logging.error("There are multiple attribute nodes with the same name. This should not happen.")
            return
        node = node_tuple[0]
        if date is None:
            return node
        # else let's find the date
        context_ref = None
        # contextRefs whose stored date string equals the requested date
        context_ref_list = [key for key, value in context_dict.items() if value == date]
        if len(context_ref_list) == 1:
            # NOTE(review): this branch returns the contextRef *string* rather
            # than a node, unlike every other return -- confirm callers expect it
            return context_ref_list[0]
        if not subcategory:
            # keep only base refs (no '_'-separated Axis/Member parts)
            context_ref_list = [ref for ref in context_ref_list if not '_' in ref]
            if len(context_ref_list) > 1:
                logging.error("More than one base category date")
                sys.exit()
            context_ref = context_ref_list[0]
        else:
            subcategory_list = []
            for ref in context_ref_list:
                # the part after the first '_' identifies the subcategory
                ref_split_list = ref.split("_", maxsplit=1)
                if ref_split_list:
                    if subcategory == ref_split_list[-1]:
                        subcategory_list.append(ref)
            if not subcategory_list:
                return
            if len(subcategory_list) > 1:
                logging.error("More than one subcategory date")
                sys.exit()
            context_ref = subcategory_list[0]
        if context_ref:
            # walk the attribute's subtree for the node carrying this contextRef
            for subnode in anytree.PreOrderIter(node):
                try:
                    subnode_context_ref = subnode.attrib.get("contextRef")
                except:
                    continue
                if subnode_context_ref:
                    if context_ref == subnode_context_ref:
                        return subnode
    else:
        logging.error("No attributes of that name")
def convert_to_datetime(string_date_YYYY_MM_DD):
    """Parse 'YYYY-MM-DD' or 'YYYY-MM-DD:YYYY-MM-DD' (start:end).

    Returns [end_datetime, start_datetime, time_delta]; entries are None when
    absent, and all three are None for a malformed string.
    """
    parts = string_date_YYYY_MM_DD.split(":")
    if len(parts) == 2:
        start_str, end_str = parts
    elif len(parts) == 1:
        start_str, end_str = None, parts[0]
    else:
        logging.error("{} is not a valid date string".format(string_date_YYYY_MM_DD))
        return [None, None, None]
    # cheap digit-group sanity check on the end date before strptime
    if not (end_str[:4].isdigit() and end_str[5:7].isdigit() and end_str[8:].isdigit()):
        logging.error("{} is not a valid date string".format(string_date_YYYY_MM_DD))
        return [None, None, None]
    end_dt = datetime.datetime.strptime(end_str, "%Y-%m-%d")
    start_dt = datetime.datetime.strptime(start_str, "%Y-%m-%d") if start_str else None
    delta = (end_dt - start_dt) if start_dt else None
    return [end_dt, start_dt, delta]
def y_or_q_and_form_type_from_limit_data(Y_or_Q, form_type):
    """Normalize the (Y_or_Q, form_type) pair, deriving whichever is missing.

    Defaults to ("Y", "10-K") when both are missing. Raises Exception when
    only Y_or_Q == "Y" is given, since the annual form is then ambiguous
    (10-K vs 20-F vs 40-F).
    """
    if form_type and not Y_or_Q:
        if form_type in ANNUAL_FORM_TYPES:
            Y_or_Q = "Y"
        elif form_type == "10-Q":
            Y_or_Q = "Q"
    elif Y_or_Q and not form_type:
        if Y_or_Q == "Y":
            # ambiguous: annual alone could be 10-K, 20-F or 40-F
            # (removed an unreachable `form_type = "10-K"` that followed this raise)
            raise Exception("cannot know if 10-K or 20-F or 40-F")
        elif Y_or_Q == "Q":
            form_type = "10-Q"
    elif not (form_type or Y_or_Q):
        Y_or_Q = "Y"
        form_type = "10-K"
    return Y_or_Q, form_type
def get_most_recent_data(root_node, attribute_name, Y_or_Q=None, form_type=None, subcategory=None):
    """Return the single most recent matching fact node, or None.

    Thin wrapper over get_most_recent_multiple_instances with count 1.
    """
    instances = get_most_recent_multiple_instances(
        root_node, attribute_name, 1,
        Y_or_Q=Y_or_Q, form_type=form_type, subcategory=subcategory)
    # bug fix: the lookup returns None when nothing matches; indexing [0]
    # on that raised TypeError
    if not instances:
        return None
    return instances[0]
def get_most_recent_multiple_instances(root_node, attribute_name, number_of_instances, Y_or_Q=None, form_type=None, subcategory=None):
    """Return up to number_of_instances most-recent fact nodes for attribute_name.

    Y_or_Q ("Y"/"Q") selects annual vs quarterly contextRefs (derived from
    form_type when omitted); subcategory restricts matches to contextRefs
    containing that substring. Returns a newest-first list (possibly shorter
    than requested), or None when nothing matches.
    """
    if not Y_or_Q:
        Y_or_Q, form_type = y_or_q_and_form_type_from_limit_data(Y_or_Q, form_type)
    context_dict = anytree.findall_by_attr(root_node, "context_dict", maxlevel=2)[0].attrib
    relevant_node = anytree.findall_by_attr(root_node, attribute_name, maxlevel=2)
    if not relevant_node:
        logging.warning("no relevant node")
        return
    if len(relevant_node) != 1:
        logging.error("There are multiple attribute nodes with the same name. This should not happen.")
        return
    relevant_node = relevant_node[0]
    # collect [node, base contextRef, date string] triples for every candidate
    node_contextRef_date_tuple_list = []
    for node in anytree.PreOrderIter(relevant_node):
        basic_contextRef = None
        if subcategory:
            contextRef = return_context_ref(node)
            if contextRef and subcategory in contextRef:
                basic_contextRef = return_basic_context_ref(node)
        elif is_basic_date_context_ref(node):
            basic_contextRef = return_context_ref(node)
        if basic_contextRef:
            the_context = context_dict.get(basic_contextRef)
            node_contextRef_date_tuple_list.append([node, basic_contextRef, the_context])
    if not node_contextRef_date_tuple_list:
        logging.warning("no nodes matched")
    logging.info("Y_or_Q == {}".format(Y_or_Q))
    if Y_or_Q == 'Y':
        applicable_refs = ["YTD"]
    if Y_or_Q == 'Q':
        applicable_refs = ["Q4", "Q3", "Q2", "Q1", "QTD"]
    node_contextRef_date_tuple_list = [
        triple for triple in node_contextRef_date_tuple_list
        if any(triple[1].endswith(ref) for ref in applicable_refs)]
    # keep most_recent_list sorted newest-first, dropping duplicates and facts
    # whose period length contradicts the Y/Q request
    most_recent_list = []
    for triple in node_contextRef_date_tuple_list:
        end_datetime, start_date, time_delta = convert_to_datetime(triple[2])
        insert_at = None
        skip = False
        for index, existing in enumerate(most_recent_list):
            existing_end, existing_start, _existing_delta = convert_to_datetime(existing[2])
            if end_datetime > existing_end:
                insert_at = index
                break
            if end_datetime == existing_end:
                # bug fix: compare this triple's fact -- the original indexed
                # the whole candidate list ([0].fact), which always raised and
                # silently compared None
                node_fact = getattr(triple[0], "fact", None)
                existing_fact = getattr(existing[0], "fact", None)
                if node_fact == existing_fact:
                    if start_date == existing_start:
                        logging.warning("duplicate fact, skipping")
                        skip = True
                        break
                    if time_delta is not None:
                        # bug fix: timedelta.days is an attribute, not a method
                        if Y_or_Q == "Y" and time_delta.days < 300:
                            logging.warning("quarterly term for annual request, skipping")
                            skip = True
                            break
                        if Y_or_Q == "Q" and time_delta.days > 50:
                            logging.warning("annual term for quarterly request, skipping")
                            skip = True
                            break
                    logging.warning("odd, but near identical facts are being grouped together")
                    insert_at = index
                    break
        if skip:
            continue
        if insert_at is None:
            # bug fix: insert exactly once, outside the scan -- the original
            # inserted into most_recent_list while enumerate-iterating it,
            # which could duplicate entries and loop forever; older entries
            # are now kept (appended) so multi-instance requests work
            most_recent_list.append(triple)
        else:
            most_recent_list.insert(insert_at, triple)
    if most_recent_list:
        most_recent_list = [triple[0] for triple in most_recent_list]
    if not most_recent_list:
        logging.warning("There are no facts that match that search")
        return
    elif len(most_recent_list) < number_of_instances:
        return most_recent_list
    else:
        return most_recent_list[0:number_of_instances]
def print_all_simple_context_refs(root_node):
    """Log every 'simple' (underscore-free) contextRef found under root_node,
    plus how many of them match the expected date-like ref pattern.

    Debug helper: output goes to logging only; nothing is returned.
    """
    pattern = re.compile(r'[A-Z]{1,2}[0-9]{4}[A-Z]{1}[0-9]{1}(YTD|QTD)?(?=\s)')
    simple_context_set = set()
    context_ref_list = []
    for node in anytree.PreOrderIter(root_node):
        try:
            context_ref = node.attrib.get("contextRef")
        except AttributeError:
            # node carries no attrib mapping
            continue
        if context_ref is not None:
            context_ref_list.append(context_ref)
    # perf fix: dedup once after the traversal -- the original re-scanned the
    # whole accumulated list for every node (O(n^2))
    for ref in context_ref_list:
        if len(ref.split("_", maxsplit=1)) == 1:
            simple_context_set.add(ref)
    # perf fix: single join instead of quadratic string concatenation
    big_string = "".join("{}\n".format(ref) for ref in simple_context_set)
    logging.info(big_string)
    logging.info(type(big_string))
    matches = pattern.finditer(big_string)
    logging.info(len(simple_context_set))
    match_list = [match for match in matches]
    logging.info(len(match_list))
    # kept for interactive debugging of non-matching refs
    span_list = [match.span() for match in match_list]
    str_list = [big_string[span[0]: span[1]] for span in span_list]
def non_basic_context_ref_pattern(root_node, attribute_name = None):
    """Debug helper: log contextRef bases.

    With no attribute_name, logs every underscore-free ref from the tree's
    context_dict; with one, logs the sorted unique ref bases found under
    that attribute's subtree.
    """
    if not attribute_name:
        context_dict = anytree.findall_by_attr(root_node, "context_dict", maxlevel=2)[0].attrib
        # keep only refs with no '_'-separated parts
        simple_refs = {ref for ref in context_dict.keys() if len(ref.split("_")) == 1}
        logging.info("")
        for ref in sorted(simple_refs):
            logging.info(ref)
    if attribute_name:
        found = anytree.findall_by_attr(root_node, attribute_name, maxlevel=2)
        if not found:
            return
        base_refs = set()
        for node in anytree.PreOrderIter(found[0]):
            try:
                ref = node.attrib.get("contextRef")
            except AttributeError:
                continue
            if ref is not None:
                # base = everything before the first underscore
                base_refs.add(ref.split("_")[0])
        for ref in sorted(base_refs):
            logging.info(ref)
def get_most_recent_annual_data(root_node, attribute_name, date=None, subcategory=None):
    """Most recent annual ("Y") fact node for attribute_name.

    date is accepted for interface compatibility but currently unused.
    """
    # bug fix: forward subcategory (it was silently dropped)
    return get_most_recent_data(root_node, attribute_name, Y_or_Q="Y", subcategory=subcategory)
def get_most_recent_quarterly_data(root_node, attribute_name, date=None, subcategory=None):
    """Most recent quarterly ("Q") fact node for attribute_name.

    date is accepted for interface compatibility but currently unused.
    """
    # bug fix: forward subcategory (it was silently dropped)
    return get_most_recent_data(root_node, attribute_name, Y_or_Q="Q", subcategory=subcategory)
def get_top_data_node(root_node, attribute_name):
    """Return the tuple of nodes named attribute_name within two levels of root."""
    return anytree.findall_by_attr(root_node, attribute_name, maxlevel=2)
def return_context_ref(node):
    """Return node.attrib['contextRef'], or None when the node carries no
    attrib mapping (not every anytree node has one)."""
    try:
        return node.attrib.get('contextRef')
    except AttributeError:
        # narrowed from a bare except: only the missing-attrib case is expected
        return None
def return_basic_context_ref(node):
    """Return the part of the node's contextRef before the first underscore,
    or None when the node has no contextRef."""
    parts = return_split_context_ref_list(return_context_ref(node))
    if not parts:
        return None
    return parts[0]
def return_split_context_ref_list(contextRef):
    """Split a contextRef on underscores; None in -> None out."""
    return None if contextRef is None else contextRef.split("_")
def is_basic_date_context_ref(node):
    """True when the node has a contextRef with no underscore (a plain date
    ref); otherwise falls through returning None (falsy)."""
    contextRef = return_context_ref(node)
    if contextRef and "_" not in contextRef:
        return True
def analayse_split_context_ref(node):
    """Break a node's contextRef into a dict: always a 'base' entry, plus the
    axis-related entries when the ref encodes Axis segments.

    Returns None when the node has no contextRef. (Function-name typo is
    preserved for caller compatibility.)
    """
    contextRef = return_context_ref(node)
    split_ref = return_split_context_ref_list(contextRef)
    if split_ref is None:
        return None
    # the vast majority of refs are just a base date
    result = {"base": split_ref[0]}
    if len(split_ref) > 1 and "Axis" in contextRef:
        result.update(return_axis_based_context_ref_dict(split_ref))
    return result
def return_axis_based_context_ref_dict(split_contextRef):
    """Describe every '...Axis' segment of an underscore-split contextRef.

    For the n-th axis (first axis gets no numeric key suffix), records
    axis_string / axis_prefix and, when present, axis_subcategory_prefix +
    axis_subcategory. Segments not accounted for by this walk are reported
    under 'axis_extra'.
    """
    result = {}
    segment_count = len(split_contextRef)
    axis_positions = [i for i, seg in enumerate(split_contextRef) if seg.endswith("Axis")]
    # mirrors the segments we consumed, in consumption order, so leftovers
    # can be detected afterwards
    accounted_for = [split_contextRef[0]]
    for ordinal, pos in enumerate(axis_positions):
        key_suffix = "" if ordinal == 0 else "_{}".format(ordinal + 1)
        axis_name = split_contextRef[pos]
        result["axis_string{}".format(key_suffix)] = axis_name
        prefix = split_contextRef[pos - 1] if pos > 0 else None
        result["axis_prefix{}".format(key_suffix)] = prefix
        accounted_for.append(prefix)
        accounted_for.append(axis_name)
        # a subcategory needs both a prefix and a member segment after the axis
        if pos + 2 < segment_count and split_contextRef[pos + 1] and split_contextRef[pos + 2]:
            sub_prefix = split_contextRef[pos + 1]
            sub_name = split_contextRef[pos + 2]
            result["axis_subcategory_prefix{}".format(key_suffix)] = sub_prefix
            result["axis_subcategory{}".format(key_suffix)] = sub_name
            accounted_for.append(sub_prefix)
            accounted_for.append(sub_name)
    if accounted_for != split_contextRef:
        result["axis_extra"] = [seg for seg in split_contextRef if seg not in accounted_for]
    return result
# module-level switches consumed by the conversion helpers above
delete_after_import = False
testing_write_file = False
force_download = False
testing = False
if __name__ == "__main__":
    if testing:
        # ad-hoc smoke run over a hand-picked (ticker, CIK) basket
        ticker_cik_list = [("AAPL", 320193), ("GOOG", 1652044), ("MRVL", 1058057), ("GRMN", 1121788), ("STX", 1137789), ("BIDU", 1329099), ("INFY", 1067491), ("WCRX", 1323854), ("CHKP", 1015922), ("TEVA", 818686), ("FLEX", 866374), ("LOGI", 1032975)]
        randomize = False
        date_specific = False
        delete_after_import = False
        testing_write_file = True
        force_download = False
        for ticker_cik in ticker_cik_list:
            form = '10-K'
            forms = ["10-K", "10-Q"]
            form_choice = forms.index(form)
            if randomize:
                # optionally exercise a random form type instead of 10-K
                random_form = random.choice(forms)
                logging.info(random_form)
                form_choice = forms.index(random_form)
            form_type = forms[form_choice]
            # NOTE(review): main_download_and_convert is defined elsewhere in
            # this module; the results below are kept only for side effects
            xbrl_tree_root = main_download_and_convert(ticker_cik[0].lower(), ticker_cik[1], form_type, delete_files_after_import=delete_after_import, force_download=force_download)
            my_dict = return_most_recent_facts_dict(ticker_cik[0], form)
#end of line | 45.479484 | 250 | 0.601987 | import sys, os, shutil, logging, datetime, json, time, copy, re, random
import urllib.request
import bs4, anytree, anytree.exporter, anytree.importer
import xml.etree.ElementTree as ET
import pprint as pp
logging.basicConfig(format=' ---- %(filename)s|%(lineno)d ----\n%(message)s', level=logging.INFO)
clarks_to_ignore = ['http://www.w3.org/2001/XMLSchema',
'http://www.xbrl.org/2003/instance',
'http://www.xbrl.org/2003/linkbase',
'http://xbrl.org/2006/xbrldi',
]
unit_ref_list = []
MONTH_IN_SECONDS = 60.0 * 60 * 24 * 7 * 30
ANNUAL_FORM_TYPES = ["10-K", "20-F", "40-F"]
PREFIXES_THAT_MATTER = ["us-gaap", "dei", "srt", "country", "stpr", "custom"]
US_COUNTRY_CODES = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DE", "DC", "FL", "GA", "HI", "ID", "IL",
"IN", "IA", "KS", "KY", "LA", "ME", "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE",
"NV", "NH", "NJ","NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC", "SD",
"TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY", "X1", ]
CANADA_COUNTRY_CODES = ["A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "A8", "A9", "B0", "Z4",]
def main_xbrl_to_json_converter(ticker, cik, date, folder_path, sic=None, country_code=None, delete_files_after_import=False):
    """Top-level converter: reuse a cached fact-tree JSON for ticker/date when
    present, otherwise parse every .xml/.xsd in folder_path into trees, merge
    them into one fact-centric tree, and cache that as JSON.

    Returns the root anytree node. When delete_files_after_import is True the
    source folder (and its rendered .txt sibling) is removed afterwards.
    """
    root_node_dict = {}
    potential_json_filename = return_xbrl_to_json_converted_filename_with_date(folder_path, ticker, date)
    try:
        # fast path: a previously converted JSON file already exists
        root_json = import_json(potential_json_filename)
        root_node = convert_dict_to_node_tree(root_json)
    except Exception as e:
        logging.error(e)
        root_node = None
    if not root_node:
        logging.info("json file does not already exist, creating one...")
        list_of_filenames_in_directory = os.listdir(folder_path)
        for filename in list_of_filenames_in_directory:
            # each XBRL source file (.xml instance / .xsd schema) becomes its own tree
            if filename.endswith(".xml") or filename.endswith(".xsd"):
                xbrl_filename = os.path.join(folder_path, filename)
                logging.info("processing xbrl file: {}".format(xbrl_filename))
                root_node = xbrl_to_json_processor(xbrl_filename, ticker, write_file=testing_write_file, write_txt_file=testing_write_file)
                logging.info("done")
                root_node_dict[filename] = root_node
        # merge the per-file trees into a single fact-centric tree and cache it
        fact_tree_root = fact_centric_xbrl_processor(root_node_dict, ticker, sic, country_code)
        write_txt_file = not delete_files_after_import
        root_node = xbrl_to_json_processor(potential_json_filename, ticker, root_node=fact_tree_root, write_file=True, write_txt_file=write_txt_file)
    if delete_files_after_import:
        # clean up the downloaded source folder and any rendered text dump
        if os.path.isdir(folder_path):
            shutil.rmtree(folder_path)
        potential_txt_file = "{}.json_render.txt".format(folder_path)
        if os.path.isfile(potential_txt_file):
            os.remove(potential_txt_file)
    return root_node
def return_refernce_node(node, fact_tree_root, other_tree_root, ticker):
    """Decide which tree a parsed XBRL node belongs under.

    Fact-like nodes (identified via xlink:href locator or a non-xbrli tag
    suffix) get (or create) a per-fact bucket node under fact_tree_root;
    contextual xbrli items are bucketed under other_tree_root for later
    sorting. Returns the chosen parent ("reference") node.
    """
    # facts may be namespaced with the ticker itself as a custom prefix
    local_prefixes_that_matter = PREFIXES_THAT_MATTER + [ticker.lower()]
    reference_node = None
    locator = None
    href = node.attrib.get("{http://www.w3.org/1999/xlink}href")
    if href:
        locator = return_xlink_locator(node)
    else:
        if node.clark not in clarks_to_ignore:
            locator = node.suffix
    if locator:
        locator_prefix = None
        modified_locator = None
        # strip a known 'prefix_' from the locator when present
        for prefix in local_prefixes_that_matter:
            if locator.startswith("{}_".format(prefix)):
                locator_prefix = prefix
                modified_locator = locator.replace("{}_".format(prefix), "")
        if modified_locator:
            reference_node = anytree.search.find_by_attr(fact_tree_root, modified_locator)
        else:
            reference_node = anytree.search.find_by_attr(fact_tree_root, locator)
        if not reference_node:
            # no bucket yet: create one, carrying the prefix when known
            if modified_locator:
                if locator_prefix:
                    reference_node = anytree.Node(modified_locator,
                                                  parent=fact_tree_root,
                                                  prefix=locator_prefix,
                                                  suffix=modified_locator)
                else:
                    reference_node = anytree.Node(modified_locator,
                                                  parent=fact_tree_root,
                                                  suffix=modified_locator)
            else:
                reference_node = anytree.Node(locator,
                                              parent=fact_tree_root,
                                              suffix=locator)
        # does the bucket already know its namespace prefix?
        try:
            existing_prefix = reference_node.prefix
        except:
            ''' it doesn't so lets try and find one '''
            node_prefix=None
            ''' we want the node to have a fact
            (because we are looking for fact prefix from the main .xml file)
            '''
            try:
                node_fact = node.fact
            except:
                'probably not a fact node'
                node_fact = ""
            if node_fact != "":
                try:
                    node_prefix = node.prefix
                except:
                    node_prefix = None
                if node_prefix:
                    reference_node.prefix = node_prefix
        return reference_node
    else:
        ''' this is a contextual item
        we will put this item in to the "other tree root" tree and deal with it later
        '''
        xbrli_node = anytree.search.find_by_attr(other_tree_root, "{{{}}}{}".format(node.clark, node.suffix))
        if not xbrli_node:
            xbrli_node = anytree.Node("{{{}}}{}".format(node.clark, node.suffix),
                                      parent=other_tree_root,
                                      suffix=node.suffix)
        return xbrli_node
def fact_centric_xbrl_processor(root_node_dict, ticker, sic, country_code, sort_trash_for_debugging=False):
    """Merge the per-file XBRL trees into one fact-centric tree.

    Pipeline: (1) bucket every node under a fact reference node or the xbrli
    tree, discarding exact duplicates into a trash tree; (2) two deep-sorting
    passes that re-home contextual nodes under fact children; (3) group
    Axis/Member contextRefs; (4) collapse contexts into a single lookup dict.
    Returns the fact tree root.
    """
    fact_tree_root = anytree.Node(ticker)
    other_tree_root = anytree.Node('xbrli')
    trash_tree_root = anytree.Node('unsorted_trash')
    parent_child_tuple_list = []
    # process the main instance document first, then schema/linkbase files
    extension_order_after_primary_file_list = [".xsd", "_lab.xml", "_def.xml", "_cal.xml", "_pre.xml"]
    ordered_filename_list = []
    for extention in extension_order_after_primary_file_list:
        for filename in root_node_dict.keys():
            if filename.endswith(extention):
                ordered_filename_list.append(filename)
    for filename in root_node_dict.keys():
        if filename not in ordered_filename_list:
            ordered_filename_list.insert(0, filename)
    logging.info("Start initial sorting:")
    start_time = time.time()
    for filename in ordered_filename_list:
        logging.info(filename)
        root_node = root_node_dict.get(filename)
        for node in anytree.PreOrderIter(root_node):
            try:
                suffix = node.suffix
            except:
                logging.error("there is a problem with this node... it has no 'suffix' attribute")
                sys.exit()
            reference_node = return_refernce_node(node, fact_tree_root, other_tree_root, ticker)
            parent_child_tuple_list.append((reference_node, node))
    # attach each node to its bucket unless an identical sibling already exists
    for reference_node, child in parent_child_tuple_list:
        unique = True
        for existing_child in reference_node.children:
            if vars(child) == vars(existing_child):
                unique = False
            if unique == False:
                break
        if unique == True:
            child.parent = reference_node
        else:
            ''' here, our node is a duplicate, and so we throw it out '''
            child.parent = trash_tree_root
    print_root_node_lengths(fact_tree_root, other_tree_root, trash_tree_root)
    logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    logging.info("Start deep sorting:")
    start_time = time.time()
    fact_tree_children_dict = {node.suffix: node for node in fact_tree_root.children}
    # pass 1: re-home xbrli nodes whose ids/labels point at fact children
    for node in anytree.PreOrderIter(other_tree_root):
        replacement_parent = return_new_parent(node, fact_tree_children_dict)
        if replacement_parent:
            node.parent = replacement_parent
    print_root_node_lengths(fact_tree_root, other_tree_root, trash_tree_root)
    logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    logging.info("Start deep sorting second pass:")
    start_time = time.time()
    # pass 2: looser name/from/id matching for whatever is still unsorted
    for node in anytree.PreOrderIter(other_tree_root):
        replacement_parent = return_new_parent_round_two(node, fact_tree_children_dict)
        if replacement_parent:
            node.parent = replacement_parent
    print_root_node_lengths(fact_tree_root, other_tree_root, trash_tree_root)
    logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    logging.info("Start contextRef sorting:")
    start_time = time.time()
    # group fact nodes by the Axis/Member encoded in their contextRef
    for node in anytree.PreOrderIter(fact_tree_root):
        replacement_parent = return_new_parent_for_Axis_contextRefs(node)
        if replacement_parent:
            node.parent = replacement_parent
    print_root_node_lengths(fact_tree_root, other_tree_root, trash_tree_root)
    logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    logging.info("Create context refs dict:")
    start_time = time.time()
    convert_context_refs_into_id_keyed_dict(fact_tree_root, other_tree_root, trash_tree_root, sic, country_code)
    logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    if sort_trash_for_debugging:
        logging.info("Sort trash file:")
        start_time = time.time()
        trash_tree_root = keep_trash_sorted(trash_tree_root)
        logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    logging.info("Saving text files")
    start_time = time.time()
    if testing:
        # dump all three trees for manual inspection
        fact_tree_root_filename = ticker + "_facts"
        root_node_to_rendertree_text_file(fact_tree_root, fact_tree_root_filename)
        other_tree_root_filename = ticker + "_xbrli"
        root_node_to_rendertree_text_file(other_tree_root, other_tree_root_filename)
        trash_filename = ticker + "_trash"
        root_node_to_rendertree_text_file(trash_tree_root, trash_filename)
    logging.info("Finished in {}sec".format(round(time.time() - start_time)))
    return fact_tree_root
def convert_context_refs_into_id_keyed_dict(fact_tree_root, other_tree_root, trash_tree_root, sic, country_code):
    """Collapse xbrli context/period nodes into one {context_id: date} dict
    attached to the fact tree as a 'context_dict' node, moving the consumed
    period/context nodes into the trash tree.

    Values are 'YYYY-MM-DD' for instants and 'start:end' for durations.
    Also attaches 'sic' and 'country_code' nodes to the fact tree root.
    """
    context_node = None
    period_node_list = []
    for child in list(other_tree_root.children):
        if child.suffix == "context":
            context_node = child
        elif child.suffix in ["startDate", "endDate", "instant", "forever"]:
            period_node_list.append(child)
    context_dict = {}
    for period_node in period_node_list:
        for node in anytree.PreOrderIter(period_node):
            try:
                existing_entry = context_dict.get(node.parent_id)
            except:
                # node has no parent_id (e.g. the bucket node itself)
                continue
            if node.parent.suffix == "measure":
                continue
            if existing_entry is None:
                context_dict[node.parent_id] = node.fact
            else:
                # second half of a duration: keep start before end
                # NOTE(review): a 'forever' node here would leave new_entry
                # unassigned (NameError) -- confirm that can't occur
                if node.suffix == "startDate":
                    new_entry = node.fact + ":" + existing_entry
                elif node.suffix == "endDate":
                    new_entry = existing_entry + ":" + node.fact
                elif node.suffix == "instant":
                    logging.error("This should not happen. Examine this code error")
                    sys.exit()
                context_dict[node.parent_id] = new_entry
            node.parent = trash_tree_root
    for node in anytree.PreOrderIter(context_node):
        node.parent = trash_tree_root
    context_dict_node = anytree.Node("context_dict", parent=fact_tree_root, attrib = context_dict)
    context_sic_node = anytree.Node("sic", parent=fact_tree_root, attrib = sic)
    context_country_code_node = anytree.Node("country_code", parent=fact_tree_root, attrib = country_code)
def keep_trash_sorted(trash_tree_root):
    """Rebuild the trash tree grouping identical nodes together (debug aid).

    A node whose attribute dict matches an already-sorted node is reparented
    under that node; everything else hangs directly off the new root.
    Returns the new sorted root.
    """
    sorted_trash_tree_root = anytree.Node('trash')
    for node in anytree.PreOrderIter(trash_tree_root):
        success = False
        if node.parent:
            for sorted_node in anytree.PreOrderIter(sorted_trash_tree_root):
                if sorted_node.parent:
                    # identical node already sorted: nest this one under it
                    if vars(node) == vars(sorted_node):
                        success = True
                        node.parent = sorted_node
                        break
            if not success:
                node.parent = sorted_trash_tree_root
    logging.info("old trash tree")
    logging.info(anytree.RenderTree(trash_tree_root))
    return sorted_trash_tree_root
def print_root_node_lengths(fact_tree_root, other_tree_root, trash_tree_root):
    """Log the node counts of the three working trees on one line."""
    counts = [len(list(anytree.PreOrderIter(tree)))
              for tree in (fact_tree_root, other_tree_root, trash_tree_root)]
    logging.info("facts:\t{}\tother:\t{}\ttrash:\t{}".format(*counts))
def return_new_parent(node, fact_tree_children_dict):
    """First-pass relocation: find the fact-tree child that should adopt this
    contextual node.

    Tries, in order: the node's parent_id; its 'dimension' attribute (both
    ':'-split and '_'-joined forms); its xlink:label; its xlink from/to arc
    labels; its xlink:role URI tail. Returns the adoptive node or None.
    """
    try:
        parent_id = node.parent_id
    except:
        parent_id = None
    if parent_id:
        parent = fact_tree_children_dict.get(parent_id)
        if parent:
            return parent
    try:
        dimension = node.attrib.get("dimension")
    except:
        dimension = None
    if dimension:
        # dimension strings look like 'prefix:SomethingAxis'
        dimension_parent_id = dimension.split(":")[-1]
        parent = fact_tree_children_dict.get(dimension_parent_id)
        if parent:
            return parent
        dimension_underscore = dimension.replace(":", "_")
        parent = fact_tree_children_dict.get(dimension_underscore)
        if parent:
            return parent
    try:
        label = node.attrib.get("{http://www.w3.org/1999/xlink}label")
    except:
        label = None
    if label:
        for suffix, tree_node in fact_tree_children_dict.items():
            # only bother searching subtrees whose suffix appears in the label
            if suffix in label:
                try:
                    parent_label = tree_node.attrib.get("{http://www.w3.org/1999/xlink}label")
                except:
                    parent_label = None
                if parent_label:
                    if label == parent_label:
                        return tree_node
                parent = recursive_label_node_getter(tree_node, label)
                if parent:
                    return parent
    try:
        from_attrib = node.attrib.get("{http://www.w3.org/1999/xlink}from")
        to_attrib = node.attrib.get("{http://www.w3.org/1999/xlink}to")
    except:
        from_attrib = None
        to_attrib = None
    if from_attrib and to_attrib:
        # arcs: match on the 'from' end of the arc
        for suffix, tree_node in fact_tree_children_dict.items():
            if suffix in from_attrib:
                try:
                    parent_label = tree_node.attrib.get("{http://www.w3.org/1999/xlink}label")
                except:
                    parent_label = None
                if parent_label:
                    if from_attrib == parent_label:
                        return tree_node
                parent = recursive_label_node_getter(tree_node, from_attrib)
                if parent:
                    return parent
    try:
        role = node.attrib.get("{http://www.w3.org/1999/xlink}role")
    except:
        role = None
    if role:
        # the last path segment of the role URI names the fact
        parent = fact_tree_children_dict.get(role.split("/")[-1])
        if parent:
            return parent
def return_new_parent_round_two(node, fact_tree_children_dict):
    """Second-pass relocation: match the node's name / xlink:from / id against
    fact-tree children -- exact suffix match first, then recursive id and
    label searches inside each candidate subtree. Returns the match or None.
    """
    look_up_list = ["name", "{http://www.w3.org/1999/xlink}from", "id"]
    for item in look_up_list:
        try:
            attribute = node.attrib.get(item)
        except:
            attribute = None
        if attribute:
            for suffix, tree_node in fact_tree_children_dict.items():
                if suffix == attribute:
                    return tree_node
                elif suffix in attribute:
                    # attribute embeds the suffix: search that subtree deeply
                    parent = recursive_node_id_getter(tree_node, attribute)
                    if parent:
                        return parent
                    parent = recursive_label_node_getter(tree_node, attribute)
                    if parent:
                        return parent
def return_new_parent_for_Axis_contextRefs(node):
    """Group fact nodes whose contextRef encodes Axis/Member parts under a
    per-axis child of their current parent, creating that child on demand.

    Returns the (possibly new) axis bucket node, or None when the node has
    no composite Axis/Member contextRef.
    """
    try:
        contextRef = node.attrib.get('contextRef')
    except:
        return
    if contextRef is None:
        return
    split_contextRef = contextRef.split("_")
    if len(split_contextRef) == 1:
        # plain date ref: nothing to group
        return
    if "Axis" in contextRef or "Member" in contextRef:
        for index, sub_string in enumerate(split_contextRef):
            if sub_string.endswith("Axis") or sub_string.endswith("Member"):
                parent = node.parent
                # reuse an existing axis bucket when present
                for child in parent.children:
                    if child.suffix == sub_string:
                        return child
                subparent = anytree.Node(sub_string,
                                         parent = parent,
                                         suffix = sub_string,
                                         axis = True
                                         )
                return subparent
def recursive_node_id_getter(node, original_id):
    """Depth-first search for the node whose attrib['id'] equals original_id.

    Returns the matching node, or None when nothing in the subtree matches.
    """
    try:
        candidate = node.attrib.get("id")
    except AttributeError:
        candidate = None
    if candidate and original_id == candidate:
        return node
    for child in node.children:
        found = recursive_node_id_getter(child, original_id)
        if found:
            return found
def recursive_label_node_getter(node, original_label):
    """Depth-first search for the node whose xlink label equals original_label.

    A label renamed from 'loc_' to 'lab_' also counts as a match. Returns the
    matching node or None.
    """
    try:
        label = node.attrib.get("{http://www.w3.org/1999/xlink}label")
    except AttributeError:
        label = None
    if label:
        if original_label in (label, label.replace("loc_", "lab_")):
            return node
    for child in node.children:
        found = recursive_label_node_getter(child, original_label)
        if found:
            return found
def other_tree_node_replacement(attribute_list, fact_tree_root_children):
    """Find the fact-tree child whose suffix matches any attribute string,
    trying progressively looser normalizations.

    Passes, in order: exact match; ':' replaced with '_'; text after the
    last ':'; text after the last '_'. Returns the first matching child,
    or None.
    """
    # pass 1: exact suffix match
    for child in fact_tree_root_children:
        for attribute in attribute_list:
            if attribute == child.suffix:
                return child
    # bug fix: the fallback passes below previously compared only against the
    # *last* child of the loop above (loop-variable leak), and raised
    # NameError when the child list was empty; they now scan every child.
    string_attributes = [a for a in attribute_list if isinstance(a, str)]
    # pass 2: ':' normalized to '_'
    for child in fact_tree_root_children:
        for attribute in string_attributes:
            if attribute.replace(":", "_") == child.suffix:
                return child
    # pass 3: local name after the last ':'
    for child in fact_tree_root_children:
        for attribute in string_attributes:
            if attribute.split(":")[-1] == child.suffix:
                return child
    # pass 4: last '_'-separated segment
    for child in fact_tree_root_children:
        for attribute in string_attributes:
            if attribute.split("_")[-1] == child.suffix:
                return child
    return None
def xbrl_to_json_processor(xbrl_filename, ticker, root_node=None, write_file=False, write_txt_file=False):
    """Convert an XBRL file (or an already-built tree) to a tree, optionally
    writing it as JSON and/or a rendered text file.

    Either xbrl_filename or root_node must be given; when root_node is
    missing it is parsed from the file. Returns the tree root.
    """
    if not (xbrl_filename or root_node):
        logging.error("You must include a either a filename or root_node")
        sys.exit()
    if not root_node:
        root_node = process_xbrl_file_to_tree(xbrl_filename, ticker)
    flat_file_dict = convert_tree_to_dict(root_node)
    if write_file or write_txt_file:
        # bug fix: the .json name was only derived inside the write_file
        # branch, so write_txt_file without write_file raised NameError
        should_be_json_filename = xbrl_filename
        if not should_be_json_filename.endswith(".json"):
            should_be_json_filename = should_be_json_filename + ".json"
    if write_file:
        write_dict_as_json(flat_file_dict, should_be_json_filename)
    if write_txt_file:
        root_node_to_rendertree_text_file(root_node, should_be_json_filename)
    return root_node
def custom_render_tree(root_node):
    """Render the tree as text, appending each node's fact and attrib (when
    present and truthy) on extra prefixed lines. Returns the whole string."""
    chunks = []
    for pre, _, node in anytree.RenderTree(root_node):
        fact = ""
        attrib = ""
        # attrib is only picked up when the fact lookup succeeds first,
        # matching the single try block this replaces
        try:
            fact = node.fact
            attrib = node.attrib
        except AttributeError:
            pass
        fact_part = "\n{}{}".format(pre, fact) if fact else ""
        attrib_part = "\n{}{}".format(pre, attrib) if attrib else ""
        chunks.append("\n{}{}{}{}\n".format(pre, node.name, fact_part, attrib_part))
    return "".join(chunks)
def root_node_to_rendertree_text_file(root_node, xbrl_filename, custom=False):
    """Write a rendered view of the tree to '<xbrl_filename>_render.txt'.

    Uses custom_render_tree when custom is True, else anytree's default
    rendering."""
    with open('{}_render.txt'.format(xbrl_filename), 'w') as outfile:
        if custom:
            rendered = custom_render_tree(root_node)
        else:
            rendered = str(anytree.RenderTree(root_node))
        outfile.write(rendered)
def recursive_iter(xbrl_element, reversed_ns, ticker, parent=None, node_order=0):
    """Recursively convert an ElementTree element (and all descendants) into
    anytree nodes, returning them as a flat list (self first, pre-order).

    Each node carries clark/prefix/suffix (from the Clark-notation tag),
    the stripped text as 'fact', and the element's attrib dict; fact-bearing
    nodes additionally record the enclosing context id as 'parent_id'.
    node_order is accepted per-sibling-tag but currently unused in the body.
    """
    elements = []
    clark, prefix, suffix = xbrl_clark_prefix_and_suffix(xbrl_element, reversed_ns)
    fact = xbrl_element.text
    if isinstance(fact, str):
        fact = fact.strip()
    if fact is None:
        fact = ""
    attrib = xbrl_element.attrib
    parent_id = None
    if fact:
        # facts inherit the context id from their parent (or, for period
        # children, from the grandparent context element)
        try:
            parent_id = parent.attrib.get("id")
            if parent_id is None:
                if parent.suffix == "period":
                    grandparent = parent.parent
                    parent_id = grandparent.attrib.get("id")
        except:
            pass
    if parent_id and fact:
        node_element = anytree.Node(suffix,
                                    parent = parent,
                                    parent_id = parent_id,
                                    clark = clark,
                                    prefix = prefix,
                                    suffix = suffix,
                                    fact = fact,
                                    attrib = attrib,
                                    )
    else:
        node_element = anytree.Node(suffix,
                                    parent = parent,
                                    clark = clark,
                                    prefix = prefix,
                                    suffix = suffix,
                                    fact = fact,
                                    attrib = attrib,
                                    )
    elements.append(node_element)
    # count repeated sibling tags so each recursion knows its ordinal
    subtag_count_dict = {}
    for element in xbrl_element:
        count = subtag_count_dict.get(element.tag)
        if count is None:
            subtag_count_dict[element.tag] = 1
            count = 0
        else:
            subtag_count_dict[element.tag] = count + 1
        sub_elements = recursive_iter(element,
                                      reversed_ns,
                                      ticker,
                                      parent=node_element,
                                      node_order=count,
                                      )
        for element_sub2 in sub_elements:
            elements.append(element_sub2)
    return elements
def process_xbrl_file_to_tree(xbrl_filename, ticker):
    """Parse an XBRL file and return the root node of the resulting tree."""
    logging.info(xbrl_filename)
    tree, ns, root = extract_xbrl_tree_namespace_and_root(xbrl_filename)
    # invert prefix->URI so tags can be mapped back to their prefixes
    prefix_by_uri = {uri: prefix for prefix, uri in ns.items()}
    return recursive_iter(root, prefix_by_uri, ticker)[0]
def convert_tree_to_dict(root_node):
    """Serialize an anytree tree to a plain dict via a JSON round-trip."""
    json_text = anytree.exporter.JsonExporter(indent=2, sort_keys=True).export(root_node)
    return json.loads(json_text)
def convert_dict_to_node_tree(dict_to_convert):
    """Rebuild an anytree tree from a dict produced by convert_tree_to_dict()."""
    return anytree.importer.JsonImporter().import_(json.dumps(dict_to_convert))
lename):
ns = {}
try:
for event, (name, value) in ET.iterparse(xbrl_filename, ['start-ns']):
if name:
ns[name] = value
except Exception as e:
logging.error(e)
return[None, None, None]
tree = ET.parse(xbrl_filename)
root = tree.getroot()
return [tree, ns, root]
def xbrl_clark_prefix_and_suffix(xbrl_element, reversed_ns):
    """Split a Clark-notation tag '{uri}local' into [uri, prefix, local].

    *reversed_ns* maps namespace URI -> prefix; prefix is None when the URI
    is not in the mapping.
    """
    namespace_uri, local_name = xbrl_element.tag[1:].split("}")
    return [namespace_uri, reversed_ns.get(namespace_uri), local_name]
def xbrl_ns_clark(xbrl_element):
    """Return the namespace URI from a Clark-notation tag '{uri}local'."""
    head = xbrl_element.tag.partition("}")[0]
    return head[1:]
def xbrl_ns_prefix(xbrl_element, ns):
    """Return the prefix in *ns* whose URI matches the element's tag namespace.

    Raises IndexError when no prefix maps to that URI.
    """
    target_uri = xbrl_ns_clark(xbrl_element)
    matches = [prefix for prefix, uri in ns.items() if uri == target_uri]
    return matches[0]
def xbrl_ns_suffix(xbrl_element):
    """Return the local name from a Clark-notation tag '{uri}local'."""
    tag_parts = xbrl_element.tag.split("}")
    return tag_parts[1]
def return_xlink_locator(element_with_href):
    """Return the fragment after '#' in the element's xlink:href.

    When the href has no '#', the whole href is returned unchanged.
    """
    href = element_with_href.attrib.get("{http://www.w3.org/1999/xlink}href")
    # split("#")[-1] is the href itself when no '#' is present
    return href.split("#")[-1]
def import_json(json_filename):
    """Load and return the parsed contents of a JSON file."""
    logging.info("importing: {}".format(json_filename))
    with open(json_filename, 'r') as inputfile:
        return json.load(inputfile)
def write_dict_as_json(dict_to_write, json_filename):
    """Serialize *dict_to_write* to *json_filename* as 2-space-indented JSON."""
    logging.info("writing: {}".format(json_filename))
    payload = json.dumps(dict_to_write, indent=2)
    with open(json_filename, 'w') as outfile:
        outfile.write(payload)
def form_type_conversion(form_type, country_code, us_codes=US_COUNTRY_CODES, ca_codes=CANADA_COUNTRY_CODES):
    """Map a requested SEC form type to the one this filer actually uses.

    Quarterly reports (10-Q) exist only for US filers.  Annual requests map
    to 10-K for US filers, 40-F for Canadian filers and 20-F for other
    foreign private issuers.  Returns None when no applicable form exists.

    :param form_type: requested form, e.g. "10-Q" or a member of ANNUAL_FORM_TYPES
    :param country_code: SEC state/country code of the filer
    :param us_codes: codes treated as US (default US_COUNTRY_CODES)
    :param ca_codes: codes treated as Canadian (default CANADA_COUNTRY_CODES)
    """
    logging.info(country_code)
    if form_type == "10-Q":
        # Bug fix: a US filer requesting a 10-Q previously fell through and
        # returned None, which made the caller replace a valid form type.
        return "10-Q" if country_code in us_codes else None
    if form_type in ANNUAL_FORM_TYPES:
        if country_code in us_codes:
            return "10-K"
        if country_code in ca_codes:
            return "40-F"
        return "20-F"
    return None
def folder_path_form_type_conversion(ticker, form_type):
    """Resolve the on-disk data folder for *ticker*, falling back across annual forms.

    When the requested form type's folder holds no files and the request is
    annual, an existing 10-K, 40-F or 20-F folder is used instead.  Returns
    (folder_path, form_type, make_folder) where make_folder signals that no
    populated folder was found and one should be created.
    """
    folder_path = return_xbrl_data_formatted_folder_path(ticker, form_type)
    if does_file_exist_in_dir(folder_path):
        return folder_path, form_type, False
    if form_type not in ANNUAL_FORM_TYPES:
        return folder_path, form_type, True
    # annual fallback order: domestic, Canadian, other foreign issuers
    for candidate_form in ("10-K", "40-F", "20-F"):
        candidate_path = return_xbrl_data_formatted_folder_path(ticker, candidate_form)
        if does_file_exist_in_dir(candidate_path):
            return candidate_path, candidate_form, False
    return folder_path, form_type, True
def does_file_exist_in_dir(path):
    """Return True if *path* directly contains at least one regular file.

    Returns False when the directory exists but holds no files (only
    subdirectories, or nothing), and None when the directory cannot be
    listed (missing path, permission error, ...).
    """
    try:
        return any(os.path.isfile(os.path.join(path, i)) for i in os.listdir(path))
    except OSError:
        # narrowed from a bare except: only listing failures mean "unknown"
        return None
time.sleep(sleep)
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9"}
http__prefix = "http://"
https_prefix = "https://"
if secure:
url_prefix = https_prefix
else:
url_prefix = http__prefix
if "http://" in url or "https://" in url:
url_prefix = ""
url = url_prefix + url
encoded_url_extra_values = urllib.parse.urlencode(values_dict)
data = encoded_url_extra_values.encode('utf-8')
if data:
request = urllib.request.Request(url, data, headers=headers)
else:
request = urllib.request.Request(url, headers=headers)
response = urllib.request.urlopen(request)
response_data = response.read().decode('utf-8')
return response_data
def sec_xbrl_single_stock(cik, form_type):
    """Fetch the SEC EDGAR browse page listing *form_type* filings for *cik*."""
    query = {
        "action": "getcompany",
        "CIK": cik,
        "type": form_type,
    }
    return return_url_request_data("https://www.sec.gov/cgi-bin/browse-edgar", query, secure=True)
def parse_sec_results_page(sec_response_data, cik, form_type, date="most recent", previous_error=False):
    """Pick the filing-documents URL out of an EDGAR results page.

    Reads the company's SIC and country code, converts *form_type* to the
    one the filer actually uses (re-querying EDGAR when it changes), then
    locates the "Documents" button for the requested *date* (or the most
    recent filing).  Returns (url, sic, country_code, form_type).

    :param sec_response_data: HTML from sec_xbrl_single_stock()
    :param date: "most recent" or a YYYYMMDD string
    :param previous_error: retry mode — prefer rows with an interactive-data button
    """
    soup = bs4.BeautifulSoup(sec_response_data, 'html.parser')
    if form_type == "10-K":
        # kept so a converted annual request can fall back to the 10-K page
        ten_k_soup = soup
    identInfo = soup.find_all("p", class_="identInfo")[0]
    linked_data = identInfo.find_all("a")
    previous_item = None
    sic = None
    country_code = None
    # SIC and state/country codes are embedded in the identInfo link hrefs
    for item in linked_data:
        href = item.get("href")
        if "&SIC=" in href:
            sic = item.text
        elif "&State=" in href:
            country_code = item.text
    new_form_type = form_type_conversion(form_type, country_code)
    if new_form_type != form_type:
        logging.info("{} vs {}".format(new_form_type, form_type))
        form_type = new_form_type
        logging.info("-"*2000)
        logging.info(form_type)
        # the form type changed, so re-query EDGAR for the right filing list
        new_response_data = sec_xbrl_single_stock(cik, form_type)
        soup = bs4.BeautifulSoup(new_response_data, 'html.parser')
    # a country code without a SIC means the page parsed oddly; bail out hard
    if not sic and country_code:
        raise(Exception)
    table_list = soup.find_all("table", {"summary": "Results"})
    if not len(table_list) == 1:
        logging.error("something's up here")
    table = table_list[0]
    logging.info(table)
    document_button_list = table.find_all("a", {"id":"documentsbutton"})
    logging.info(document_button_list)
    if not document_button_list:
        # NOTE(review): ten_k_soup is only bound when the *initial* form_type
        # was "10-K"; this fallback would raise NameError otherwise — confirm.
        if form_type not in ["10-K", "10-Q"]:
            logging.warning("something else is up here")
            soup = ten_k_soup
            table_list = soup.find_all("table", {"summary": "Results"})
            #logging.info(len(table_list))
            if not len(table_list) == 1:
                logging.error("something's up here")
            table = table_list[0]
            logging.info(table)
            document_button_list = table.find_all("a", {"id":"documentsbutton"})
            logging.info(document_button_list)
            if document_button_list:
                form_type = "10-K"
    if not date:
        date = "most recent"
    if date == "most recent":
        relevant_a_tag = table.find("a", {"id":"documentsbutton"})
        if previous_error:
            # retry mode: only rows with interactive data carry usable XBRL
            relevant_interactive_tag = table.find("a", {"id":"interactiveDataBtn"})
            tag_parent = relevant_interactive_tag.parent
            relevant_a_tag = tag_parent.find("a", {"id":"documentsbutton"})
    else:
        # date is a YYYYMMDD string; look for the exact filing-date cell first
        year = date[:4]
        month = date[4:6]
        day = date[6:]
        logging.info("{}-{}-{}".format(year, month, day))
        relevant_td = table.find("td", string="{}-{}-{}".format(year, month, day))
        relevant_td_parent = None
        if not relevant_td:
            relevant_interactive_tags = table.find_all("a", {"id":"interactiveDataBtn"})
            tag_parents = [tag.parent.parent for tag in relevant_interactive_tags]
            if tag_parents:
                # starting with the listed month, to find the nearest previous entry
                # if the month is correct, it should work the first time
                # if you encounter an error here, that's what's happening
                for i in reversed(range(int(month))):
                    month_str = str(i+1).zfill(2)
                    date_str = "{}-{}".format(year, month_str)
                    for parent in tag_parents:
                        if date_str in parent.text:
                            for child in parent.children:
                                if child.string:
                                    if date_str in child.string:
                                        relevant_td = child
                                if relevant_td:
                                    break
                        if relevant_td:
                            break
                    if relevant_td:
                        break
        # NOTE(review): relevant_td can still be None here, which would raise
        # AttributeError on .parent — pre-existing behavior.
        relevant_td_parent = relevant_td.parent
        relevant_a_tag = relevant_td_parent.find("a", {"id":"documentsbutton"})
    logging.info(relevant_a_tag)
    relevant_a_href = relevant_a_tag['href']
    sec_url = "https://www.sec.gov"
    relevant_xbrl_url = sec_url + relevant_a_href
    return relevant_xbrl_url, sic, country_code, form_type
def write_xbrl_file(file_name, sec_response_data):
    """Save downloaded SEC response text to *file_name* (overwrites)."""
    with open(file_name, 'w') as out:
        out.write(sec_response_data)
def return_xbrl_data_formatted_folder_path(ticker, form_type):
    """Return the conventional data folder path: XBRL_Data/<ticker>/<form_type>."""
    parts = ("XBRL_Data", ticker, form_type)
    return os.path.join(*parts)
def return_most_recent_xbrl_to_json_converted_filename(folder_path, ticker):
    """Return the full path of the newest '<ticker>-<date>.json' in *folder_path*."""
    newest = find_most_recent_filename_from_date(folder_path, ticker)
    return os.path.join(folder_path, newest)
def return_xbrl_to_json_converted_filename_with_date(folder_path, ticker, date):
    """Build the JSON filename for *ticker* at *date* inside *folder_path*.

    When the folder path itself already ends in '<ticker>-<date>', the whole
    path is reused as the basename; otherwise '<ticker>-<date>.json' is used.
    """
    ticker_date = "{}-{}".format(ticker, date)
    base = folder_path if folder_path.endswith(ticker_date) else ticker_date
    return os.path.join(folder_path, base + ".json")
def find_most_recent_filename_from_date(folder_path, ticker):
    """Return the '<ticker>-<YYYYMMDD>.json' file in *folder_path* with the newest date.

    Returns None when no matching file exists.
    """
    date_pattern = re.compile(ticker.lower() + r"-[0-9]{8}")
    best_date = 0
    best_name = None
    for candidate in os.listdir(folder_path):
        if not candidate.endswith(".json"):
            continue
        if ticker.lower() not in candidate or not date_pattern.search(candidate):
            continue
        file_date = int(candidate.replace(".json", "").split("-")[1])
        if file_date > best_date:
            best_date = file_date
            best_name = candidate
    return best_name
def get_xbrl_files_and_return_folder_name(ticker, xbrl_data_page_response_data, form_type, url_in_case_of_error=None):
    """Download every data file listed on an EDGAR filing page into a local folder.

    Parses the "Data Files" table, derives the accession folder name and the
    filing date from the first filename, then fetches each file into
    XBRL_Data/<ticker>/<form_type>/<folder>/.  Returns (folder_name, data_date);
    on a page without the table returns ("Error: No Table", None).

    :param xbrl_data_page_response_data: HTML of the filing's documents page
    :param url_in_case_of_error: logged so a failing page can be checked manually
    """
    soup = bs4.BeautifulSoup(xbrl_data_page_response_data, 'html.parser')
    table_list = soup.find_all("table", {"summary": "Data Files"})
    if not len(table_list) == 1:
        logging.error("something's up here")
    if not table_list:
        logging.error("Likely refering to a sec page without XBRL, manually check the url")
        logging.error(url_in_case_of_error)
        # Bug fix: the caller unpacks this result into two names; a bare
        # string here raised ValueError before the error was ever inspected.
        return "Error: No Table", None
    table = table_list[0]
    a_tag_list = table.find_all("a")
    sec_url = "https://www.sec.gov"
    folder_name = None
    data_date = None
    for a in a_tag_list:
        href = a["href"]
        file_name = a.text
        if not folder_name:
            # accession-style names look like 'aapl-20200101.xml' or 'aapl-20200101_cal.xml'
            if "_" not in file_name:
                folder_name = file_name.split(".")[0]
            else:
                folder_name = file_name.split("_")[0]
            data_date = folder_name.split("-")[1]
        full_file_name = os.path.join("XBRL_Data", ticker, form_type, folder_name, file_name)
        full_folders_name = os.path.join("XBRL_Data", ticker, form_type, folder_name)
        if not os.path.exists(full_folders_name):
            os.makedirs(full_folders_name)
        else:
            if os.path.exists(full_file_name):
                # already downloaded: stop early and reuse the existing folder
                logging.info("Data for {} already exists".format(ticker))
                return full_folders_name, data_date
        full_url = sec_url + href
        response_data = return_url_request_data(full_url)
        write_xbrl_file(full_file_name, response_data)
    # NOTE(review): if the table has no <a> tags, full_folders_name is unbound
    # here (NameError) — pre-existing behavior, left unchanged.
    return full_folders_name, data_date
def full_sec_xbrl_folder_download(ticker, cik, form_type, date="most recent", previous_error=False):
    """Download the full XBRL filing folder for one company from SEC EDGAR.

    Returns (folder_name, data_date, sic, country_code, form_type).  On a
    missing-table error the download is retried once with previous_error=True;
    a second failure logs an error and returns None.
    """
    results_page = sec_xbrl_single_stock(cik, form_type)
    logging.info("sec response_data gathered")
    filing_url, sic, country_code, form_type = parse_sec_results_page(
        results_page, cik, form_type, date=date, previous_error=previous_error)
    logging.info("precise url found")
    filing_page = return_url_request_data(filing_url)
    logging.info("xbrl data downloaded")
    folder_name, data_date = get_xbrl_files_and_return_folder_name(
        ticker, filing_page, form_type, url_in_case_of_error=filing_url)
    if folder_name == "Error: No Table":
        if previous_error:
            logging.error("error loop here")
            return
        return full_sec_xbrl_folder_download(ticker, cik, form_type, date=date, previous_error=True)
    logging.info("xbrl files created")
    return folder_name, data_date, sic, country_code, form_type
rm_type, year=None, month=None, day=None, force_download=False, delete_files_after_import=False):
given_date = None
if year and (month and day):
try:
year = str(year).zfill(4)
month = str(month).zfill(2)
day = str(day).zfill(2)
given_date = "{}{}{}".format(year, month, day)
except:
logging.error("invalid year/month/date input")
folder_path = return_xbrl_data_formatted_folder_path(ticker, form_type)
if not does_file_exist_in_dir(folder_path):
folder_path, form_type, make_folder = folder_path_form_type_conversion(ticker, form_type)
if make_folder:
os.makedirs(folder_path, exist_ok=True)
if not force_download:
if given_date:
try:
folder_name = "{}-{}".format(ticker.lower(), given_date)
full_path = os.path.join(folder_path, folder_name)
if os.path.exists(full_path):
xbrl_tree_root = main_xbrl_to_json_converter(ticker, cik, given_date, full_path, delete_files_after_import=delete_files_after_import)
if not os.path.exists("{}_facts_dict.json".format(full_path)):
convert_root_node_facts_to_fact_dict(xbrl_tree_root, ticker, full_path)
return xbrl_tree_root
except Exception as e:
logging.warning(e)
logging.info("probably no date given")
pass
# if we have no date enterend (the standard case) and there *are* files
# then we will check the last month
# if there are no files from the last month, we will attempt to download from the SEC
else:
pattern = re.compile(ticker.lower() + r"-[0-9]{8}")
most_recent_folder_date = 0
folder_ymd_tuple = None
for filename in os.listdir(folder_path):
#logging.info(filename)
#logging.info(pattern.search(filename))
if filename.endswith(".json") and "facts_dict" not in filename:
if ticker.lower() in filename:
if pattern.search(filename):
ticker_hyphen_date = filename.replace(".json", "")
folder_date = ticker_hyphen_date.split("-")[1]
#logging.info("{} {}".format(folder_date, most_recent_folder_date))
if int(folder_date) > most_recent_folder_date:
most_recent_folder_date = int(folder_date)
folder_ymd_tuple = (ticker_hyphen_date, str(most_recent_folder_date)[:4], str(most_recent_folder_date)[4:6], str(most_recent_folder_date)[6:])
if folder_ymd_tuple:
#logging.info("one line below")
#logging.info(pp.pformat(folder_ymd_tuple))
most_recent_folder_time = time.strptime("{}:{}:{}".format(folder_ymd_tuple[1], folder_ymd_tuple[2], folder_ymd_tuple[3]), "%Y:%m:%d")
most_recent_folder_time = time.mktime(most_recent_folder_time)
now = float(time.time())
period_seconds = 0
if form_type in ANNUAL_FORM_TYPES:
period_seconds = MONTH_IN_SECONDS * 12
elif form_type == "10-Q":
period_seconds = MONTH_IN_SECONDS * 3
if now < (most_recent_folder_time + period_seconds): # if the folder is less than expected period for the next form
full_path = os.path.join(folder_path, folder_ymd_tuple[0])
xbrl_tree_root = main_xbrl_to_json_converter(ticker, cik, most_recent_folder_date, full_path, delete_files_after_import=delete_files_after_import)
if not os.path.exists("{}_facts_dict.json".format(full_path)):
convert_root_node_facts_to_fact_dict(xbrl_tree_root, ticker, full_path)
#logging.warning("remove this redundancy")
#convert_root_node_facts_to_fact_dict(xbrl_tree_root, ticker, full_path)
return xbrl_tree_root
folder_name, data_date, sic, country_code, form_type = full_sec_xbrl_folder_download(ticker, cik, form_type, date=given_date)
xbrl_tree_root = main_xbrl_to_json_converter(ticker, cik, data_date, folder_name, sic, country_code, delete_files_after_import=delete_files_after_import)
logging.info(folder_name)
convert_root_node_facts_to_fact_dict(xbrl_tree_root, ticker, folder_name)
return xbrl_tree_root
#### extract fact_dict data from tree ####
def convert_root_node_facts_to_fact_dict(root_node, ticker, folder_name):
    """Flatten the anytree fact tree into {ticker: {suffix: {date: fact, ...}}}
    and write it to '<folder_name>_facts_dict.json'.

    Dated facts are keyed by their context date; conflicting values for the
    same date are resolved by the 'decimals' attribute.  Undated facts become
    either label dicts (xlink label/role) or sorted 'list' entries.  Facts
    under an Axis parent are nested via recursive_set_axis_member_dict().
    """
    local_prefixes_that_matter = PREFIXES_THAT_MATTER + [ticker.lower()]
    dict_to_return = {}
    # only direct children of the root are fact categories
    item_depth = 1
    context_dict = anytree.findall_by_attr(root_node, "context_dict", maxlevel=2)[0].attrib
    #print(context_dict)
    for node in anytree.PreOrderIter(root_node):
        if node.depth == item_depth:
            try:
                suffix = node.suffix
                dict_to_return[suffix] = {}
            except:
                #logging.info("failed at suffix")
                continue
        try:
            fact = node.fact
        except:
            continue
        if fact in ['', None]:
            #logging.info("no fact")
            continue
        try:
            context_ref = node.attrib.get("contextRef")
        except Exception as e:
            logging.info("failed at context_ref")
            logging.info(fact)
            continue
        if context_ref is None:
            # no date, cannot use set with json, so use list
            # if label, make label dict
            node_dict = dict_to_return.get(node.suffix)
            if node_dict is None:
                dict_to_return[node.suffix] = {}
                node_dict = dict_to_return.get(node.suffix)
            label_ref = node.attrib.get('{http://www.w3.org/1999/xlink}label')
            label_role = node.attrib.get('{http://www.w3.org/1999/xlink}role')
            if label_ref and label_role:
                # label facts are grouped by label id, then by short role name
                label_role_short = label_role.replace("http://www.xbrl.org/2003/role/", "")
                label_dict = node_dict.get(label_ref)
                if not label_dict:
                    node_dict[label_ref] = {label_role_short: fact}
                else:
                    node_dict[label_ref][label_role_short] = fact
            else: #not label
                dict_list = node_dict.get('list')
                if dict_list is None:
                    node_dict['list'] = [fact]
                else:
                    if fact not in dict_list:
                        #print(dict_list)
                        #print(fact)
                        dict_list.append(fact)
                        dict_list.sort()
            #logging.info("failed at context_ref is None")
            #logging.info(fact)
            continue
        date = context_dict.get(context_ref)
        if not date:
            logging.warning(node)
            logging.info("failed at date, what is fact?")
            continue
        unit_ref = node.attrib.get("unitRef")
        if unit_ref:
            if "iso4217" in unit_ref:
                # non-USD monetary facts get the currency appended to the date key
                for usd in ["usd", "USD"]:
                    if usd not in unit_ref:
                        date = "{}_{}".format(date, unit_ref)
                        # NOTE(review): unit_ref_list is not defined in this
                        # function — presumably a module-level list declared
                        # earlier in the file; NameError otherwise. Confirm.
                        if unit_ref not in unit_ref_list:
                            unit_ref_list.append(unit_ref)
        # check for axis numbers
        try:
            axis = node.parent.axis
        except:
            axis = False
        node_dict = dict_to_return.get(node.suffix)
        if not node_dict:
            # NOTE(review): this fresh dict is never stored back into
            # dict_to_return, so axis results for a new suffix may be
            # discarded — confirm intent.
            node_dict = {}
        #logging.info("Axis = {}".format(axis))
        if axis: # we need a complicated axis-member relationship (similar date, but subcatigory)
            # our context_ref here will have all the parts listed
            for prefix in local_prefixes_that_matter:
                context_ref = context_ref.replace("_{}".format(prefix), "")
            context_split = context_ref.split("_")
            #logging.info(context_split)
            context_split = [x for x in context_split if x != '']
            #logging.info(context_split)
            #logging.info("{}:{} = {}".format(node.suffix, context_split, fact))
            failed=False
            #logging.info(context_split)
            formatted_context_split = []
            for index, item in enumerate(context_split):
                #logging.info(index, item)
                if index == 0:
                    # here we skip the first contextref bit because it's just the date
                    formatted_context_split.append(item)
                    continue
                formatted_context_split.append(item)
            context_split = formatted_context_split
            if len(context_split) == 1:
                pass
            else:
                node_dict = recursive_set_axis_member_dict(node,
                                                           node_dict,
                                                           context_split[1],
                                                           context_split,
                                                           date,
                                                           fact,)
            continue
        previous_entry = dict_to_return[node.suffix].get(date)
        entry_dict = dict_to_return.get(node.suffix)
        if previous_entry is not None:
            if previous_entry != fact:
                # same date, different value: prefer the more precise one
                node_decimals = node.attrib.get("decimals")
                existing_attrib = entry_dict.get("{}_attrib".format(date))
                existing_decimals = existing_attrib.get("decimals")
                if existing_decimals and node_decimals:
                    if existing_decimals > node_decimals:
                        continue
                    elif node_decimals > existing_decimals:
                        entry_dict[date] = fact
                        entry_dict["{}_attrib".format(date)] = node.attrib
                    elif node_decimals == existing_decimals:
                        if len(fact) > len(entry_dict.get(date)):
                            entry_dict[date] = fact
                            entry_dict["{}_attrib".format(date)] = node.attrib
                    else:
                        raise(Exception("{}|{}".format(node_decimals, existing_decimals)))
                else:
                    # no decimals to compare: keep both values as a list
                    if isinstance(previous_entry, list):
                        previous_entry.append(fact)
                    else:
                        entry_dict[date] = [previous_entry, fact]
            else:
                pass
        else:
            entry_dict[date] = fact
            entry_dict["{}_attrib".format(date)] = node.attrib
    # move each label group next to the suffix its id string names
    label_dict = dict_to_return.get("label")
    # then i'm going to look through the suffixes
    for id_str, labels_to_move in label_dict.items():
        for suffix_str, suffix_dict in dict_to_return.items():
            if "_{}_".format(suffix_str) in id_str:
                suffix_dict["label"] = labels_to_move
                break
    dict_to_return.pop("label", None)
    dict_to_return = {ticker: dict_to_return}
    json_filename = "{}_facts_dict.json".format(folder_name)
    write_dict_as_json(dict_to_return, json_filename)
def recursive_set_axis_member_dict(node, node_dict, axis_or_member, axis_member_list, date, fact, axis_or_member_index=1):
    """Recursively nest axis/member contextRef pieces into *node_dict*.

    Walks *axis_member_list* (a contextRef split on '_', index 0 being the
    date piece) from *axis_or_member_index*, creating one nesting level per
    piece, and stores {date: fact, "<date>_attrib": node.attrib} at the
    deepest level.  Returns the (mutated) *node_dict*.
    """
    if node_dict is None:
        #logging.info(axis_member_list)
        pass
    axis_or_member_entry = node_dict.get(axis_or_member)
    if axis_or_member_entry is None:
        node_dict[axis_or_member] = {}
    #logging.info(axis_or_member_index, axis_member_list)
    if axis_or_member_index < len(axis_member_list)-1: # we aren't done with recursion
        axis_or_member_dict = node_dict.get(axis_or_member)
        node_dict[axis_or_member] = recursive_set_axis_member_dict(
            node,
            axis_or_member_dict,
            axis_member_list[axis_or_member_index + 1],
            axis_member_list,
            date,
            fact,
            axis_or_member_index = axis_or_member_index + 1)
    else:
        # deepest level: resolve conflicts for this date by 'decimals' precision
        axis_or_member_entry = node_dict.get(axis_or_member)
        attrib = node.attrib
        if not axis_or_member_entry:
            node_dict[axis_or_member] = {date: fact, "{}_attrib".format(date): attrib}
        else:
            previous_fact = node_dict.get(axis_or_member).get(date)
            if previous_fact is not None:
                if previous_fact != fact:
                    node_decimals = node.attrib.get("decimals")
                    previous_attrib = node_dict.get(axis_or_member).get("{}_attrib".format(date))
                    existing_decimals = previous_attrib.get("decimals")
                    # NOTE(review): the winning fact is written to
                    # node_dict[date], one level above node_dict[axis_or_member]
                    # where the original entry lives — confirm this is intended.
                    if existing_decimals > node_decimals:
                        'ignore this'
                    elif node_decimals > existing_decimals:
                        node_dict[date] = fact
                        node_dict["{}_attrib".format(date)] = node.attrib
                    elif node_decimals == existing_decimals:
                        if len(fact) > len(previous_fact):
                            node_dict[date] = fact
                            node_dict["{}_attrib".format(date)] = node.attrib
                    else:
                        logging.info("##### {} = {} vs {}".format(axis_or_member, previous_fact, fact))
                        logging.info(node_dict.get(axis_or_member))
                        raise(Exception("{}|{}".format(node_decimals, existing_decimals)))
            else:
                # no entry yet for this date (the bare string below is a no-op marker)
                'duplicate'
                node_dict[axis_or_member] = {date: fact, "{}_attrib".format(date): attrib}
    return node_dict
def return_existing_facts_dict(ticker, form_type, date=None):
    """Load a previously saved facts dict for *ticker*/*form_type*.

    With *date* (YYYYMMDD) the dated '<ticker>-<date>_facts_dict.json' is
    loaded when present (None when it is missing); without a date the most
    recent facts dict is loaded.
    """
    if date:
        filename = os.path.join("XBRL_Data", ticker.lower(), form_type,
                                "{}-{}_facts_dict.json".format(ticker.lower(), date))
        # Bug fix: was os.path.exits (AttributeError at runtime)
        if os.path.exists(filename):
            return import_json(filename)
    else:
        # Bug fix: was called without its required ticker/form_type arguments
        return return_most_recent_facts_dict(ticker, form_type)
def return_most_recent_facts_dict(ticker, form_type):
    """Import the newest '<ticker>-<date>_facts_dict.json' for *ticker*/*form_type*.

    Falls back across annual form-type folders when the primary folder holds
    no files.
    """
    folder_name = os.path.join("XBRL_Data", ticker.lower(), form_type)
    if not does_file_exist_in_dir(folder_name):
        folder_name, form_type, make_folder = folder_path_form_type_conversion(ticker.lower(), form_type)
    newest = None
    for filename in os.listdir(folder_name):
        if "_facts_dict.json" not in filename:
            continue
        date_part = filename.replace("_facts_dict.json", "").split("-")[1]
        parsed = datetime.datetime.strptime(date_part, "%Y%m%d")
        if newest is None or parsed > newest[0]:
            newest = (parsed, filename)
    # NOTE: raises TypeError when no facts-dict file exists (same as before)
    file_path = os.path.join("XBRL_Data", ticker.lower(), form_type, newest[1])
    return import_json(file_path)
t_dict = anytree.findall_by_attr(root_node, "context_dict", maxlevel=2)[0].attrib
node_tuple = anytree.findall_by_attr(root_node, attribute_name, maxlevel=2)
if node_tuple:
if len(node_tuple) != 1:
logging.error("There are multiple attribute nodes with the same name. This should not happen.")
return
node = node_tuple[0]
if date is None:
return node
context_ref = None
context_ref_list = [key for key, value in context_dict.items() if value == date]
if len(context_ref_list) == 1:
return context_ref_list[0]
if not subcategory:
context_ref_list = [ref for ref in context_ref_list if not '_' in ref]
if len(context_ref_list) > 1:
logging.error("More than one base category date")
#logging.info(pp.pformat(context_ref_list))
sys.exit()
context_ref = context_ref_list[0]
else:
subcategory_list = []
for ref in context_ref_list:
ref_split_list = ref.split("_", maxsplit=1)
if ref_split_list:
if subcategory == ref_split_list[-1]:
subcategory_list.append(ref)
if not subcategory_list:
return
if len(subcategory_list) > 1:
logging.error("More than one subcategory date")
#logging.info(pp.pformat(context_ref_list))
sys.exit()
context_ref = subcategory_list[0]
if context_ref:
for subnode in anytree.PreOrderIter(node):
try:
subnode_context_ref = subnode.attrib.get("contextRef")
except:
continue
if subnode_context_ref:
if context_ref == subnode_context_ref:
return subnode
else:
logging.error("No attributes of that name")
def convert_to_datetime(string_date_YYYY_MM_DD):
    """Parse 'YYYY-MM-DD' or 'YYYY-MM-DD:YYYY-MM-DD' (start:end).

    Returns [end_datetime, start_datetime, end - start]; start and the delta
    are None for a single date, and [None, None, None] on malformed input.
    """
    pieces = string_date_YYYY_MM_DD.split(":")
    if len(pieces) == 2:
        start_date, end_date = pieces
    elif len(pieces) == 1:
        start_date, end_date = None, pieces[0]
    else:
        logging.error("{} is not a valid date string".format(string_date_YYYY_MM_DD))
        return [None, None, None]
    # cheap shape check on the end date before handing it to strptime
    digits_ok = end_date[:4].isdigit() and end_date[5:7].isdigit() and end_date[8:].isdigit()
    if not digits_ok:
        logging.error("{} is not a valid date string".format(string_date_YYYY_MM_DD))
        return [None, None, None]
    end_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    start_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d") if start_date else None
    delta = end_dt - start_dt if start_dt else None
    return [end_dt, start_dt, delta]
def y_or_q_and_form_type_from_limit_data(Y_or_Q, form_type):
    """Normalize the (Y_or_Q, form_type) pair, inferring whichever is missing.

    'Y' pairs with annual forms and 'Q' with 10-Q; with neither given the
    default is annual ('Y', '10-K').  Raises Exception when only 'Y' is
    supplied, since the exact annual form (10-K vs 20-F vs 40-F) cannot be
    known from the period alone.
    """
    if form_type and not Y_or_Q:
        if form_type in ANNUAL_FORM_TYPES:
            Y_or_Q = "Y"
        elif form_type == "10-Q":
            Y_or_Q = "Q"
    elif Y_or_Q and not form_type:
        if Y_or_Q == "Y":
            # Removed the unreachable `form_type = "10-K"` that followed this raise.
            raise(Exception("cannot know if 10-K or 20-F or 40-F"))
        elif Y_or_Q == "Q":
            form_type = "10-Q"
    elif not (form_type or Y_or_Q):
        Y_or_Q = "Y"
        form_type = "10-K"
    return Y_or_Q, form_type
def get_most_recent_data(root_node, attribute_name, Y_or_Q=None, form_type=None, subcategory=None):
    """Return the single most recent matching fact node, or None when nothing matches.

    Thin wrapper over get_most_recent_multiple_instances() asking for one
    instance.  Bug fix: an empty lookup returns None (subscripting the helper's
    None result previously raised TypeError).
    """
    instances = get_most_recent_multiple_instances(root_node, attribute_name, 1,
                                                   Y_or_Q=Y_or_Q, form_type=form_type,
                                                   subcategory=subcategory)
    return instances[0] if instances else None
def get_most_recent_multiple_instances(root_node, attribute_name, number_of_instances, Y_or_Q=None, form_type=None, subcategory=None):
    """Return up to *number_of_instances* fact nodes for *attribute_name*, newest first.

    Finds the attribute's top-level node in the anytree fact tree, collects
    each dated instance (restricted to *subcategory* when given), keeps only
    annual ("YTD") or quarterly ("Q1".."Q4"/"QTD") contextRefs per Y_or_Q,
    and orders them by context end date.  Returns None when nothing matches
    or the attribute node is missing/ambiguous.

    Fixes relative to the previous revision:
    - timedelta.days is an attribute, not a method (was time_delta.days())
    - the current tuple's node fact is compared (was indexing the whole list,
      which always raised and silently yielded None)
    - the insertion test uses `is not None` (index 0 was falsy, so items
      newer than the newest were silently dropped)
    """
    if not Y_or_Q:
        Y_or_Q, form_type = y_or_q_and_form_type_from_limit_data(Y_or_Q, form_type)
    context_dict = anytree.findall_by_attr(root_node, "context_dict", maxlevel=2)[0].attrib
    relevant_node = anytree.findall_by_attr(root_node, attribute_name, maxlevel=2)
    if not relevant_node:
        logging.warning("no relevant node")
        return
    if len(relevant_node) != 1:
        logging.error("There are multiple attribute nodes with the same name. This should not happen.")
        return
    relevant_node = relevant_node[0]
    node_contextRef_date_tuple_list = []
    for node in anytree.PreOrderIter(relevant_node):
        basic_contextRef = None
        if subcategory:
            contextRef = return_context_ref(node)
            if contextRef:
                if subcategory in contextRef:
                    basic_contextRef = return_basic_context_ref(node)
        elif is_basic_date_context_ref(node):
            basic_contextRef = return_context_ref(node)
        if basic_contextRef:
            the_context = context_dict.get(basic_contextRef)
            node_contextRef_date_tuple_list.append([node, basic_contextRef, the_context])
    if not node_contextRef_date_tuple_list:
        logging.warning("no nodes matched")
    narrowed_list = []
    logging.info("Y_or_Q == {}".format(Y_or_Q))
    # NOTE(review): applicable_refs stays unbound when Y_or_Q is neither "Y"
    # nor "Q"; the loop below would raise NameError (pre-existing behavior).
    if Y_or_Q == 'Y':
        applicable_refs = ["YTD"]
    if Y_or_Q == 'Q':
        applicable_refs = ["Q4", "Q3", "Q2", "Q1", "QTD"]
    for node_contextRef_date_tuple in node_contextRef_date_tuple_list:
        for ref in applicable_refs:
            if node_contextRef_date_tuple[1].endswith(ref):
                narrowed_list.append(node_contextRef_date_tuple)
    node_contextRef_date_tuple_list = narrowed_list
    most_recent_list = []
    for node_contextRef_date_tuple in node_contextRef_date_tuple_list:
        context_date = node_contextRef_date_tuple[2]
        end_datetime, start_date, time_delta = convert_to_datetime(context_date)
        if not most_recent_list:
            most_recent_list.append(node_contextRef_date_tuple)
        for index, existing_node_contextRef_date_tuple_list in enumerate(most_recent_list):
            newer_than_index = None
            existing_context_date = existing_node_contextRef_date_tuple_list[2]
            existing_end_datetime, existing_start_date, existing_time_delta = convert_to_datetime(existing_context_date)
            if end_datetime > existing_end_datetime:
                newer_than_index = index
            elif end_datetime == existing_end_datetime:
                try:
                    # bug fix: compare this iteration's node (was indexing the list)
                    node_fact = node_contextRef_date_tuple[0].fact
                except AttributeError:
                    node_fact = None
                try:
                    existing_fact = existing_node_contextRef_date_tuple_list[0].fact
                except AttributeError:
                    existing_fact = None
                if node_fact == existing_fact:
                    if start_date == existing_start_date:
                        logging.warning("duplicate fact, skipping")
                        continue
                    else:
                        if time_delta is not None:
                            if Y_or_Q == "Y":
                                # bug fix: .days is an attribute (was .days())
                                if time_delta.days < 300:
                                    logging.warning("quarterly term for annual request, skipping")
                                    continue
                            elif Y_or_Q == "Q":
                                if time_delta.days > 50:
                                    logging.warning("annual term for quarterly request, skipping")
                                    continue
                        logging.warning("odd, but near identical facts are being grouped together")
                        newer_than_index = index
            # bug fix: index 0 is falsy; test for None explicitly
            if newer_than_index is not None:
                most_recent_list.insert(newer_than_index, node_contextRef_date_tuple)
    if most_recent_list:
        most_recent_list = [node_ref_date_triple[0] for node_ref_date_triple in most_recent_list]
    if not most_recent_list:
        logging.warning("There are no facts that match that search")
        return
    elif len(most_recent_list) < number_of_instances:
        return most_recent_list
    else:
        return most_recent_list[0:number_of_instances]
def print_all_simple_context_refs(root_node):
    """Log every 'simple' contextRef (no '_' qualifiers) found under *root_node*,
    plus diagnostics on how many match the expected date-code pattern.

    Purely a debugging/inspection helper; returns nothing.
    """
    pattern = re.compile(r'[A-Z]{1,2}[0-9]{4}[A-Z]{1}[0-9]{1}(YTD|QTD)?(?=\s)')
    simple_context_set = set()
    context_ref_list = []
    for node in anytree.PreOrderIter(root_node):
        try:
            context_ref = node.attrib.get("contextRef")
        except AttributeError:
            # nodes without an attrib mapping carry no contextRef
            continue
        if context_ref is not None:
            context_ref_list.append(context_ref)
    # Perf fix: this filter previously ran inside the node loop over the
    # whole growing list each time (O(n^2)); one pass yields the same set.
    for ref in context_ref_list:
        if len(ref.split("_", maxsplit=1)) == 1:
            simple_context_set.add(ref)
    # Perf fix: joined once instead of quadratic += concatenation.
    big_string = "".join("{}\n".format(ref) for ref in simple_context_set)
    logging.info(big_string)
    logging.info(type(big_string))
    matches = pattern.finditer(big_string)
    logging.info(len(simple_context_set))
    match_list = [match for match in matches]
    logging.info(len(match_list))
    #logging.info(pp.pformat(match_list))
    span_list = [match.span() for match in match_list]
    str_list = [big_string[span[0]: span[1]] for span in span_list]
    #logging.info(pp.pformat(str_list))
    #logging.info(pp.pformat([x for x in simple_context_set if x not in str_list]))
def non_basic_context_ref_pattern(root_node, attribute_name = None):
    """Log contextRef bases for inspection.

    Without *attribute_name*: log every simple (no '_') contextRef from the
    tree's context_dict.  With it: log the distinct contextRef base pieces
    used by that attribute's nodes.
    """
    if not attribute_name:
        context_dict = anytree.findall_by_attr(root_node, "context_dict", maxlevel=2)[0].attrib
        simple_refs = {ref for ref in context_dict.keys() if len(ref.split("_")) == 1}
        logging.info("")
        for ref in sorted(simple_refs):
            logging.info(ref)
    if attribute_name:
        found = anytree.findall_by_attr(root_node, attribute_name, maxlevel=2)
        if not found:
            return
        collected_refs = []
        for node in anytree.PreOrderIter(found[0]):
            try:
                collected_refs.append(node.attrib.get("contextRef"))
            except:
                pass
        base_refs = sorted({r.split("_")[0] for r in collected_refs if r is not None})
        for ref in base_refs:
            logging.info(ref)
def get_most_recent_annual_data(root_node, attribute_name, date=None, subcategory=None):
    """Return the most recent annual (Y) fact node for *attribute_name*.

    Bug fix: *subcategory* is now forwarded (it was silently dropped).
    *date* remains unused by the underlying lookup.
    """
    return get_most_recent_data(root_node, attribute_name, Y_or_Q="Y", subcategory=subcategory)
def get_most_recent_quarterly_data(root_node, attribute_name, date=None, subcategory=None):
    """Return the most recent quarterly (Q) fact node for *attribute_name*.

    Bug fix: *subcategory* is now forwarded (it was silently dropped).
    *date* remains unused by the underlying lookup.
    """
    return get_most_recent_data(root_node, attribute_name, Y_or_Q="Q", subcategory=subcategory)
def get_top_data_node(root_node, attribute_name):
    """Return the tuple of nodes named *attribute_name* within the top two levels."""
    return anytree.findall_by_attr(root_node, attribute_name, maxlevel=2)
def return_context_ref(node):
    """Return the node's 'contextRef' attribute value, or None.

    Returns None both when the attribute is absent and when the node has no
    attrib mapping at all (narrowed from a bare except to AttributeError).
    """
    try:
        return node.attrib.get('contextRef')
    except AttributeError:
        return
def return_basic_context_ref(node):
    """Return the first '_'-separated piece of the node's contextRef, or None."""
    pieces = return_split_context_ref_list(return_context_ref(node))
    if pieces:
        return pieces[0]
def return_split_context_ref_list(contextRef):
    """Split a contextRef string on '_'; a None input passes through as None."""
    if contextRef is not None:
        return contextRef.split("_")
def is_basic_date_context_ref(node):
    """True when the node's contextRef exists and carries no '_' qualifiers.

    Returns None (falsy) otherwise, matching the original implicit return.
    """
    contextRef = return_context_ref(node)
    if contextRef and "_" not in contextRef:
        return True
def analayse_split_context_ref(node):
    """Decompose a node's contextRef into a dict with its base (date) piece and,
    for Axis-style refs, the axis/member breakdown.

    Returns None when the node has no contextRef.
    """
    contextRef = return_context_ref(node)
    split_contextRef = return_split_context_ref_list(contextRef)
    if split_contextRef is None:
        return
    # the vast majority of refs are a bare date code: just the base piece
    result = {"base": split_contextRef[0]}
    if len(split_contextRef) > 1 and "Axis" in contextRef:
        result.update(return_axis_based_context_ref_dict(split_contextRef))
    return result
def return_axis_based_context_ref_dict(split_contextRef):
    """Decompose an "Axis"-style contextRef (already split on "_") into a dict.

    For every segment ending in "Axis" the result gains the axis string, the
    prefix segment preceding it, and — when present — the subcategory
    prefix/value pair that follows it.  Keys for the second and later axes are
    suffixed "_2", "_3", ...  Segments not assigned a meaning are surfaced
    under "axis_extra".
    """
    dict_to_return = {}
    # Indices of every axis marker segment (removed the unused `list_len`).
    indices_of_axis_strs = [
        index for index, sub_string in enumerate(split_contextRef)
        if sub_string.endswith("Axis")
    ]
    # Every segment we assign a meaning to is recorded here so leftovers can
    # be reported as "axis_extra" at the end.
    double_check_list = [split_contextRef[0]]
    for index, axis_index in enumerate(indices_of_axis_strs):
        index_str = ""
        if index > 0:
            index_str = "_{}".format(index + 1)
        axis_string = split_contextRef[axis_index]
        dict_to_return.update({"axis_string{}".format(index_str): axis_string})
        prefix = None
        if axis_index > 0:
            prefix = split_contextRef[axis_index - 1]
        dict_to_return.update({"axis_prefix{}".format(index_str): prefix})
        double_check_list.append(prefix)
        double_check_list.append(axis_string)
        subcategory_prefix = None
        axis_subcategory = None
        try:
            subcategory_prefix = split_contextRef[axis_index + 1]
            axis_subcategory = split_contextRef[axis_index + 2]
        except IndexError:
            # Narrowed from a bare `except:`: the axis may sit at the end of
            # the ref, in which case there is simply no subcategory pair.
            pass
        if subcategory_prefix and axis_subcategory:
            dict_to_return.update({
                "axis_subcategory_prefix{}".format(index_str): subcategory_prefix,
                "axis_subcategory{}".format(index_str): axis_subcategory,
            })
            double_check_list.append(subcategory_prefix)
            double_check_list.append(axis_subcategory)
    if not double_check_list == split_contextRef:
        axis_extra = [x for x in split_contextRef if x not in double_check_list]
        dict_to_return.update({"axis_extra": axis_extra})
    return dict_to_return
# Module-level run flags; the __main__ block below overrides some of them
# when `testing` is enabled.
delete_after_import = False
testing_write_file = False
force_download = False
testing = False
if __name__ == "__main__":
    # Ad-hoc smoke-test driver: downloads and parses filings for a fixed
    # basket of tickers.  Only runs when the module-level `testing` flag is on.
    if testing:
        # (ticker, CIK) pairs — CIK is the SEC's company identifier.
        ticker_cik_list = [("AAPL", 320193), ("GOOG", 1652044), ("MRVL", 1058057), ("GRMN", 1121788), ("STX", 1137789), ("BIDU", 1329099), ("INFY", 1067491), ("WCRX", 1323854), ("CHKP", 1015922), ("TEVA", 818686), ("FLEX", 866374), ("LOGI", 1032975)]
        randomize = False
        date_specific = False
        delete_after_import = False
        testing_write_file = True
        force_download = False
        for ticker_cik in ticker_cik_list:
            form = '10-K'
            forms = ["10-K", "10-Q"]
            form_choice = forms.index(form)
            # Optionally pick 10-K vs 10-Q at random to exercise both paths.
            if randomize:
                random_form = random.choice(forms)
                logging.info(random_form)
                form_choice = forms.index(random_form)
            form_type = forms[form_choice]
            xbrl_tree_root = main_download_and_convert(ticker_cik[0].lower(), ticker_cik[1], form_type, delete_files_after_import=delete_after_import, force_download=force_download)
            my_dict = return_most_recent_facts_dict(ticker_cik[0], form)
            #logging.info(pp.pformat(unit_ref_list))
#end of line | true | true |
f7ff5799af5e7c3aa6a1090f8d5c403ccc15629e | 22,915 | py | Python | release/scripts/startup/nodeitems_builtins.py | recogni/blender | af830498016c01c4847f00b51246bc8e3c88f6f6 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-01-30T01:03:05.000Z | 2020-01-30T01:03:05.000Z | release/scripts/startup/nodeitems_builtins.py | recogni/blender | af830498016c01c4847f00b51246bc8e3c88f6f6 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/startup/nodeitems_builtins.py | recogni/blender | af830498016c01c4847f00b51246bc8e3c88f6f6 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
import nodeitems_utils
from nodeitems_utils import (
NodeCategory,
NodeItem,
NodeItemCustom,
)
# Subclasses for standard node types
class SortedNodeCategory(NodeCategory):
    """Node category whose item list is kept sorted alphabetically by label."""

    def __init__(self, identifier, name, description="", items=None):
        # Builtin nodes are conventionally presented in name order.
        if isinstance(items, list):
            items = sorted(items, key=lambda entry: entry.label.lower())
        super().__init__(identifier, name, description, items)
class CompositorNodeCategory(SortedNodeCategory):
    """Category that is shown only while editing a compositor node tree."""

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return space.type == 'NODE_EDITOR' and space.tree_type == 'CompositorNodeTree'
class ShaderNodeCategory(SortedNodeCategory):
    """Category that is shown only while editing a shader node tree."""

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return space.type == 'NODE_EDITOR' and space.tree_type == 'ShaderNodeTree'
class TextureNodeCategory(SortedNodeCategory):
    """Category that is shown only while editing a texture node tree."""

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return space.type == 'NODE_EDITOR' and space.tree_type == 'TextureNodeTree'
class GeometryNodeCategory(SortedNodeCategory):
    """Category that is shown only while editing a geometry node tree."""

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return space.type == 'NODE_EDITOR' and space.tree_type == 'GeometryNodeTree'
# Menu entry for the node-group tools (make/ungroup) plus a separator.
def group_tools_draw(self, layout, context):
    """Draw the group make/ungroup operators followed by a separator."""
    for operator_id in ("node.group_make", "node.group_ungroup"):
        layout.operator(operator_id)
    layout.separator()
# Maps a node-tree bl_idname to the group node type able to embed it.
node_tree_group_type = {
    'CompositorNodeTree': 'CompositorNodeGroup',
    'ShaderNodeTree': 'ShaderNodeGroup',
    'TextureNodeTree': 'TextureNodeGroup',
    'GeometryNodeTree': 'GeometryNodeGroup',
}
# generic node group items generator for shader, compositor, geometry and texture node groups
def node_group_items(context):
    """Yield add-menu items for node groups usable in the edited tree.

    Yields the make/ungroup tool entries, the group input/output nodes, a
    separator, and then one item per compatible node-group datablock that is
    neither recursive nor hidden.
    """
    if context is None:
        return
    space = context.space_data
    if not space:
        return
    ntree = space.edit_tree
    if not ntree:
        return

    yield NodeItemCustom(draw=group_tools_draw)

    yield NodeItem("NodeGroupInput", poll=group_input_output_item_poll)
    yield NodeItem("NodeGroupOutput", poll=group_input_output_item_poll)

    yield NodeItemCustom(draw=lambda self, layout, context: layout.separator())

    def contains_group(nodetree, group):
        # True when `group` is reachable from `nodetree` (including itself);
        # used below to reject groups that would recurse into the edited tree.
        if nodetree == group:
            return True
        else:
            for node in nodetree.nodes:
                if node.bl_idname in node_tree_group_type.values() and node.node_tree is not None:
                    if contains_group(node.node_tree, group):
                        return True
        return False

    for group in context.blend_data.node_groups:
        if group.bl_idname != ntree.bl_idname:
            continue
        # filter out recursive groups
        if contains_group(group, ntree):
            continue
        # filter out hidden nodetrees
        if group.name.startswith('.'):
            continue
        yield NodeItem(node_tree_group_type[group.bl_idname],
                       group.name,
                       {"node_tree": "bpy.data.node_groups[%r]" % group.name})
# only show input/output nodes inside node groups
def group_input_output_item_poll(context):
    """True while the edited tree is a node-group datablock (not a root tree)."""
    # Direct boolean expression replaces the explicit True/False branches.
    return context.space_data.edit_tree in bpy.data.node_groups.values()
# only show certain nodes when editing a Freestyle line-style shader tree
def line_style_shader_nodes_poll(context):
    """Poll: the edited tree is a line-style shader tree."""
    space = context.space_data
    if space.tree_type != 'ShaderNodeTree':
        return False
    return space.shader_type == 'LINESTYLE'
# only show nodes that work in world shader trees
def world_shader_nodes_poll(context):
    """Poll: the edited tree is a world shader tree."""
    space = context.space_data
    if space.tree_type != 'ShaderNodeTree':
        return False
    return space.shader_type == 'WORLD'
# only show nodes that work in object (material) shader trees
def object_shader_nodes_poll(context):
    """Poll: the edited tree is an object/material shader tree."""
    space = context.space_data
    if space.tree_type != 'ShaderNodeTree':
        return False
    return space.shader_type == 'OBJECT'
def cycles_shader_nodes_poll(context):
    """Poll: the active render engine is Cycles."""
    return context.engine == 'CYCLES'
def eevee_shader_nodes_poll(context):
    """Poll: the active render engine is Eevee."""
    return context.engine == 'BLENDER_EEVEE'
def eevee_cycles_shader_nodes_poll(context):
    """Poll: the engine is either Cycles or Eevee."""
    return any(poll(context) for poll in
               (cycles_shader_nodes_poll, eevee_shader_nodes_poll))
def object_cycles_shader_nodes_poll(context):
    """Poll: editing an object shader tree with Cycles active."""
    return all(poll(context) for poll in
               (object_shader_nodes_poll, cycles_shader_nodes_poll))
def object_eevee_shader_nodes_poll(context):
    """Poll: editing an object shader tree with Eevee active."""
    return all(poll(context) for poll in
               (object_shader_nodes_poll, eevee_shader_nodes_poll))
def object_eevee_cycles_shader_nodes_poll(context):
    """Poll: editing an object shader tree with Cycles or Eevee active."""
    return all(poll(context) for poll in
               (object_shader_nodes_poll, eevee_cycles_shader_nodes_poll))
# All standard node categories currently used in nodes.
# Each list entry becomes one submenu of the editor's Add menu; item order
# within a category is irrelevant because SortedNodeCategory sorts by label.
shader_node_categories = [
    # Shader Nodes (Cycles and Eevee)
    ShaderNodeCategory("SH_NEW_INPUT", "Input", items=[
        NodeItem("ShaderNodeTexCoord"),
        NodeItem("ShaderNodeAttribute"),
        NodeItem("ShaderNodeLightPath"),
        NodeItem("ShaderNodeFresnel"),
        NodeItem("ShaderNodeLayerWeight"),
        NodeItem("ShaderNodeRGB"),
        NodeItem("ShaderNodeValue"),
        NodeItem("ShaderNodeTangent"),
        NodeItem("ShaderNodeNewGeometry"),
        NodeItem("ShaderNodeWireframe"),
        NodeItem("ShaderNodeBevel"),
        NodeItem("ShaderNodeAmbientOcclusion"),
        NodeItem("ShaderNodeObjectInfo"),
        NodeItem("ShaderNodeHairInfo"),
        NodeItem("ShaderNodeVolumeInfo"),
        NodeItem("ShaderNodeParticleInfo"),
        NodeItem("ShaderNodeCameraData"),
        NodeItem("ShaderNodeUVMap"),
        NodeItem("ShaderNodeVertexColor"),
        NodeItem("ShaderNodeUVAlongStroke", poll=line_style_shader_nodes_poll),
    ]),
    ShaderNodeCategory("SH_NEW_OUTPUT", "Output", items=[
        NodeItem("ShaderNodeOutputMaterial", poll=object_eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeOutputLight", poll=object_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeOutputAOV"),
        NodeItem("ShaderNodeOutputWorld", poll=world_shader_nodes_poll),
        NodeItem("ShaderNodeOutputLineStyle", poll=line_style_shader_nodes_poll),
    ]),
    ShaderNodeCategory("SH_NEW_SHADER", "Shader", items=[
        NodeItem("ShaderNodeMixShader", poll=eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeAddShader", poll=eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeBsdfDiffuse", poll=object_eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeBsdfPrincipled", poll=object_eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeBsdfGlossy", poll=object_eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeBsdfTransparent", poll=object_eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeBsdfRefraction", poll=object_eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeBsdfGlass", poll=object_eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeBsdfTranslucent", poll=object_eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeBsdfAnisotropic", poll=object_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeBsdfVelvet", poll=object_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeBsdfToon", poll=object_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeSubsurfaceScattering", poll=object_eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeEmission", poll=eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeBsdfHair", poll=object_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeBackground", poll=world_shader_nodes_poll),
        NodeItem("ShaderNodeHoldout", poll=object_eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeVolumeAbsorption", poll=eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeVolumeScatter", poll=eevee_cycles_shader_nodes_poll),
        NodeItem("ShaderNodeVolumePrincipled"),
        NodeItem("ShaderNodeEeveeSpecular", poll=object_eevee_shader_nodes_poll),
        NodeItem("ShaderNodeBsdfHairPrincipled", poll=object_cycles_shader_nodes_poll)
    ]),
    ShaderNodeCategory("SH_NEW_TEXTURE", "Texture", items=[
        NodeItem("ShaderNodeTexImage"),
        NodeItem("ShaderNodeTexEnvironment"),
        NodeItem("ShaderNodeTexSky"),
        NodeItem("ShaderNodeTexNoise"),
        NodeItem("ShaderNodeTexWave"),
        NodeItem("ShaderNodeTexVoronoi"),
        NodeItem("ShaderNodeTexMusgrave"),
        NodeItem("ShaderNodeTexGradient"),
        NodeItem("ShaderNodeTexMagic"),
        NodeItem("ShaderNodeTexChecker"),
        NodeItem("ShaderNodeTexBrick"),
        NodeItem("ShaderNodeTexPointDensity"),
        NodeItem("ShaderNodeTexIES"),
        NodeItem("ShaderNodeTexWhiteNoise"),
    ]),
    ShaderNodeCategory("SH_NEW_OP_COLOR", "Color", items=[
        NodeItem("ShaderNodeMixRGB"),
        NodeItem("ShaderNodeRGBCurve"),
        NodeItem("ShaderNodeInvert"),
        NodeItem("ShaderNodeLightFalloff"),
        NodeItem("ShaderNodeHueSaturation"),
        NodeItem("ShaderNodeGamma"),
        NodeItem("ShaderNodeBrightContrast"),
    ]),
    ShaderNodeCategory("SH_NEW_OP_VECTOR", "Vector", items=[
        NodeItem("ShaderNodeMapping"),
        NodeItem("ShaderNodeBump"),
        NodeItem("ShaderNodeDisplacement"),
        NodeItem("ShaderNodeVectorDisplacement"),
        NodeItem("ShaderNodeNormalMap"),
        NodeItem("ShaderNodeNormal"),
        NodeItem("ShaderNodeVectorCurve"),
        NodeItem("ShaderNodeVectorRotate"),
        NodeItem("ShaderNodeVectorTransform"),
    ]),
    ShaderNodeCategory("SH_NEW_CONVERTOR", "Converter", items=[
        NodeItem("ShaderNodeMapRange"),
        NodeItem("ShaderNodeClamp"),
        NodeItem("ShaderNodeMath"),
        NodeItem("ShaderNodeValToRGB"),
        NodeItem("ShaderNodeRGBToBW"),
        NodeItem("ShaderNodeShaderToRGB", poll=object_eevee_shader_nodes_poll),
        NodeItem("ShaderNodeVectorMath"),
        NodeItem("ShaderNodeSeparateRGB"),
        NodeItem("ShaderNodeCombineRGB"),
        NodeItem("ShaderNodeSeparateXYZ"),
        NodeItem("ShaderNodeCombineXYZ"),
        NodeItem("ShaderNodeSeparateHSV"),
        NodeItem("ShaderNodeCombineHSV"),
        NodeItem("ShaderNodeWavelength"),
        NodeItem("ShaderNodeBlackbody"),
    ]),
    ShaderNodeCategory("SH_NEW_SCRIPT", "Script", items=[
        NodeItem("ShaderNodeScript"),
    ]),
    ShaderNodeCategory("SH_NEW_GROUP", "Group", items=node_group_items),
    ShaderNodeCategory("SH_NEW_LAYOUT", "Layout", items=[
        NodeItem("NodeFrame"),
        NodeItem("NodeReroute"),
    ]),
]

compositor_node_categories = [
    # Compositor Nodes
    CompositorNodeCategory("CMP_INPUT", "Input", items=[
        NodeItem("CompositorNodeRLayers"),
        NodeItem("CompositorNodeImage"),
        NodeItem("CompositorNodeMovieClip"),
        NodeItem("CompositorNodeMask"),
        NodeItem("CompositorNodeRGB"),
        NodeItem("CompositorNodeValue"),
        NodeItem("CompositorNodeTexture"),
        NodeItem("CompositorNodeBokehImage"),
        NodeItem("CompositorNodeTime"),
        NodeItem("CompositorNodeTrackPos"),
    ]),
    CompositorNodeCategory("CMP_OUTPUT", "Output", items=[
        NodeItem("CompositorNodeComposite"),
        NodeItem("CompositorNodeViewer"),
        NodeItem("CompositorNodeSplitViewer"),
        NodeItem("CompositorNodeOutputFile"),
        NodeItem("CompositorNodeLevels"),
    ]),
    CompositorNodeCategory("CMP_OP_COLOR", "Color", items=[
        NodeItem("CompositorNodeMixRGB"),
        NodeItem("CompositorNodeAlphaOver"),
        NodeItem("CompositorNodeInvert"),
        NodeItem("CompositorNodeCurveRGB"),
        NodeItem("CompositorNodeHueSat"),
        NodeItem("CompositorNodeColorBalance"),
        NodeItem("CompositorNodeHueCorrect"),
        NodeItem("CompositorNodeBrightContrast"),
        NodeItem("CompositorNodeGamma"),
        NodeItem("CompositorNodeExposure"),
        NodeItem("CompositorNodeColorCorrection"),
        NodeItem("CompositorNodeTonemap"),
        NodeItem("CompositorNodeZcombine"),
    ]),
    CompositorNodeCategory("CMP_CONVERTOR", "Converter", items=[
        NodeItem("CompositorNodeMath"),
        NodeItem("CompositorNodeValToRGB"),
        NodeItem("CompositorNodeSetAlpha"),
        NodeItem("CompositorNodePremulKey"),
        NodeItem("CompositorNodeIDMask"),
        NodeItem("CompositorNodeRGBToBW"),
        NodeItem("CompositorNodeSepRGBA"),
        NodeItem("CompositorNodeCombRGBA"),
        NodeItem("CompositorNodeSepHSVA"),
        NodeItem("CompositorNodeCombHSVA"),
        NodeItem("CompositorNodeSepYUVA"),
        NodeItem("CompositorNodeCombYUVA"),
        NodeItem("CompositorNodeSepYCCA"),
        NodeItem("CompositorNodeCombYCCA"),
        NodeItem("CompositorNodeSwitchView"),
    ]),
    CompositorNodeCategory("CMP_OP_FILTER", "Filter", items=[
        NodeItem("CompositorNodeBlur"),
        NodeItem("CompositorNodeBilateralblur"),
        NodeItem("CompositorNodeDilateErode"),
        NodeItem("CompositorNodeDespeckle"),
        NodeItem("CompositorNodeFilter"),
        NodeItem("CompositorNodeBokehBlur"),
        NodeItem("CompositorNodeVecBlur"),
        NodeItem("CompositorNodeDefocus"),
        NodeItem("CompositorNodeGlare"),
        NodeItem("CompositorNodeInpaint"),
        NodeItem("CompositorNodeDBlur"),
        NodeItem("CompositorNodePixelate"),
        NodeItem("CompositorNodeSunBeams"),
        NodeItem("CompositorNodeDenoise"),
        # Non-upstream additions (this fork): scriptable filter + object-ID pass.
        NodeItem("CompositorNodePython"),
        NodeItem("CompositorNodeRecogniObjectID"),
    ]),
    CompositorNodeCategory("CMP_OP_VECTOR", "Vector", items=[
        NodeItem("CompositorNodeNormal"),
        NodeItem("CompositorNodeMapValue"),
        NodeItem("CompositorNodeMapRange"),
        NodeItem("CompositorNodeNormalize"),
        NodeItem("CompositorNodeCurveVec"),
    ]),
    CompositorNodeCategory("CMP_MATTE", "Matte", items=[
        NodeItem("CompositorNodeKeying"),
        NodeItem("CompositorNodeKeyingScreen"),
        NodeItem("CompositorNodeChannelMatte"),
        NodeItem("CompositorNodeColorSpill"),
        NodeItem("CompositorNodeBoxMask"),
        NodeItem("CompositorNodeEllipseMask"),
        NodeItem("CompositorNodeLumaMatte"),
        NodeItem("CompositorNodeDiffMatte"),
        NodeItem("CompositorNodeDistanceMatte"),
        NodeItem("CompositorNodeChromaMatte"),
        NodeItem("CompositorNodeColorMatte"),
        NodeItem("CompositorNodeDoubleEdgeMask"),
        NodeItem("CompositorNodeCryptomatte"),
        NodeItem("CompositorNodeCryptomatteV2"),
    ]),
    CompositorNodeCategory("CMP_DISTORT", "Distort", items=[
        NodeItem("CompositorNodeScale"),
        NodeItem("CompositorNodeLensdist"),
        NodeItem("CompositorNodeMovieDistortion"),
        NodeItem("CompositorNodeTranslate"),
        NodeItem("CompositorNodeRotate"),
        NodeItem("CompositorNodeFlip"),
        NodeItem("CompositorNodeCrop"),
        NodeItem("CompositorNodeDisplace"),
        NodeItem("CompositorNodeMapUV"),
        NodeItem("CompositorNodeTransform"),
        NodeItem("CompositorNodeStabilize"),
        NodeItem("CompositorNodePlaneTrackDeform"),
        NodeItem("CompositorNodeCornerPin"),
    ]),
    CompositorNodeCategory("CMP_GROUP", "Group", items=node_group_items),
    CompositorNodeCategory("CMP_LAYOUT", "Layout", items=[
        NodeItem("NodeFrame"),
        NodeItem("NodeReroute"),
        NodeItem("CompositorNodeSwitch"),
    ]),
]

texture_node_categories = [
    # Texture Nodes
    TextureNodeCategory("TEX_INPUT", "Input", items=[
        NodeItem("TextureNodeCurveTime"),
        NodeItem("TextureNodeCoordinates"),
        NodeItem("TextureNodeTexture"),
        NodeItem("TextureNodeImage"),
    ]),
    TextureNodeCategory("TEX_OUTPUT", "Output", items=[
        NodeItem("TextureNodeOutput"),
        NodeItem("TextureNodeViewer"),
    ]),
    TextureNodeCategory("TEX_OP_COLOR", "Color", items=[
        NodeItem("TextureNodeMixRGB"),
        NodeItem("TextureNodeCurveRGB"),
        NodeItem("TextureNodeInvert"),
        NodeItem("TextureNodeHueSaturation"),
        NodeItem("TextureNodeCompose"),
        NodeItem("TextureNodeDecompose"),
    ]),
    TextureNodeCategory("TEX_PATTERN", "Pattern", items=[
        NodeItem("TextureNodeChecker"),
        NodeItem("TextureNodeBricks"),
    ]),
    TextureNodeCategory("TEX_TEXTURE", "Textures", items=[
        NodeItem("TextureNodeTexNoise"),
        NodeItem("TextureNodeTexDistNoise"),
        NodeItem("TextureNodeTexClouds"),
        NodeItem("TextureNodeTexBlend"),
        NodeItem("TextureNodeTexVoronoi"),
        NodeItem("TextureNodeTexMagic"),
        NodeItem("TextureNodeTexMarble"),
        NodeItem("TextureNodeTexWood"),
        NodeItem("TextureNodeTexMusgrave"),
        NodeItem("TextureNodeTexStucci"),
    ]),
    TextureNodeCategory("TEX_CONVERTOR", "Converter", items=[
        NodeItem("TextureNodeMath"),
        NodeItem("TextureNodeValToRGB"),
        NodeItem("TextureNodeRGBToBW"),
        NodeItem("TextureNodeValToNor"),
        NodeItem("TextureNodeDistance"),
    ]),
    TextureNodeCategory("TEX_DISTORT", "Distort", items=[
        NodeItem("TextureNodeScale"),
        NodeItem("TextureNodeTranslate"),
        NodeItem("TextureNodeRotate"),
        NodeItem("TextureNodeAt"),
    ]),
    TextureNodeCategory("TEX_GROUP", "Group", items=node_group_items),
    TextureNodeCategory("TEX_LAYOUT", "Layout", items=[
        NodeItem("NodeFrame"),
        NodeItem("NodeReroute"),
    ]),
]
def not_implemented_node(idname):
    """Return a NodeItem whose label marks the node type as a mockup."""
    rna_name = getattr(bpy.types, idname).bl_rna.name
    return NodeItem(idname, label="%s (mockup)" % rna_name)
# Geometry-nodes add menu; item order inside a category is irrelevant
# because GeometryNodeCategory sorts by label.
geometry_node_categories = [
    # Geometry Nodes
    GeometryNodeCategory("GEO_ATTRIBUTE", "Attribute", items=[
        NodeItem("GeometryNodeAttributeRandomize"),
        NodeItem("GeometryNodeAttributeMath"),
        NodeItem("GeometryNodeAttributeCompare"),
        NodeItem("GeometryNodeAttributeConvert"),
        NodeItem("GeometryNodeAttributeFill"),
        NodeItem("GeometryNodeAttributeMix"),
        NodeItem("GeometryNodeAttributeProximity"),
        NodeItem("GeometryNodeAttributeColorRamp"),
        NodeItem("GeometryNodeAttributeVectorMath"),
        NodeItem("GeometryNodeAttributeSampleTexture"),
        NodeItem("GeometryNodeAttributeCombineXYZ"),
        NodeItem("GeometryNodeAttributeSeparateXYZ"),
        NodeItem("GeometryNodeAttributeRemove"),
    ]),
    GeometryNodeCategory("GEO_COLOR", "Color", items=[
        NodeItem("ShaderNodeValToRGB"),
        NodeItem("ShaderNodeSeparateRGB"),
        NodeItem("ShaderNodeCombineRGB"),
    ]),
    GeometryNodeCategory("GEO_GEOMETRY", "Geometry", items=[
        NodeItem("GeometryNodeTransform"),
        NodeItem("GeometryNodeJoinGeometry"),
    ]),
    GeometryNodeCategory("GEO_INPUT", "Input", items=[
        NodeItem("GeometryNodeObjectInfo"),
        NodeItem("GeometryNodeCollectionInfo"),
        NodeItem("FunctionNodeRandomFloat"),
        NodeItem("ShaderNodeValue"),
        NodeItem("FunctionNodeInputString"),
        NodeItem("FunctionNodeInputVector"),
        NodeItem("GeometryNodeIsViewport"),
    ]),
    GeometryNodeCategory("GEO_MESH", "Mesh", items=[
        NodeItem("GeometryNodeBoolean"),
        NodeItem("GeometryNodeTriangulate"),
        NodeItem("GeometryNodeEdgeSplit"),
        NodeItem("GeometryNodeSubdivisionSurface"),
        NodeItem("GeometryNodeSubdivide"),

        # These should be in a sub-menu, but that requires a refactor to build the add menu manually.
        NodeItem("GeometryNodeMeshCube"),
        NodeItem("GeometryNodeMeshCircle"),
        NodeItem("GeometryNodeMeshUVSphere"),
        NodeItem("GeometryNodeMeshIcoSphere"),
        NodeItem("GeometryNodeMeshCylinder"),
        NodeItem("GeometryNodeMeshCone"),
        NodeItem("GeometryNodeMeshLine"),
        NodeItem("GeometryNodeMeshPlane"),
    ]),
    GeometryNodeCategory("GEO_POINT", "Point", items=[
        NodeItem("GeometryNodePointDistribute"),
        NodeItem("GeometryNodePointInstance"),
        NodeItem("GeometryNodePointSeparate"),
        NodeItem("GeometryNodePointScale"),
        NodeItem("GeometryNodePointTranslate"),
        NodeItem("GeometryNodeRotatePoints"),
        NodeItem("GeometryNodeAlignRotationToVector"),
    ]),
    GeometryNodeCategory("GEO_VOLUME", "Volume", items=[
        NodeItem("GeometryNodePointsToVolume"),
        NodeItem("GeometryNodeVolumeToMesh"),
    ]),
    GeometryNodeCategory("GEO_UTILITIES", "Utilities", items=[
        NodeItem("ShaderNodeMapRange"),
        NodeItem("ShaderNodeClamp"),
        NodeItem("ShaderNodeMath"),
        NodeItem("FunctionNodeBooleanMath"),
        NodeItem("FunctionNodeFloatCompare"),
    ]),
    GeometryNodeCategory("GEO_VECTOR", "Vector", items=[
        NodeItem("ShaderNodeSeparateXYZ"),
        NodeItem("ShaderNodeCombineXYZ"),
        NodeItem("ShaderNodeVectorMath"),
        NodeItem("ShaderNodeVectorRotate"),
    ]),
    GeometryNodeCategory("GEO_GROUP", "Group", items=node_group_items),
    GeometryNodeCategory("GEO_LAYOUT", "Layout", items=[
        NodeItem("NodeFrame"),
        NodeItem("NodeReroute"),
    ]),
    # NodeItem("FunctionNodeCombineStrings"),
    # NodeItem("FunctionNodeGroupInstanceID"),
]
def register():
    """Register the builtin category lists for every builtin tree type."""
    for identifier, categories in (
            ('SHADER', shader_node_categories),
            ('COMPOSITING', compositor_node_categories),
            ('TEXTURE', texture_node_categories),
            ('GEOMETRY', geometry_node_categories),
    ):
        nodeitems_utils.register_node_categories(identifier, categories)
def unregister():
    """Unregister all builtin node category lists."""
    for identifier in ('SHADER', 'COMPOSITING', 'TEXTURE', 'GEOMETRY'):
        nodeitems_utils.unregister_node_categories(identifier)
# Allow running the file directly (e.g. from Blender's text editor) to
# (re)register the categories.
if __name__ == "__main__":
    register()
| 38.971088 | 101 | 0.695134 | m.label.lower())
super().__init__(identifier, name, description, items)
class CompositorNodeCategory(SortedNodeCategory):
@classmethod
def poll(cls, context):
return (context.space_data.type == 'NODE_EDITOR' and
context.space_data.tree_type == 'CompositorNodeTree')
class ShaderNodeCategory(SortedNodeCategory):
@classmethod
def poll(cls, context):
return (context.space_data.type == 'NODE_EDITOR' and
context.space_data.tree_type == 'ShaderNodeTree')
class TextureNodeCategory(SortedNodeCategory):
@classmethod
def poll(cls, context):
return (context.space_data.type == 'NODE_EDITOR' and
context.space_data.tree_type == 'TextureNodeTree')
class GeometryNodeCategory(SortedNodeCategory):
@classmethod
def poll(cls, context):
return (context.space_data.type == 'NODE_EDITOR' and
context.space_data.tree_type == 'GeometryNodeTree')
def group_tools_draw(self, layout, context):
layout.operator("node.group_make")
layout.operator("node.group_ungroup")
layout.separator()
node_tree_group_type = {
'CompositorNodeTree': 'CompositorNodeGroup',
'ShaderNodeTree': 'ShaderNodeGroup',
'TextureNodeTree': 'TextureNodeGroup',
'GeometryNodeTree': 'GeometryNodeGroup',
}
def node_group_items(context):
if context is None:
return
space = context.space_data
if not space:
return
ntree = space.edit_tree
if not ntree:
return
yield NodeItemCustom(draw=group_tools_draw)
yield NodeItem("NodeGroupInput", poll=group_input_output_item_poll)
yield NodeItem("NodeGroupOutput", poll=group_input_output_item_poll)
yield NodeItemCustom(draw=lambda self, layout, context: layout.separator())
def contains_group(nodetree, group):
if nodetree == group:
return True
else:
for node in nodetree.nodes:
if node.bl_idname in node_tree_group_type.values() and node.node_tree is not None:
if contains_group(node.node_tree, group):
return True
return False
for group in context.blend_data.node_groups:
if group.bl_idname != ntree.bl_idname:
continue
if contains_group(group, ntree):
continue
if group.name.startswith('.'):
continue
yield NodeItem(node_tree_group_type[group.bl_idname],
group.name,
{"node_tree": "bpy.data.node_groups[%r]" % group.name})
def group_input_output_item_poll(context):
space = context.space_data
if space.edit_tree in bpy.data.node_groups.values():
return True
return False
def line_style_shader_nodes_poll(context):
snode = context.space_data
return (snode.tree_type == 'ShaderNodeTree' and
snode.shader_type == 'LINESTYLE')
def world_shader_nodes_poll(context):
snode = context.space_data
return (snode.tree_type == 'ShaderNodeTree' and
snode.shader_type == 'WORLD')
def object_shader_nodes_poll(context):
snode = context.space_data
return (snode.tree_type == 'ShaderNodeTree' and
snode.shader_type == 'OBJECT')
def cycles_shader_nodes_poll(context):
return context.engine == 'CYCLES'
def eevee_shader_nodes_poll(context):
return context.engine == 'BLENDER_EEVEE'
def eevee_cycles_shader_nodes_poll(context):
return (cycles_shader_nodes_poll(context) or
eevee_shader_nodes_poll(context))
def object_cycles_shader_nodes_poll(context):
return (object_shader_nodes_poll(context) and
cycles_shader_nodes_poll(context))
def object_eevee_shader_nodes_poll(context):
return (object_shader_nodes_poll(context) and
eevee_shader_nodes_poll(context))
def object_eevee_cycles_shader_nodes_poll(context):
return (object_shader_nodes_poll(context) and
eevee_cycles_shader_nodes_poll(context))
shader_node_categories = [
ShaderNodeCategory("SH_NEW_INPUT", "Input", items=[
NodeItem("ShaderNodeTexCoord"),
NodeItem("ShaderNodeAttribute"),
NodeItem("ShaderNodeLightPath"),
NodeItem("ShaderNodeFresnel"),
NodeItem("ShaderNodeLayerWeight"),
NodeItem("ShaderNodeRGB"),
NodeItem("ShaderNodeValue"),
NodeItem("ShaderNodeTangent"),
NodeItem("ShaderNodeNewGeometry"),
NodeItem("ShaderNodeWireframe"),
NodeItem("ShaderNodeBevel"),
NodeItem("ShaderNodeAmbientOcclusion"),
NodeItem("ShaderNodeObjectInfo"),
NodeItem("ShaderNodeHairInfo"),
NodeItem("ShaderNodeVolumeInfo"),
NodeItem("ShaderNodeParticleInfo"),
NodeItem("ShaderNodeCameraData"),
NodeItem("ShaderNodeUVMap"),
NodeItem("ShaderNodeVertexColor"),
NodeItem("ShaderNodeUVAlongStroke", poll=line_style_shader_nodes_poll),
]),
ShaderNodeCategory("SH_NEW_OUTPUT", "Output", items=[
NodeItem("ShaderNodeOutputMaterial", poll=object_eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeOutputLight", poll=object_cycles_shader_nodes_poll),
NodeItem("ShaderNodeOutputAOV"),
NodeItem("ShaderNodeOutputWorld", poll=world_shader_nodes_poll),
NodeItem("ShaderNodeOutputLineStyle", poll=line_style_shader_nodes_poll),
]),
ShaderNodeCategory("SH_NEW_SHADER", "Shader", items=[
NodeItem("ShaderNodeMixShader", poll=eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeAddShader", poll=eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeBsdfDiffuse", poll=object_eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeBsdfPrincipled", poll=object_eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeBsdfGlossy", poll=object_eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeBsdfTransparent", poll=object_eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeBsdfRefraction", poll=object_eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeBsdfGlass", poll=object_eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeBsdfTranslucent", poll=object_eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeBsdfAnisotropic", poll=object_cycles_shader_nodes_poll),
NodeItem("ShaderNodeBsdfVelvet", poll=object_cycles_shader_nodes_poll),
NodeItem("ShaderNodeBsdfToon", poll=object_cycles_shader_nodes_poll),
NodeItem("ShaderNodeSubsurfaceScattering", poll=object_eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeEmission", poll=eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeBsdfHair", poll=object_cycles_shader_nodes_poll),
NodeItem("ShaderNodeBackground", poll=world_shader_nodes_poll),
NodeItem("ShaderNodeHoldout", poll=object_eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeVolumeAbsorption", poll=eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeVolumeScatter", poll=eevee_cycles_shader_nodes_poll),
NodeItem("ShaderNodeVolumePrincipled"),
NodeItem("ShaderNodeEeveeSpecular", poll=object_eevee_shader_nodes_poll),
NodeItem("ShaderNodeBsdfHairPrincipled", poll=object_cycles_shader_nodes_poll)
]),
ShaderNodeCategory("SH_NEW_TEXTURE", "Texture", items=[
NodeItem("ShaderNodeTexImage"),
NodeItem("ShaderNodeTexEnvironment"),
NodeItem("ShaderNodeTexSky"),
NodeItem("ShaderNodeTexNoise"),
NodeItem("ShaderNodeTexWave"),
NodeItem("ShaderNodeTexVoronoi"),
NodeItem("ShaderNodeTexMusgrave"),
NodeItem("ShaderNodeTexGradient"),
NodeItem("ShaderNodeTexMagic"),
NodeItem("ShaderNodeTexChecker"),
NodeItem("ShaderNodeTexBrick"),
NodeItem("ShaderNodeTexPointDensity"),
NodeItem("ShaderNodeTexIES"),
NodeItem("ShaderNodeTexWhiteNoise"),
]),
ShaderNodeCategory("SH_NEW_OP_COLOR", "Color", items=[
NodeItem("ShaderNodeMixRGB"),
NodeItem("ShaderNodeRGBCurve"),
NodeItem("ShaderNodeInvert"),
NodeItem("ShaderNodeLightFalloff"),
NodeItem("ShaderNodeHueSaturation"),
NodeItem("ShaderNodeGamma"),
NodeItem("ShaderNodeBrightContrast"),
]),
ShaderNodeCategory("SH_NEW_OP_VECTOR", "Vector", items=[
NodeItem("ShaderNodeMapping"),
NodeItem("ShaderNodeBump"),
NodeItem("ShaderNodeDisplacement"),
NodeItem("ShaderNodeVectorDisplacement"),
NodeItem("ShaderNodeNormalMap"),
NodeItem("ShaderNodeNormal"),
NodeItem("ShaderNodeVectorCurve"),
NodeItem("ShaderNodeVectorRotate"),
NodeItem("ShaderNodeVectorTransform"),
]),
ShaderNodeCategory("SH_NEW_CONVERTOR", "Converter", items=[
NodeItem("ShaderNodeMapRange"),
NodeItem("ShaderNodeClamp"),
NodeItem("ShaderNodeMath"),
NodeItem("ShaderNodeValToRGB"),
NodeItem("ShaderNodeRGBToBW"),
NodeItem("ShaderNodeShaderToRGB", poll=object_eevee_shader_nodes_poll),
NodeItem("ShaderNodeVectorMath"),
NodeItem("ShaderNodeSeparateRGB"),
NodeItem("ShaderNodeCombineRGB"),
NodeItem("ShaderNodeSeparateXYZ"),
NodeItem("ShaderNodeCombineXYZ"),
NodeItem("ShaderNodeSeparateHSV"),
NodeItem("ShaderNodeCombineHSV"),
NodeItem("ShaderNodeWavelength"),
NodeItem("ShaderNodeBlackbody"),
]),
ShaderNodeCategory("SH_NEW_SCRIPT", "Script", items=[
NodeItem("ShaderNodeScript"),
]),
ShaderNodeCategory("SH_NEW_GROUP", "Group", items=node_group_items),
ShaderNodeCategory("SH_NEW_LAYOUT", "Layout", items=[
NodeItem("NodeFrame"),
NodeItem("NodeReroute"),
]),
]
# Menu categories for the Compositing node editor.  The first argument is the
# internal category identifier, the second the label shown in the Add menu.
compositor_node_categories = [
    CompositorNodeCategory("CMP_INPUT", "Input", items=[
        NodeItem("CompositorNodeRLayers"),
        NodeItem("CompositorNodeImage"),
        NodeItem("CompositorNodeMovieClip"),
        NodeItem("CompositorNodeMask"),
        NodeItem("CompositorNodeRGB"),
        NodeItem("CompositorNodeValue"),
        NodeItem("CompositorNodeTexture"),
        NodeItem("CompositorNodeBokehImage"),
        NodeItem("CompositorNodeTime"),
        NodeItem("CompositorNodeTrackPos"),
    ]),
    CompositorNodeCategory("CMP_OUTPUT", "Output", items=[
        NodeItem("CompositorNodeComposite"),
        NodeItem("CompositorNodeViewer"),
        NodeItem("CompositorNodeSplitViewer"),
        NodeItem("CompositorNodeOutputFile"),
        NodeItem("CompositorNodeLevels"),
    ]),
    CompositorNodeCategory("CMP_OP_COLOR", "Color", items=[
        NodeItem("CompositorNodeMixRGB"),
        NodeItem("CompositorNodeAlphaOver"),
        NodeItem("CompositorNodeInvert"),
        NodeItem("CompositorNodeCurveRGB"),
        NodeItem("CompositorNodeHueSat"),
        NodeItem("CompositorNodeColorBalance"),
        NodeItem("CompositorNodeHueCorrect"),
        NodeItem("CompositorNodeBrightContrast"),
        NodeItem("CompositorNodeGamma"),
        NodeItem("CompositorNodeExposure"),
        NodeItem("CompositorNodeColorCorrection"),
        NodeItem("CompositorNodeTonemap"),
        NodeItem("CompositorNodeZcombine"),
    ]),
    CompositorNodeCategory("CMP_CONVERTOR", "Converter", items=[
        NodeItem("CompositorNodeMath"),
        NodeItem("CompositorNodeValToRGB"),
        NodeItem("CompositorNodeSetAlpha"),
        NodeItem("CompositorNodePremulKey"),
        NodeItem("CompositorNodeIDMask"),
        NodeItem("CompositorNodeRGBToBW"),
        NodeItem("CompositorNodeSepRGBA"),
        NodeItem("CompositorNodeCombRGBA"),
        NodeItem("CompositorNodeSepHSVA"),
        NodeItem("CompositorNodeCombHSVA"),
        NodeItem("CompositorNodeSepYUVA"),
        NodeItem("CompositorNodeCombYUVA"),
        NodeItem("CompositorNodeSepYCCA"),
        NodeItem("CompositorNodeCombYCCA"),
        NodeItem("CompositorNodeSwitchView"),
    ]),
    CompositorNodeCategory("CMP_OP_FILTER", "Filter", items=[
        NodeItem("CompositorNodeBlur"),
        NodeItem("CompositorNodeBilateralblur"),
        NodeItem("CompositorNodeDilateErode"),
        NodeItem("CompositorNodeDespeckle"),
        NodeItem("CompositorNodeFilter"),
        NodeItem("CompositorNodeBokehBlur"),
        NodeItem("CompositorNodeVecBlur"),
        NodeItem("CompositorNodeDefocus"),
        NodeItem("CompositorNodeGlare"),
        NodeItem("CompositorNodeInpaint"),
        NodeItem("CompositorNodeDBlur"),
        NodeItem("CompositorNodePixelate"),
        NodeItem("CompositorNodeSunBeams"),
        NodeItem("CompositorNodeDenoise"),
        # NOTE(review): the two entries below are not upstream Blender nodes;
        # presumably custom nodes added by this fork — confirm they are
        # registered before this menu is built.
        NodeItem("CompositorNodePython"),
        NodeItem("CompositorNodeRecogniObjectID"),
    ]),
    CompositorNodeCategory("CMP_OP_VECTOR", "Vector", items=[
        NodeItem("CompositorNodeNormal"),
        NodeItem("CompositorNodeMapValue"),
        NodeItem("CompositorNodeMapRange"),
        NodeItem("CompositorNodeNormalize"),
        NodeItem("CompositorNodeCurveVec"),
    ]),
    CompositorNodeCategory("CMP_MATTE", "Matte", items=[
        NodeItem("CompositorNodeKeying"),
        NodeItem("CompositorNodeKeyingScreen"),
        NodeItem("CompositorNodeChannelMatte"),
        NodeItem("CompositorNodeColorSpill"),
        NodeItem("CompositorNodeBoxMask"),
        NodeItem("CompositorNodeEllipseMask"),
        NodeItem("CompositorNodeLumaMatte"),
        NodeItem("CompositorNodeDiffMatte"),
        NodeItem("CompositorNodeDistanceMatte"),
        NodeItem("CompositorNodeChromaMatte"),
        NodeItem("CompositorNodeColorMatte"),
        NodeItem("CompositorNodeDoubleEdgeMask"),
        NodeItem("CompositorNodeCryptomatte"),
        NodeItem("CompositorNodeCryptomatteV2"),
    ]),
    CompositorNodeCategory("CMP_DISTORT", "Distort", items=[
        NodeItem("CompositorNodeScale"),
        NodeItem("CompositorNodeLensdist"),
        NodeItem("CompositorNodeMovieDistortion"),
        NodeItem("CompositorNodeTranslate"),
        NodeItem("CompositorNodeRotate"),
        NodeItem("CompositorNodeFlip"),
        NodeItem("CompositorNodeCrop"),
        NodeItem("CompositorNodeDisplace"),
        NodeItem("CompositorNodeMapUV"),
        NodeItem("CompositorNodeTransform"),
        NodeItem("CompositorNodeStabilize"),
        NodeItem("CompositorNodePlaneTrackDeform"),
        NodeItem("CompositorNodeCornerPin"),
    ]),
    CompositorNodeCategory("CMP_GROUP", "Group", items=node_group_items),
    CompositorNodeCategory("CMP_LAYOUT", "Layout", items=[
        NodeItem("NodeFrame"),
        NodeItem("NodeReroute"),
        NodeItem("CompositorNodeSwitch"),
    ]),
]

# Menu categories for the Texture node editor.
texture_node_categories = [
    TextureNodeCategory("TEX_INPUT", "Input", items=[
        NodeItem("TextureNodeCurveTime"),
        NodeItem("TextureNodeCoordinates"),
        NodeItem("TextureNodeTexture"),
        NodeItem("TextureNodeImage"),
    ]),
    TextureNodeCategory("TEX_OUTPUT", "Output", items=[
        NodeItem("TextureNodeOutput"),
        NodeItem("TextureNodeViewer"),
    ]),
    TextureNodeCategory("TEX_OP_COLOR", "Color", items=[
        NodeItem("TextureNodeMixRGB"),
        NodeItem("TextureNodeCurveRGB"),
        NodeItem("TextureNodeInvert"),
        NodeItem("TextureNodeHueSaturation"),
        NodeItem("TextureNodeCompose"),
        NodeItem("TextureNodeDecompose"),
    ]),
    TextureNodeCategory("TEX_PATTERN", "Pattern", items=[
        NodeItem("TextureNodeChecker"),
        NodeItem("TextureNodeBricks"),
    ]),
    TextureNodeCategory("TEX_TEXTURE", "Textures", items=[
        NodeItem("TextureNodeTexNoise"),
        NodeItem("TextureNodeTexDistNoise"),
        NodeItem("TextureNodeTexClouds"),
        NodeItem("TextureNodeTexBlend"),
        NodeItem("TextureNodeTexVoronoi"),
        NodeItem("TextureNodeTexMagic"),
        NodeItem("TextureNodeTexMarble"),
        NodeItem("TextureNodeTexWood"),
        NodeItem("TextureNodeTexMusgrave"),
        NodeItem("TextureNodeTexStucci"),
    ]),
    TextureNodeCategory("TEX_CONVERTOR", "Converter", items=[
        NodeItem("TextureNodeMath"),
        NodeItem("TextureNodeValToRGB"),
        NodeItem("TextureNodeRGBToBW"),
        NodeItem("TextureNodeValToNor"),
        NodeItem("TextureNodeDistance"),
    ]),
    TextureNodeCategory("TEX_DISTORT", "Distort", items=[
        NodeItem("TextureNodeScale"),
        NodeItem("TextureNodeTranslate"),
        NodeItem("TextureNodeRotate"),
        NodeItem("TextureNodeAt"),
    ]),
    TextureNodeCategory("TEX_GROUP", "Group", items=node_group_items),
    TextureNodeCategory("TEX_LAYOUT", "Layout", items=[
        NodeItem("NodeFrame"),
        NodeItem("NodeReroute"),
    ]),
]
def not_implemented_node(idname):
    """Return a NodeItem for *idname* whose label flags it as a mockup."""
    node_cls = getattr(bpy.types, idname)
    display_name = node_cls.bl_rna.name
    return NodeItem(idname, label="%s (mockup)" % display_name)
# Menu categories for the Geometry Nodes editor; note that several generic
# shader/function nodes (math, color ramps, vector ops) are reused here.
geometry_node_categories = [
    GeometryNodeCategory("GEO_ATTRIBUTE", "Attribute", items=[
        NodeItem("GeometryNodeAttributeRandomize"),
        NodeItem("GeometryNodeAttributeMath"),
        NodeItem("GeometryNodeAttributeCompare"),
        NodeItem("GeometryNodeAttributeConvert"),
        NodeItem("GeometryNodeAttributeFill"),
        NodeItem("GeometryNodeAttributeMix"),
        NodeItem("GeometryNodeAttributeProximity"),
        NodeItem("GeometryNodeAttributeColorRamp"),
        NodeItem("GeometryNodeAttributeVectorMath"),
        NodeItem("GeometryNodeAttributeSampleTexture"),
        NodeItem("GeometryNodeAttributeCombineXYZ"),
        NodeItem("GeometryNodeAttributeSeparateXYZ"),
        NodeItem("GeometryNodeAttributeRemove"),
    ]),
    GeometryNodeCategory("GEO_COLOR", "Color", items=[
        NodeItem("ShaderNodeValToRGB"),
        NodeItem("ShaderNodeSeparateRGB"),
        NodeItem("ShaderNodeCombineRGB"),
    ]),
    GeometryNodeCategory("GEO_GEOMETRY", "Geometry", items=[
        NodeItem("GeometryNodeTransform"),
        NodeItem("GeometryNodeJoinGeometry"),
    ]),
    GeometryNodeCategory("GEO_INPUT", "Input", items=[
        NodeItem("GeometryNodeObjectInfo"),
        NodeItem("GeometryNodeCollectionInfo"),
        NodeItem("FunctionNodeRandomFloat"),
        NodeItem("ShaderNodeValue"),
        NodeItem("FunctionNodeInputString"),
        NodeItem("FunctionNodeInputVector"),
        NodeItem("GeometryNodeIsViewport"),
    ]),
    GeometryNodeCategory("GEO_MESH", "Mesh", items=[
        NodeItem("GeometryNodeBoolean"),
        NodeItem("GeometryNodeTriangulate"),
        NodeItem("GeometryNodeEdgeSplit"),
        NodeItem("GeometryNodeSubdivisionSurface"),
        NodeItem("GeometryNodeSubdivide"),
        NodeItem("GeometryNodeMeshCube"),
        NodeItem("GeometryNodeMeshCircle"),
        NodeItem("GeometryNodeMeshUVSphere"),
        NodeItem("GeometryNodeMeshIcoSphere"),
        NodeItem("GeometryNodeMeshCylinder"),
        NodeItem("GeometryNodeMeshCone"),
        NodeItem("GeometryNodeMeshLine"),
        NodeItem("GeometryNodeMeshPlane"),
    ]),
    GeometryNodeCategory("GEO_POINT", "Point", items=[
        NodeItem("GeometryNodePointDistribute"),
        NodeItem("GeometryNodePointInstance"),
        NodeItem("GeometryNodePointSeparate"),
        NodeItem("GeometryNodePointScale"),
        NodeItem("GeometryNodePointTranslate"),
        NodeItem("GeometryNodeRotatePoints"),
        NodeItem("GeometryNodeAlignRotationToVector"),
    ]),
    GeometryNodeCategory("GEO_VOLUME", "Volume", items=[
        NodeItem("GeometryNodePointsToVolume"),
        NodeItem("GeometryNodeVolumeToMesh"),
    ]),
    GeometryNodeCategory("GEO_UTILITIES", "Utilities", items=[
        NodeItem("ShaderNodeMapRange"),
        NodeItem("ShaderNodeClamp"),
        NodeItem("ShaderNodeMath"),
        NodeItem("FunctionNodeBooleanMath"),
        NodeItem("FunctionNodeFloatCompare"),
    ]),
    GeometryNodeCategory("GEO_VECTOR", "Vector", items=[
        NodeItem("ShaderNodeSeparateXYZ"),
        NodeItem("ShaderNodeCombineXYZ"),
        NodeItem("ShaderNodeVectorMath"),
        NodeItem("ShaderNodeVectorRotate"),
    ]),
    GeometryNodeCategory("GEO_GROUP", "Group", items=node_group_items),
    GeometryNodeCategory("GEO_LAYOUT", "Layout", items=[
        NodeItem("NodeFrame"),
        NodeItem("NodeReroute"),
    ]),
]
def register():
    """Register every built-in node-category tree with nodeitems_utils."""
    for identifier, categories in (
        ('SHADER', shader_node_categories),
        ('COMPOSITING', compositor_node_categories),
        ('TEXTURE', texture_node_categories),
        ('GEOMETRY', geometry_node_categories),
    ):
        nodeitems_utils.register_node_categories(identifier, categories)
def unregister():
    """Remove every node-category tree registered by :func:`register`."""
    for identifier in ('SHADER', 'COMPOSITING', 'TEXTURE', 'GEOMETRY'):
        nodeitems_utils.unregister_node_categories(identifier)
# Allow running the file directly (e.g. from Blender's text editor) to
# (re-)register the built-in node menus.
if __name__ == "__main__":
    register()
| true | true |
f7ff5831ec205a844c057243e3ba61ed22c3e776 | 12,080 | py | Python | ml_testbench_dashboard/testbench.py | Pablololo12/ML_playground | 30e98e6fa3af2020779cc9625d6609f7162f34ee | [
"MIT"
] | null | null | null | ml_testbench_dashboard/testbench.py | Pablololo12/ML_playground | 30e98e6fa3af2020779cc9625d6609f7162f34ee | [
"MIT"
] | null | null | null | ml_testbench_dashboard/testbench.py | Pablololo12/ML_playground | 30e98e6fa3af2020779cc9625d6609f7162f34ee | [
"MIT"
] | 1 | 2020-04-01T16:07:25.000Z | 2020-04-01T16:07:25.000Z | #!/usr/bin/env python3
# Copyright (c) 2019, ARM Limited and Contributors
#
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import csv
import json
import os
from random import random
import subprocess
import sys
import yaml
# Default values; each can be overridden per-workload via the YAML config.
MODEL_FOLDER = "/data/local/tmp/models/"  # on-device directory for model files
BENCH_BIN_PATH = "/data/local/tmp/binaries/"  # on-device directory for benchmark binaries
TFLITE_BIN = "benchmark_model"  # name of the TFLite benchmark executable
ARMNN_BIN = "ExecuteNetwork"  # name of the ArmNN benchmark executable
NUM_LOOPS = 10  # default number of benchmark iterations
NUM_THREADS = [4]  # default thread counts to sweep (TFLite only)
WHERE_EXEC = ['cpu']  # default execution targets: 'cpu', 'gpu' or 'nnapi'
def execute_command(comm, shell=False):
    """Run *comm* (a list of argv tokens) and capture its output.

    When ``shell`` is True the command is executed on the connected Android
    device via ``adb shell``; otherwise it runs on the host.  Returns a
    ``(returncode, stdout_bytes, stderr_bytes)`` tuple.
    """
    full_cmd = ["adb", "shell"] + comm if shell else comm
    result = subprocess.run(
        full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
    return result.returncode, result.stdout, result.stderr
def upload_if(localP, remoteP, executable=False):
    """Push *localP* to *remoteP* on the device unless it already exists.

    Optionally marks the remote file executable.  Returns 0 on success
    (including the already-present case) and 1 on any adb failure.
    """
    exists_rc, _, _ = execute_command(["ls", remoteP], shell=True)
    if exists_rc == 0:
        # Remote file is already there; nothing to do.
        return 0
    push_rc, _, _ = execute_command(["adb", "push", localP, remoteP])
    if push_rc != 0:
        print("Error: Uploading file " + localP)
        return 1
    if not executable:
        return 0
    chmod_rc, _, _ = execute_command(["chmod", "u+x", remoteP], shell=True)
    if chmod_rc != 0:
        print("Error: Making it executable")
        return 1
    return 0
def upload_executable():
    """Push every file under the local ``binaries`` directory to the device.

    Creates the remote binaries and models directories, uploads each binary
    (marked executable), and returns 0 on success or 1 on any failure.
    """
    print("Uploading TFLite executable...", end='', flush=True)
    r, o, e = execute_command(["mkdir", "-p", BENCH_BIN_PATH], shell=True)
    if r!=0:
        print("Error: There was a problem with mkdir binaries folder")
        print(o, e)
        return 1
    # Upload everything found locally; upload_if skips files already present.
    dirs = os.listdir("binaries")
    for fil in dirs:
        ret = upload_if(
            os.path.join("binaries", fil), BENCH_BIN_PATH+fil, executable=True)
        if ret!=0:
            print("Error: There was a problem uploading "+fil)
            return 1
    # Models are uploaded later, per workload, but the folder is made here.
    r, o, e = execute_command(["mkdir", "-p", MODEL_FOLDER], shell=True)
    if r!=0:
        print("Error: There was a problem with mkdir model folder")
        return 1
    print("Done")
    return 0
def bench_exec(file, where, thr, loops):
    """Run the on-device TFLite ``benchmark_model`` binary for one model.

    ``where`` selects the delegate ('gpu', 'nnapi', anything else = CPU),
    ``thr`` the thread count and ``loops`` the number of runs.  Returns the
    raw ``(stdout, stderr)`` bytes, or ``(None, None)`` on failure.
    """
    args = [BENCH_BIN_PATH + TFLITE_BIN,
            "--graph=" + MODEL_FOLDER + file,
            "--num_runs=" + str(loops),
            "--num_threads=" + str(thr),
            "--enable_op_profiling=true"]
    # Only append a delegate flag when one is requested.  Previously an empty
    # string was unconditionally appended, which adb forwarded as a spurious
    # empty argument to the benchmark binary.
    if where == "gpu":
        args.append("--use_gpu=true")
    elif where == "nnapi":
        args.append("--use_nnapi=true")
    r, o, e = execute_command(args, shell=True)
    if r!=0:
        print("Error executing " + file + " on " + where + " mode")
        print(o)
        print(e)
        return None, None
    return o, e
def parse(doc):
    """Extract the per-layer timing table from TFLite benchmark output.

    Locates the tab-separated table that follows the "Average" summary line
    (delimited by two ``===`` ruler lines) and returns
    ``(entries, total_time)`` where each entry is
    ``{"layer": name, "time": avg_ms}``.  On failure returns ``([], None)``.
    """
    lines = doc.split('\n')
    inside_avg = False
    seen_first_rule = False
    start = 0
    end = 0
    for idx, line in enumerate(lines):
        if not line:
            continue
        if line.startswith("Average"):
            # Table header sits two lines below the "Average ..." line.
            start = idx + 2
            inside_avg = True
        if line[0] == "=" and inside_avg:
            if seen_first_rule:
                end = idx
                inside_avg = False
            else:
                seen_first_rule = True
    table = lines[start + 1:end]
    if not table:
        print("\nError extracting results table")
        return [], None
    entries = []
    total = 0.0
    for row in csv.reader(table, delimiter="\t"):
        if len(row) < 4:
            continue
        cells = [cell for cell in row if cell]
        elapsed = float(cells[3].strip())
        total = total + elapsed
        entries.append({"layer": cells[0].strip(), "time": elapsed})
    return entries, total
def parse_results(inp):
    """Extract the ArmNN JSON profiling report embedded in *inp*.

    ArmNN's ExecuteNetwork prints a JSON document mixed with other text; the
    JSON is recovered by tracking column-0 braces.  Returns a dict with
    ``type``, ``mean_time`` (ms) and per-layer ``times``, or None on failure.
    """
    json_text = ""
    depth = 0
    for line in inp.split('\n'):
        if not line:
            continue
        if line[0] == '{':
            depth = depth + 1
        if depth != 0:
            json_text = json_text + line
        if line[0] == '}':
            depth = depth - 1
    if not json_text:
        return None
    report = json.loads(json_text)["ArmNN"]
    run_keys = list(report.keys())
    if len(run_keys) > 1:
        print("There is more than 1 run")
    run = report[run_keys[0]]
    if "Execute_#2" not in run:
        print("Execution not found")
        return None
    execute = run["Execute_#2"]
    result = {"type": "ArmNN",
              "mean_time": execute["Wall clock time_#2"]["raw"][0] / 1000.0,
              "times": []}
    # The first four keys of Execute_#2 are run-level metadata; the rest are
    # per-layer entries whose second sub-key holds the raw timing.
    for layer_key in list(execute.keys())[4:]:
        entry = execute[layer_key]
        entry_keys = list(entry.keys())
        result["times"].append(
            {"layer": layer_key,
             "time": entry[entry_keys[1]]["raw"][0] / 1000.0})
    return result
def execute_tflite(work_config, model):
    """Run the TFLite benchmark for one model over the configured sweep.

    ``work_config`` may override the module defaults with 'threads'
    (list of ints), 'options' (list of 'cpu'/'gpu'/'nnapi') and 'loops'.
    Returns a list of result dicts, one per (target, thread-count) pair.
    """
    TH = NUM_THREADS
    WH = WHERE_EXEC
    LP = NUM_LOOPS
    out = []
    # Only the basename matters; the model was uploaded under MODEL_FOLDER.
    model = os.path.split(model)[-1]
    if 'threads' in work_config.keys():
        TH = work_config['threads']
    if 'options' in work_config.keys():
        WH = work_config['options']
    if 'loops' in work_config.keys():
        LP = work_config['loops']
    for T in TH:
        for W in WH:
            ret, err = bench_exec(model, W, T, LP)
            if ret is not None:
                ret = ret.decode('ASCII')
                li, m_t = parse(ret)
                if len(li) == 0:
                    # Parsing failed; skip this configuration.
                    print("Error")
                    continue
                out.append(
                    {'type':W + "_" + str(T) + "Threads", "mean_time": m_t,
                     "times": li, "threads": T})
    return out
def execute_armnn(conf, model):
    """Run ArmNN's ExecuteNetwork for one model and average the timings.

    ``conf`` must provide 'input_shape', 'input_name' and 'output_name';
    optional keys are 'concurrent', 'quantized', 'fp16', 'loops' and
    'accelerator' ('Gpu'/'Cpu').  A random input tensor is generated,
    pushed to the device, and the benchmark is run 'loops' times; per-layer
    and mean times are averaged across runs.  Returns the result dict from
    :func:`parse_results` (averaged) or None on failure.
    """
    LP = 1
    shap = []
    inName = ""
    outName = ""
    # Option flags default to a single space so they are harmless no-op
    # arguments when the corresponding feature is disabled.
    concurrent = " "
    quant = " "
    turbo = " "
    acc = " "
    model = os.path.split(model)[-1]
    # Handle options -- the first three are mandatory.
    if 'input_shape' not in conf.keys():
        print("Error: Input shape required on ArmNN")
        return None
    if 'input_name' not in conf.keys() or 'output_name' not in conf.keys():
        print("Error: Input and Output names required")
        return None
    shap = conf['input_shape']
    inName = conf['input_name']
    outName = conf['output_name']
    if 'concurrent' in conf.keys():
        if conf['concurrent']:
            concurrent = "-n"
    if 'quantized' in conf.keys():
        if conf['quantized']:
            quant = "-q"
    if 'fp16' in conf.keys():
        if conf['fp16']:
            turbo = "-h"
    if 'loops' in conf.keys():
        LP = conf['loops']
    if 'accelerator' in conf.keys():
        if 'Gpu' == conf['accelerator']:
            acc = "-c GpuAcc"
        if 'Cpu' == conf['accelerator']:
            acc = "-c CpuAcc"
    # Generate one random value per input-tensor element and push it to the
    # device as the benchmark input.
    with open("temp_input_file","w") as f:
        t = 1
        for s in shap:
            t = t * s
        for i in range(t):
            f.write(str(random()) + "\n")
    upload_if("temp_input_file", MODEL_FOLDER + "intemp")
    out = None
    for i in range(LP):
        # The whole command line is joined by `adb shell`, hence the
        # export && ... chaining and the pre-joined "-m path" style tokens.
        r, o, e = execute_command(["export LD_LIBRARY_PATH=" + BENCH_BIN_PATH,
                                   "&&",
                                   BENCH_BIN_PATH + ARMNN_BIN,
                                   concurrent,
                                   quant,
                                   turbo,
                                   "-e",
                                   "-f tflite-binary",
                                   "-m " + MODEL_FOLDER + model,
                                   "-i " + inName,
                                   "-o " + outName,
                                   acc,
                                   "-c CpuRef",
                                   "-d " + MODEL_FOLDER + "intemp"],
                                  shell = True)
        if r != 0:
            print("Error: Executing armnn")
            print(o)
            print(e)
            break
        o = o.decode('ascii')
        o = parse_results(o)
        if o == None:
            print("Error: Parsing ArmNN")
            break
        if out == None:
            out = o
        else:
            # Accumulate mean and per-layer times; averaged after the loop.
            out['mean_time'] = out['mean_time']+o['mean_time']
            for i in range(len(out['times'])):
                out['times'][i]['time'] = (out['times'][i]['time']
                                           + o['times'][i]['time'])
    if out != None:
        # NOTE(review): if a run failed part-way, dividing by LP slightly
        # under-reports the average -- confirm intended behaviour.
        out['mean_time'] = out['mean_time'] / LP
        for i in range(len(out['times'])):
            out['times'][i]['time'] = out['times'][i]['time'] / LP
    r, o, e = execute_command(["rm",MODEL_FOLDER+"intemp"], shell = True)
    return out
def loop_workloads(data):
    """Run every workload described in the parsed YAML config.

    Uploads the benchmark binaries once, then for each workload uploads its
    model and runs the requested backends ('tflite' and/or 'armnn').
    Returns a dict mapping workload name to its list of results, or 1 on a
    setup error.  Remote folders are removed afterwards.
    """
    if 'workloads' not in data.keys():
        print("Error: Parsing yaml")
        return 1
    r = upload_executable()
    if r != 0:
        return 1
    dic = {}
    for work in data['workloads']:
        if 'model' not in work.keys():
            print("Error: Model option not found")
            continue
        name = os.path.split(work['model'])[-1]
        if 'name' in work.keys():
            print("\tRunning workload " + work['name'])
        else:
            print("\tRunning workload " + name)
        upload_if(
            os.path.join(
                *list(os.path.split(work['model']))),
            MODEL_FOLDER + name)
        res = []
        if 'tflite' in work.keys():
            print("\t\tExecuting with tflite")
            out = execute_tflite(work['tflite'], work['model'])
            res.extend(out)
        if 'armnn' in work.keys():
            print("\t\tExecuting with armnn")
            out = execute_armnn(work['armnn'], work['model'])
            if out != None:
                res.append(out)
        # Prefer the user-provided workload name as the result key.
        if 'name' in work.keys():
            name = work['name']
        dic[name] = res
    # Clean up everything that was uploaded to the device.
    r, o, e = execute_command(["rm", "-r", BENCH_BIN_PATH], shell = True)
    r, o, e = execute_command(["rm", "-r", MODEL_FOLDER], shell = True)
    return dic
def main(args):
    """Entry point: load the YAML config, run workloads, write JSON results.

    ``args`` is ``sys.argv``; ``args[1]`` must be the YAML config path or
    ``-h``.  Exits with status 128 on usage or file errors.
    """
    if len(args) < 2:
        print("Error: Argument error")
        print("Usage:\n./testbench.py [yaml file with the config] | -h")
        sys.exit(128)
    if args[1] == '-h':
        print("Usage:\n./testbench.py [yaml file with the config] | -h")
        return
    if not os.path.isfile(args[1]):
        print("Error: File not found")
        sys.exit(128)
    with open(args[1], 'r') as f:
        data = yaml.load(f, Loader=yaml.FullLoader)
    out = loop_workloads(data)
    # Output file defaults to results.json unless global.outputfile is set.
    outfile = "results.json"
    if 'global' in data.keys():
        if 'outputfile' in data['global'].keys():
            outfile = data['global']['outputfile']
    print("Writting output file...", end='', flush=True)
    with open(outfile, "w") as f:
        json.dump(out, f)
    print("Done")
if __name__ == "__main__":
main(sys.argv) | 33.462604 | 81 | 0.507202 |
import csv
import json
import os
from random import random
import subprocess
import sys
import yaml
MODEL_FOLDER = "/data/local/tmp/models/"
BENCH_BIN_PATH = "/data/local/tmp/binaries/"
TFLITE_BIN = "benchmark_model"
ARMNN_BIN = "ExecuteNetwork"
NUM_LOOPS = 10
NUM_THREADS = [4]
WHERE_EXEC = ['cpu']
def execute_command(comm, shell=False):
if shell:
comm = ["adb", "shell"] + comm
process = subprocess.run(
comm, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
return process.returncode, process.stdout, process.stderr
def upload_if(localP, remoteP, executable=False):
r, o, e = execute_command(["ls", remoteP], shell=True)
if r==0:
return 0
r, o, e = execute_command(["adb", "push", localP, remoteP])
if r!=0:
print("Error: Uploading file " + localP)
return 1
if not executable:
return 0
r, o, e = execute_command(["chmod", "u+x", remoteP], shell=True)
if r!=0:
print("Error: Making it executable")
return 1
return 0
def upload_executable():
print("Uploading TFLite executable...", end='', flush=True)
r, o, e = execute_command(["mkdir", "-p", BENCH_BIN_PATH], shell=True)
if r!=0:
print("Error: There was a problem with mkdir binaries folder")
print(o, e)
return 1
dirs = os.listdir("binaries")
for fil in dirs:
ret = upload_if(
os.path.join("binaries", fil), BENCH_BIN_PATH+fil, executable=True)
if ret!=0:
print("Error: There was a problem uploading "+fil)
return 1
r, o, e = execute_command(["mkdir", "-p", MODEL_FOLDER], shell=True)
if r!=0:
print("Error: There was a problem with mkdir model folder")
return 1
print("Done")
return 0
def bench_exec(file, where, thr, loops):
opt = ""
if where == "gpu":
opt = "--use_gpu=true"
elif where == "nnapi":
opt = "--use_nnapi=true"
r, o, e = execute_command([BENCH_BIN_PATH + TFLITE_BIN,
"--graph=" + MODEL_FOLDER + file,
"--num_runs=" + str(loops),
"--num_threads=" + str(thr),
"--enable_op_profiling=true",
opt], shell=True)
if r!=0:
print("Error executing " + file + " on " + where + " mode")
print(o)
print(e)
return None, None
return o, e
def parse(doc):
doc = doc.split('\n')
in_avg = False
fir = False
l_s = 0
l_e = 0
for i in range(len(doc)):
line = doc[i]
if len(line) == 0:
continue
if "Average" == line[0:7]:
l_s = i + 2
in_avg = True
if line[0] == "=" and in_avg and fir:
l_e = i
in_avg = False
if line[0] == "=" and in_avg and not fir:
fir = True
table = doc[l_s + 1:l_e]
if len(table)==0:
print("\nError extracting results table")
return [], None
x = csv.reader(table, delimiter="\t")
li = []
mean_time = 0.0
for row in x:
if len(row) < 4:
continue
row = [x for x in row if x]
t = float(row[3].strip())
mean_time = mean_time + t
li.append({"layer":row[0].strip(), "time":t})
return li, mean_time
def parse_results(inp):
st = ""
lines = inp.split('\n')
is_json = 0
for line in lines:
if len(line) == 0:
continue
if line[0] == '{':
is_json = is_json + 1
if is_json != 0:
st = st + line
if line[0] == '}':
is_json = is_json - 1
if st == "":
return None
data = json.loads(st)
data = data["ArmNN"]
outd = {}
k = list(data.keys())
if len(k) > 1:
print("There is more than 1 run")
data = data[k[0]]
if not "Execute_#2" in data:
print("Execution not found")
return None
data = data["Execute_#2"]
outd["type"] = "ArmNN"
outd["mean_time"] = data["Wall clock time_#2"]["raw"][0] / 1000.0
outd["times"] = []
keys = list(data.keys())
for k in keys[4:]:
ent = data[k]
ks = list(ent.keys())
outd["times"].append(
{"layer": k, "time": ent[ks[1]]["raw"][0] / 1000.0})
return outd
def execute_tflite(work_config, model):
TH = NUM_THREADS
WH = WHERE_EXEC
LP = NUM_LOOPS
out = []
model = os.path.split(model)[-1]
if 'threads' in work_config.keys():
TH = work_config['threads']
if 'options' in work_config.keys():
WH = work_config['options']
if 'loops' in work_config.keys():
LP = work_config['loops']
for T in TH:
for W in WH:
ret, err = bench_exec(model, W, T, LP)
if ret is not None:
ret = ret.decode('ASCII')
li, m_t = parse(ret)
if len(li) == 0:
print("Error")
continue
out.append(
{'type':W + "_" + str(T) + "Threads", "mean_time": m_t,
"times": li, "threads": T})
return out
def execute_armnn(conf, model):
LP = 1
shap = []
inName = ""
outName = ""
concurrent = " "
quant = " "
turbo = " "
acc = " "
model = os.path.split(model)[-1]
if 'input_shape' not in conf.keys():
print("Error: Input shape required on ArmNN")
return None
if 'input_name' not in conf.keys() or 'output_name' not in conf.keys():
print("Error: Input and Output names required")
return None
shap = conf['input_shape']
inName = conf['input_name']
outName = conf['output_name']
if 'concurrent' in conf.keys():
if conf['concurrent']:
concurrent = "-n"
if 'quantized' in conf.keys():
if conf['quantized']:
quant = "-q"
if 'fp16' in conf.keys():
if conf['fp16']:
turbo = "-h"
if 'loops' in conf.keys():
LP = conf['loops']
if 'accelerator' in conf.keys():
if 'Gpu' == conf['accelerator']:
acc = "-c GpuAcc"
if 'Cpu' == conf['accelerator']:
acc = "-c CpuAcc"
with open("temp_input_file","w") as f:
t = 1
for s in shap:
t = t * s
for i in range(t):
f.write(str(random()) + "\n")
upload_if("temp_input_file", MODEL_FOLDER + "intemp")
out = None
for i in range(LP):
r, o, e = execute_command(["export LD_LIBRARY_PATH=" + BENCH_BIN_PATH,
"&&",
BENCH_BIN_PATH + ARMNN_BIN,
concurrent,
quant,
turbo,
"-e",
"-f tflite-binary",
"-m " + MODEL_FOLDER + model,
"-i " + inName,
"-o " + outName,
acc,
"-c CpuRef",
"-d " + MODEL_FOLDER + "intemp"],
shell = True)
if r != 0:
print("Error: Executing armnn")
print(o)
print(e)
break
o = o.decode('ascii')
o = parse_results(o)
if o == None:
print("Error: Parsing ArmNN")
break
if out == None:
out = o
else:
out['mean_time'] = out['mean_time']+o['mean_time']
for i in range(len(out['times'])):
out['times'][i]['time'] = (out['times'][i]['time']
+ o['times'][i]['time'])
if out != None:
out['mean_time'] = out['mean_time'] / LP
for i in range(len(out['times'])):
out['times'][i]['time'] = out['times'][i]['time'] / LP
r, o, e = execute_command(["rm",MODEL_FOLDER+"intemp"], shell = True)
return out
def loop_workloads(data):
if 'workloads' not in data.keys():
print("Error: Parsing yaml")
return 1
r = upload_executable()
if r != 0:
return 1
dic = {}
for work in data['workloads']:
if 'model' not in work.keys():
print("Error: Model option not found")
continue
name = os.path.split(work['model'])[-1]
if 'name' in work.keys():
print("\tRunning workload " + work['name'])
else:
print("\tRunning workload " + name)
upload_if(
os.path.join(
*list(os.path.split(work['model']))),
MODEL_FOLDER + name)
res = []
if 'tflite' in work.keys():
print("\t\tExecuting with tflite")
out = execute_tflite(work['tflite'], work['model'])
res.extend(out)
if 'armnn' in work.keys():
print("\t\tExecuting with armnn")
out = execute_armnn(work['armnn'], work['model'])
if out != None:
res.append(out)
if 'name' in work.keys():
name = work['name']
dic[name] = res
r, o, e = execute_command(["rm", "-r", BENCH_BIN_PATH], shell = True)
r, o, e = execute_command(["rm", "-r", MODEL_FOLDER], shell = True)
return dic
def main(args):
if len(args) < 2:
print("Error: Argument error")
print("Usage:\n./testbench.py [yaml file with the config] | -h")
sys.exit(128)
if args[1] == '-h':
print("Usage:\n./testbench.py [yaml file with the config] | -h")
return
if not os.path.isfile(args[1]):
print("Error: File not found")
sys.exit(128)
with open(args[1], 'r') as f:
data = yaml.load(f, Loader=yaml.FullLoader)
out = loop_workloads(data)
outfile = "results.json"
if 'global' in data.keys():
if 'outputfile' in data['global'].keys():
outfile = data['global']['outputfile']
print("Writting output file...", end='', flush=True)
with open(outfile, "w") as f:
json.dump(out, f)
print("Done")
if __name__ == "__main__":
main(sys.argv) | true | true |
f7ff58c44a935764998ddc31eb4fd527ffb44331 | 622 | py | Python | wellcad/com/_comment_box.py | ArnaudCrl/pywellcad | 770ac70cd1c20d06e4590979976498f75c9db91e | [
"BSD-3-Clause"
] | 6 | 2022-02-18T06:28:43.000Z | 2022-03-24T18:54:18.000Z | wellcad/com/_comment_box.py | ArnaudCrl/pywellcad | 770ac70cd1c20d06e4590979976498f75c9db91e | [
"BSD-3-Clause"
] | 85 | 2022-02-17T13:41:06.000Z | 2022-03-04T14:34:18.000Z | wellcad/com/_comment_box.py | ArnaudCrl/pywellcad | 770ac70cd1c20d06e4590979976498f75c9db91e | [
"BSD-3-Clause"
] | 2 | 2022-02-10T09:01:21.000Z | 2022-03-12T02:41:32.000Z | from ._dispatch_wrapper import DispatchWrapper
class CommentBox(DispatchWrapper):
@property
def top_depth(self):
"""float: The top depth of the comment box in current depth
units."""
return self._dispatch.TopDepth
@property
def bottom_depth(self):
"""float: The bottom depth of the comment box in current depth
units."""
return self._dispatch.BottomDepth
@property
def text(self):
"""str: The text of the comment box."""
return self._dispatch.Text
@text.setter
def text(self, value):
self._dispatch.Text = value
| 23.923077 | 70 | 0.639871 | from ._dispatch_wrapper import DispatchWrapper
class CommentBox(DispatchWrapper):
@property
def top_depth(self):
return self._dispatch.TopDepth
@property
def bottom_depth(self):
return self._dispatch.BottomDepth
@property
def text(self):
return self._dispatch.Text
@text.setter
def text(self, value):
self._dispatch.Text = value
| true | true |
f7ff58e364652074db91b1ef4d1e0055852b1294 | 6,602 | py | Python | espnet2/samplers/num_elements_batch_sampler.py | ana-kuznetsova/espnet | 263a9ba04b626fa46442d6679531ce98c7afa9df | [
"Apache-2.0"
] | 1 | 2021-07-12T17:48:24.000Z | 2021-07-12T17:48:24.000Z | espnet2/samplers/num_elements_batch_sampler.py | ana-kuznetsova/espnet | 263a9ba04b626fa46442d6679531ce98c7afa9df | [
"Apache-2.0"
] | null | null | null | espnet2/samplers/num_elements_batch_sampler.py | ana-kuznetsova/espnet | 263a9ba04b626fa46442d6679531ce98c7afa9df | [
"Apache-2.0"
] | null | null | null | from typing import Iterator
from typing import List
from typing import Tuple
from typing import Union
import numpy as np
from typeguard import check_argument_types
from espnet2.fileio.read_text import load_num_sequence_text
from espnet2.samplers.abs_sampler import AbsSampler
class NumElementsBatchSampler(AbsSampler):
def __init__(
self,
batch_bins: int,
shape_files: Union[Tuple[str, ...], List[str]],
min_batch_size: int = 1,
sort_in_batch: str = "descending",
sort_batch: str = "ascending",
drop_last: bool = False,
padding: bool = True,
):
assert check_argument_types()
assert batch_bins > 0
if sort_batch != "ascending" and sort_batch != "descending":
raise ValueError(
f"sort_batch must be ascending or descending: {sort_batch}"
)
if sort_in_batch != "descending" and sort_in_batch != "ascending":
raise ValueError(
f"sort_in_batch must be ascending or descending: {sort_in_batch}"
)
self.batch_bins = batch_bins
self.shape_files = shape_files
self.sort_in_batch = sort_in_batch
self.sort_batch = sort_batch
self.drop_last = drop_last
# utt2shape: (Length, ...)
# uttA 100,...
# uttB 201,...
utt2shapes = [
load_num_sequence_text(s, loader_type="csv_int") for s in shape_files
]
first_utt2shape = utt2shapes[0]
for s, d in zip(shape_files, utt2shapes):
if set(d) != set(first_utt2shape):
raise RuntimeError(
f"keys are mismatched between {s} != {shape_files[0]}"
)
#JD - fix nan grad issue by filtering utterances where the length of the text in tokens
# is less than the length of the audio, downsampled by a factor of 4
tmp_utt2shapes_0 = dict()
tmp_utt2shapes_1 = dict()
for k in first_utt2shape:
# assuming that the first shape file is speech shape, second is text shape
# this order is hard-coded into asr.sh in the TEMPLATE experiment
if utt2shapes[1][k][0]+1 < utt2shapes[0][k][0]//5:
tmp_utt2shapes_0[k] = utt2shapes[0][k]
tmp_utt2shapes_1[k] = utt2shapes[1][k]
num_filtered = len(first_utt2shape) - len(tmp_utt2shapes_0)
print("filtered " + str(num_filtered) + " utterances out of " + str(len(first_utt2shape)), flush=True)
utt2shapes = [tmp_utt2shapes_0, tmp_utt2shapes_1]
first_utt2shape = tmp_utt2shapes_0
# Sort samples in ascending order
# (shape order should be like (Length, Dim))
keys = sorted(first_utt2shape, key=lambda k: first_utt2shape[k][0])
if len(keys) == 0:
raise RuntimeError(f"0 lines found: {shape_files[0]}")
if padding:
# If padding case, the feat-dim must be same over whole corpus,
# therefore the first sample is referred
feat_dims = [np.prod(d[keys[0]][1:]) for d in utt2shapes]
else:
feat_dims = None
# Decide batch-sizes
batch_sizes = []
current_batch_keys = []
for key in keys:
current_batch_keys.append(key)
# shape: (Length, dim1, dim2, ...)
if padding:
for d, s in zip(utt2shapes, shape_files):
if tuple(d[key][1:]) != tuple(d[keys[0]][1:]):
raise RuntimeError(
"If padding=True, the "
f"feature dimension must be unified: {s}",
)
bins = sum(
len(current_batch_keys) * sh[key][0] * d
for sh, d in zip(utt2shapes, feat_dims)
)
else:
bins = sum(
np.prod(d[k]) for k in current_batch_keys for d in utt2shapes
)
if bins > batch_bins and len(current_batch_keys) >= min_batch_size:
batch_sizes.append(len(current_batch_keys))
current_batch_keys = []
else:
if len(current_batch_keys) != 0 and (
not self.drop_last or len(batch_sizes) == 0
):
batch_sizes.append(len(current_batch_keys))
if len(batch_sizes) == 0:
# Maybe we can't reach here
raise RuntimeError("0 batches")
# If the last batch-size is smaller than minimum batch_size,
# the samples are redistributed to the other mini-batches
if len(batch_sizes) > 1 and batch_sizes[-1] < min_batch_size:
for i in range(batch_sizes.pop(-1)):
batch_sizes[-(i % len(batch_sizes)) - 1] += 1
if not self.drop_last:
# Bug check
assert sum(batch_sizes) == len(keys), f"{sum(batch_sizes)} != {len(keys)}"
# Set mini-batch
self.batch_list = []
iter_bs = iter(batch_sizes)
bs = next(iter_bs)
minibatch_keys = []
for key in keys:
minibatch_keys.append(key)
if len(minibatch_keys) == bs:
if sort_in_batch == "descending":
minibatch_keys.reverse()
elif sort_in_batch == "ascending":
# Key are already sorted in ascending
pass
else:
raise ValueError(
"sort_in_batch must be ascending"
f" or descending: {sort_in_batch}"
)
self.batch_list.append(tuple(minibatch_keys))
minibatch_keys = []
try:
bs = next(iter_bs)
except StopIteration:
break
if sort_batch == "ascending":
pass
elif sort_batch == "descending":
self.batch_list.reverse()
else:
raise ValueError(
f"sort_batch must be ascending or descending: {sort_batch}"
)
def __repr__(self):
return (
f"{self.__class__.__name__}("
f"N-batch={len(self)}, "
f"batch_bins={self.batch_bins}, "
f"sort_in_batch={self.sort_in_batch}, "
f"sort_batch={self.sort_batch})"
)
    def __len__(self):
        """Return the number of mini-batches."""
        return len(self.batch_list)
    def __iter__(self) -> Iterator[Tuple[str, ...]]:
        """Iterate over mini-batches; each batch is a tuple of utterance keys."""
        return iter(self.batch_list)
| 37.089888 | 110 | 0.547561 | from typing import Iterator
from typing import List
from typing import Tuple
from typing import Union
import numpy as np
from typeguard import check_argument_types
from espnet2.fileio.read_text import load_num_sequence_text
from espnet2.samplers.abs_sampler import AbsSampler
class NumElementsBatchSampler(AbsSampler):
    """Batch sampler that packs utterances so each mini-batch holds at most
    ``batch_bins`` feature elements (sequence length x feature dimension).

    Also applies a filtering pass that keeps only utterances for which the
    second shape's length is well below a fifth of the first shape's length
    (presumably input-speech vs. output-text lengths -- TODO confirm against
    the shape files used by the caller).
    """
    def __init__(
        self,
        batch_bins: int,
        shape_files: Union[Tuple[str, ...], List[str]],
        min_batch_size: int = 1,
        sort_in_batch: str = "descending",
        sort_batch: str = "ascending",
        drop_last: bool = False,
        padding: bool = True,
    ):
        assert check_argument_types()
        assert batch_bins > 0
        if sort_batch != "ascending" and sort_batch != "descending":
            raise ValueError(
                f"sort_batch must be ascending or descending: {sort_batch}"
            )
        if sort_in_batch != "descending" and sort_in_batch != "ascending":
            raise ValueError(
                f"sort_in_batch must be ascending or descending: {sort_in_batch}"
            )
        self.batch_bins = batch_bins
        self.shape_files = shape_files
        self.sort_in_batch = sort_in_batch
        self.sort_batch = sort_batch
        self.drop_last = drop_last
        # utt2shapes: one dict per shape file, mapping utterance-key -> shape
        # list, where shape[0] is the sequence length.
        utt2shapes = [
            load_num_sequence_text(s, loader_type="csv_int") for s in shape_files
        ]
        first_utt2shape = utt2shapes[0]
        # All shape files must describe exactly the same utterance keys.
        for s, d in zip(shape_files, utt2shapes):
            if set(d) != set(first_utt2shape):
                raise RuntimeError(
                    f"keys are mismatched between {s} != {shape_files[0]}"
                )
        # Heuristic filter: keep an utterance only when
        # second-length + 1 < first-length // 5; everything else is dropped.
        tmp_utt2shapes_0 = dict()
        tmp_utt2shapes_1 = dict()
        for k in first_utt2shape:
            if utt2shapes[1][k][0]+1 < utt2shapes[0][k][0]//5:
                tmp_utt2shapes_0[k] = utt2shapes[0][k]
                tmp_utt2shapes_1[k] = utt2shapes[1][k]
        num_filtered = len(first_utt2shape) - len(tmp_utt2shapes_0)
        print("filtered " + str(num_filtered) + " utterances out of " + str(len(first_utt2shape)), flush=True)
        utt2shapes = [tmp_utt2shapes_0, tmp_utt2shapes_1]
        first_utt2shape = tmp_utt2shapes_0
        # Sort utterances by ascending length of the first shape.
        keys = sorted(first_utt2shape, key=lambda k: first_utt2shape[k][0])
        if len(keys) == 0:
            raise RuntimeError(f"0 lines found: {shape_files[0]}")
        if padding:
            # With padding every utterance costs max-length x feat-dim, so the
            # per-file feature dimensions (product of the non-length axes) are
            # precomputed here.
            feat_dims = [np.prod(d[keys[0]][1:]) for d in utt2shapes]
        else:
            feat_dims = None
        # Decide batch-sizes: greedily grow a batch until its bin count would
        # exceed batch_bins (and min_batch_size is satisfied).
        batch_sizes = []
        current_batch_keys = []
        for key in keys:
            current_batch_keys.append(key)
            # shape: (Length, dim1, dim2, ...)
            if padding:
                for d, s in zip(utt2shapes, shape_files):
                    if tuple(d[key][1:]) != tuple(d[keys[0]][1:]):
                        raise RuntimeError(
                            "If padding=True, the "
                            f"feature dimension must be unified: {s}",
                        )
                # Padded cost: batch-size x current (max) length x feat dim,
                # summed over all shape files.
                bins = sum(
                    len(current_batch_keys) * sh[key][0] * d
                    for sh, d in zip(utt2shapes, feat_dims)
                )
            else:
                # Unpadded cost: exact number of elements over the batch.
                bins = sum(
                    np.prod(d[k]) for k in current_batch_keys for d in utt2shapes
                )
            if bins > batch_bins and len(current_batch_keys) >= min_batch_size:
                batch_sizes.append(len(current_batch_keys))
                current_batch_keys = []
        else:
            # for-else: flush the trailing partial batch unless drop_last
            # requests it be discarded (but never discard the only batch).
            if len(current_batch_keys) != 0 and (
                not self.drop_last or len(batch_sizes) == 0
            ):
                batch_sizes.append(len(current_batch_keys))
        if len(batch_sizes) == 0:
            # Maybe we can't reach here
            raise RuntimeError("0 batches")
        # If the last batch-size is smaller than minimum batch_size,
        # the samples are redistributed to the other mini-batches
        if len(batch_sizes) > 1 and batch_sizes[-1] < min_batch_size:
            for i in range(batch_sizes.pop(-1)):
                batch_sizes[-(i % len(batch_sizes)) - 1] += 1
        if not self.drop_last:
            # Bug check
            assert sum(batch_sizes) == len(keys), f"{sum(batch_sizes)} != {len(keys)}"
        # Set mini-batch: materialize key tuples following the decided sizes.
        self.batch_list = []
        iter_bs = iter(batch_sizes)
        bs = next(iter_bs)
        minibatch_keys = []
        for key in keys:
            minibatch_keys.append(key)
            if len(minibatch_keys) == bs:
                if sort_in_batch == "descending":
                    minibatch_keys.reverse()
                elif sort_in_batch == "ascending":
                    # Key are already sorted in ascending
                    pass
                else:
                    raise ValueError(
                        "sort_in_batch must be ascending"
                        f" or descending: {sort_in_batch}"
                    )
                self.batch_list.append(tuple(minibatch_keys))
                minibatch_keys = []
                try:
                    bs = next(iter_bs)
                except StopIteration:
                    break
        if sort_batch == "ascending":
            pass
        elif sort_batch == "descending":
            self.batch_list.reverse()
        else:
            raise ValueError(
                f"sort_batch must be ascending or descending: {sort_batch}"
            )
    def __repr__(self):
        """Summarize the sampler configuration for debugging."""
        return (
            f"{self.__class__.__name__}("
            f"N-batch={len(self)}, "
            f"batch_bins={self.batch_bins}, "
            f"sort_in_batch={self.sort_in_batch}, "
            f"sort_batch={self.sort_batch})"
        )
    def __len__(self):
        """Return the number of mini-batches."""
        return len(self.batch_list)
    def __iter__(self) -> Iterator[Tuple[str, ...]]:
        """Iterate over mini-batches; each is a tuple of utterance keys."""
        return iter(self.batch_list)
| true | true |
f7ff58ef33cd371df2cf4d224f39cb9d6ceac190 | 10,774 | py | Python | core/domain/event_services.py | alexewu/oppia | 57c3c660ab7974835ec068d7c7f5ce5b5f1f25ae | [
"Apache-2.0"
] | null | null | null | core/domain/event_services.py | alexewu/oppia | 57c3c660ab7974835ec068d7c7f5ce5b5f1f25ae | [
"Apache-2.0"
] | 7 | 2019-08-20T08:30:43.000Z | 2022-02-12T18:47:57.000Z | core/domain/event_services.py | ledriod/oppia | 4f8f95c6689cd36f0b65672b80d98a3463b001f8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for handling events."""
import inspect
from core import jobs_registry
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import stats_domain
from core.domain import stats_services
from core.platform import models
from core.platform.taskqueue import gae_taskqueue_services as taskqueue_services
import feconf
# Import the datastore model classes via the platform registry.
(stats_models, feedback_models) = models.Registry.import_models([
    models.NAMES.statistics, models.NAMES.feedback])
# NOTE(review): this rebinds (shadows) the gae_taskqueue_services import above
# with the platform-selected task queue service.
taskqueue_services = models.Registry.import_taskqueue_services()
class BaseEventHandler(object):
    """Base class for event dispatchers."""
    # A string denoting the type of the event. Should be specified by
    # subclasses and considered immutable.
    EVENT_TYPE = None
    @classmethod
    def _notify_continuous_computation_listeners_async(cls, *args, **kwargs):
        """Dispatch events asynchronously to continuous computation realtime
        layers that are listening for them.
        """
        taskqueue_services.defer(
            jobs_registry.ContinuousComputationEventDispatcher.dispatch_event,
            taskqueue_services.QUEUE_NAME_EVENTS, cls.EVENT_TYPE, *args,
            **kwargs)
    @classmethod
    def _handle_event(cls, *args, **kwargs):
        """Perform in-request processing of an incoming event."""
        raise NotImplementedError(
            'Subclasses of BaseEventHandler should implement the '
            '_handle_event() method, using explicit arguments '
            '(no *args or **kwargs).')
    @classmethod
    def record(cls, *args, **kwargs):
        """Process incoming events.
        Callers of event handlers should call this method, not _handle_event().
        """
        # Asynchronous listeners are notified first, then the event is
        # handled synchronously within the current request.
        cls._notify_continuous_computation_listeners_async(*args, **kwargs)
        cls._handle_event(*args, **kwargs)
class StatsEventsHandler(BaseEventHandler):
    """Event handler for incremental update of analytics model using aggregated
    stats data.
    """
    EVENT_TYPE = feconf.EVENT_TYPE_ALL_STATS
    @classmethod
    def _is_latest_version(cls, exp_id, exp_version):
        """Verifies whether the exploration version for the stats to be stored
        corresponds to the latest version of the exploration.
        """
        exploration = exp_services.get_exploration_by_id(exp_id)
        return exploration.version == exp_version
    @classmethod
    def _handle_event(cls, exploration_id, exp_version, aggregated_stats):
        """Defers a stats update task, but only when exp_version is still the
        exploration's latest version.
        """
        if cls._is_latest_version(exploration_id, exp_version):
            taskqueue_services.defer(
                stats_services.update_stats,
                taskqueue_services.QUEUE_NAME_STATS, exploration_id,
                exp_version, aggregated_stats)
class AnswerSubmissionEventHandler(BaseEventHandler):
    """Event handler for recording answer submissions."""
    EVENT_TYPE = feconf.EVENT_TYPE_ANSWER_SUBMITTED
    @classmethod
    def _notify_continuous_computation_listeners_async(cls, *args, **kwargs):
        # Disable this method until we can deal with large answers, otherwise
        # the data that is being placed on the task queue is too large.
        pass
    @classmethod
    def _handle_event(
            cls, exploration_id, exploration_version, state_name,
            interaction_id, answer_group_index, rule_spec_index,
            classification_categorization, session_id, time_spent_in_secs,
            params, normalized_answer):
        """Records an event when an answer triggers a rule. The answer recorded
        here is a Python-representation of the actual answer submitted by the
        user.
        """
        # TODO(sll): Escape these args?
        stats_services.record_answer(
            exploration_id, exploration_version, state_name, interaction_id,
            stats_domain.SubmittedAnswer(
                normalized_answer, interaction_id, answer_group_index,
                rule_spec_index, classification_categorization, params,
                session_id, time_spent_in_secs))
        # An answer counts as useful feedback iff it did not fall through to
        # the default outcome.
        feedback_is_useful = (
            classification_categorization != (
                exp_domain.DEFAULT_OUTCOME_CLASSIFICATION))
        stats_models.AnswerSubmittedEventLogEntryModel.create(
            exploration_id, exploration_version, state_name, session_id,
            time_spent_in_secs, feedback_is_useful)
class ExplorationActualStartEventHandler(BaseEventHandler):
    """Event handler for recording exploration actual start events."""
    EVENT_TYPE = feconf.EVENT_TYPE_ACTUAL_START_EXPLORATION
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id):
        """Records an actual-start event in the datastore event log."""
        stats_models.ExplorationActualStartEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id)
class SolutionHitEventHandler(BaseEventHandler):
    """Event handler for recording solution hit events."""
    EVENT_TYPE = feconf.EVENT_TYPE_SOLUTION_HIT
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs):
        """Records a solution-hit event in the datastore event log."""
        stats_models.SolutionHitEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)
class StartExplorationEventHandler(BaseEventHandler):
    """Event handler for recording exploration start events."""
    EVENT_TYPE = feconf.EVENT_TYPE_START_EXPLORATION
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id, params,
            play_type):
        """Records an exploration-start event in the datastore event log."""
        stats_models.StartExplorationEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id, params,
            play_type)
class MaybeLeaveExplorationEventHandler(BaseEventHandler):
    """Event handler for recording exploration leave events."""
    EVENT_TYPE = feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type):
        """Records a maybe-leave event in the datastore event log."""
        stats_models.MaybeLeaveExplorationEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type)
class CompleteExplorationEventHandler(BaseEventHandler):
    """Event handler for recording exploration completion events."""
    EVENT_TYPE = feconf.EVENT_TYPE_COMPLETE_EXPLORATION
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type):
        """Records an exploration-completion event in the datastore event log."""
        stats_models.CompleteExplorationEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type)
class RateExplorationEventHandler(BaseEventHandler):
    """Event handler for recording exploration rating events."""
    EVENT_TYPE = feconf.EVENT_TYPE_RATE_EXPLORATION
    @classmethod
    def _handle_event(cls, exploration_id, user_id, rating, old_rating):
        """Records a rating event, including the rating being replaced."""
        stats_models.RateExplorationEventLogEntryModel.create(
            exploration_id, user_id, rating, old_rating)
class StateHitEventHandler(BaseEventHandler):
    """Event handler for recording state hit events."""
    EVENT_TYPE = feconf.EVENT_TYPE_STATE_HIT
    # TODO(sll): remove params before sending this event to the jobs taskqueue.
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id,
            params, play_type):
        """Records a state-hit event in the datastore event log."""
        stats_models.StateHitEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            params, play_type)
class StateCompleteEventHandler(BaseEventHandler):
    """Event handler for recording state complete events."""
    EVENT_TYPE = feconf.EVENT_TYPE_STATE_COMPLETED
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs):
        """Records a state-completion event in the datastore event log."""
        stats_models.StateCompleteEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)
class LeaveForRefresherExpEventHandler(BaseEventHandler):
    """Event handler for recording "leave for refresher exploration" events."""
    EVENT_TYPE = feconf.EVENT_TYPE_LEAVE_FOR_REFRESHER_EXP
    @classmethod
    def _handle_event(
            cls, exp_id, refresher_exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs):
        """Records a leave-for-refresher event in the datastore event log."""
        stats_models.LeaveForRefresherExplorationEventLogEntryModel.create(
            exp_id, refresher_exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)
class FeedbackThreadCreatedEventHandler(BaseEventHandler):
    """Event handler for recording new feedback thread creation events."""
    EVENT_TYPE = feconf.EVENT_TYPE_NEW_THREAD_CREATED
    @classmethod
    def _handle_event(cls, exp_id):
        # Intentionally a no-op: only the async continuous-computation
        # listeners consume this event type.
        pass
class FeedbackThreadStatusChangedEventHandler(BaseEventHandler):
    """Event handler for recording reopening feedback thread events."""
    EVENT_TYPE = feconf.EVENT_TYPE_THREAD_STATUS_CHANGED
    @classmethod
    def _handle_event(cls, exp_id, old_status, new_status):
        # Intentionally a no-op: only the async continuous-computation
        # listeners consume this event type.
        pass
class Registry(object):
    """Registry of event handlers."""
    # Dict mapping event types to their classes.
    _event_types_to_classes = {}
    @classmethod
    def _refresh_registry(cls):
        """Regenerates the event handler registry."""
        cls._event_types_to_classes.clear()
        # Find all subclasses of BaseEventHandler in the current module.
        # Use items() rather than the Python-2-only iteritems(): items()
        # behaves the same here on both Python 2 and Python 3.
        for obj_name, obj in globals().items():
            if inspect.isclass(obj) and issubclass(obj, BaseEventHandler):
                if obj_name == 'BaseEventHandler':
                    continue
                cls._event_types_to_classes[obj.EVENT_TYPE] = obj
    @classmethod
    def get_event_class_by_type(cls, event_type):
        """Gets an event handler class by its type.
        Refreshes once if the event type is not found; subsequently, throws an
        error (KeyError).
        """
        if event_type not in cls._event_types_to_classes:
            cls._refresh_registry()
        return cls._event_types_to_classes[event_type]
| 35.557756 | 80 | 0.713384 |
import inspect
from core import jobs_registry
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import stats_domain
from core.domain import stats_services
from core.platform import models
from core.platform.taskqueue import gae_taskqueue_services as taskqueue_services
import feconf
# Import the datastore model classes via the platform registry.
(stats_models, feedback_models) = models.Registry.import_models([
    models.NAMES.statistics, models.NAMES.feedback])
# NOTE(review): rebinds (shadows) the gae_taskqueue_services import above.
taskqueue_services = models.Registry.import_taskqueue_services()
class BaseEventHandler(object):
    """Base class for event dispatchers."""
    # Event type string; set by subclasses, treated as immutable.
    EVENT_TYPE = None
    @classmethod
    def _notify_continuous_computation_listeners_async(cls, *args, **kwargs):
        """Asynchronously dispatch the event to realtime listeners."""
        taskqueue_services.defer(
            jobs_registry.ContinuousComputationEventDispatcher.dispatch_event,
            taskqueue_services.QUEUE_NAME_EVENTS, cls.EVENT_TYPE, *args,
            **kwargs)
    @classmethod
    def _handle_event(cls, *args, **kwargs):
        """In-request processing of an incoming event; subclasses override."""
        raise NotImplementedError(
            'Subclasses of BaseEventHandler should implement the '
            '_handle_event() method, using explicit arguments '
            '(no *args or **kwargs).')
    @classmethod
    def record(cls, *args, **kwargs):
        """Public entry point: notify async listeners, then handle in-request."""
        cls._notify_continuous_computation_listeners_async(*args, **kwargs)
        cls._handle_event(*args, **kwargs)
class StatsEventsHandler(BaseEventHandler):
    """Incrementally updates the analytics model from aggregated stats."""
    EVENT_TYPE = feconf.EVENT_TYPE_ALL_STATS
    @classmethod
    def _is_latest_version(cls, exp_id, exp_version):
        """True iff exp_version is the exploration's current version."""
        exploration = exp_services.get_exploration_by_id(exp_id)
        return exploration.version == exp_version
    @classmethod
    def _handle_event(cls, exploration_id, exp_version, aggregated_stats):
        """Defers a stats update only for the latest exploration version."""
        if cls._is_latest_version(exploration_id, exp_version):
            taskqueue_services.defer(
                stats_services.update_stats,
                taskqueue_services.QUEUE_NAME_STATS, exploration_id,
                exp_version, aggregated_stats)
class AnswerSubmissionEventHandler(BaseEventHandler):
    """Records answer submissions."""
    EVENT_TYPE = feconf.EVENT_TYPE_ANSWER_SUBMITTED
    @classmethod
    def _notify_continuous_computation_listeners_async(cls, *args, **kwargs):
        # Deliberately disabled: large answers exceed task-queue size limits.
        pass
    @classmethod
    def _handle_event(
            cls, exploration_id, exploration_version, state_name,
            interaction_id, answer_group_index, rule_spec_index,
            classification_categorization, session_id, time_spent_in_secs,
            params, normalized_answer):
        """Records a submitted answer and its event-log entry."""
        stats_services.record_answer(
            exploration_id, exploration_version, state_name, interaction_id,
            stats_domain.SubmittedAnswer(
                normalized_answer, interaction_id, answer_group_index,
                rule_spec_index, classification_categorization, params,
                session_id, time_spent_in_secs))
        # Useful feedback iff the answer did not hit the default outcome.
        feedback_is_useful = (
            classification_categorization != (
                exp_domain.DEFAULT_OUTCOME_CLASSIFICATION))
        stats_models.AnswerSubmittedEventLogEntryModel.create(
            exploration_id, exploration_version, state_name, session_id,
            time_spent_in_secs, feedback_is_useful)
class ExplorationActualStartEventHandler(BaseEventHandler):
    """Records exploration actual-start events."""
    EVENT_TYPE = feconf.EVENT_TYPE_ACTUAL_START_EXPLORATION
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id):
        stats_models.ExplorationActualStartEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id)
class SolutionHitEventHandler(BaseEventHandler):
    """Records solution-hit events."""
    EVENT_TYPE = feconf.EVENT_TYPE_SOLUTION_HIT
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs):
        stats_models.SolutionHitEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)
class StartExplorationEventHandler(BaseEventHandler):
    """Records exploration start events."""
    EVENT_TYPE = feconf.EVENT_TYPE_START_EXPLORATION
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id, params,
            play_type):
        stats_models.StartExplorationEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id, params,
            play_type)
class MaybeLeaveExplorationEventHandler(BaseEventHandler):
    """Records exploration leave events."""
    EVENT_TYPE = feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type):
        stats_models.MaybeLeaveExplorationEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type)
class CompleteExplorationEventHandler(BaseEventHandler):
    """Records exploration completion events."""
    EVENT_TYPE = feconf.EVENT_TYPE_COMPLETE_EXPLORATION
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type):
        stats_models.CompleteExplorationEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type)
class RateExplorationEventHandler(BaseEventHandler):
    """Records exploration rating events."""
    EVENT_TYPE = feconf.EVENT_TYPE_RATE_EXPLORATION
    @classmethod
    def _handle_event(cls, exploration_id, user_id, rating, old_rating):
        stats_models.RateExplorationEventLogEntryModel.create(
            exploration_id, user_id, rating, old_rating)
class StateHitEventHandler(BaseEventHandler):
    """Records state hit events."""
    EVENT_TYPE = feconf.EVENT_TYPE_STATE_HIT
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id,
            params, play_type):
        stats_models.StateHitEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            params, play_type)
class StateCompleteEventHandler(BaseEventHandler):
    """Records state completion events."""
    EVENT_TYPE = feconf.EVENT_TYPE_STATE_COMPLETED
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs):
        stats_models.StateCompleteEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)
class LeaveForRefresherExpEventHandler(BaseEventHandler):
    """Records "leave for refresher exploration" events."""
    EVENT_TYPE = feconf.EVENT_TYPE_LEAVE_FOR_REFRESHER_EXP
    @classmethod
    def _handle_event(
            cls, exp_id, refresher_exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs):
        stats_models.LeaveForRefresherExplorationEventLogEntryModel.create(
            exp_id, refresher_exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)
class FeedbackThreadCreatedEventHandler(BaseEventHandler):
    """Records new feedback-thread creation events (in-request no-op)."""
    EVENT_TYPE = feconf.EVENT_TYPE_NEW_THREAD_CREATED
    @classmethod
    def _handle_event(cls, exp_id):
        pass
class FeedbackThreadStatusChangedEventHandler(BaseEventHandler):
    """Records feedback-thread status-change events (in-request no-op)."""
    EVENT_TYPE = feconf.EVENT_TYPE_THREAD_STATUS_CHANGED
    @classmethod
    def _handle_event(cls, exp_id, old_status, new_status):
        pass
class Registry(object):
    """Registry mapping event types to their handler classes."""
    # Cache: event-type string -> BaseEventHandler subclass.
    _event_types_to_classes = {}
    @classmethod
    def _refresh_registry(cls):
        """Regenerates the registry from this module's globals."""
        cls._event_types_to_classes.clear()
        # items() works on both Python 2 and 3; iteritems() is Py2-only and
        # raises AttributeError under Python 3.
        for obj_name, obj in globals().items():
            if inspect.isclass(obj) and issubclass(obj, BaseEventHandler):
                if obj_name == 'BaseEventHandler':
                    continue
                cls._event_types_to_classes[obj.EVENT_TYPE] = obj
    @classmethod
    def get_event_class_by_type(cls, event_type):
        """Returns the handler class for event_type, refreshing once on miss."""
        if event_type not in cls._event_types_to_classes:
            cls._refresh_registry()
        return cls._event_types_to_classes[event_type]
| true | true |
f7ff592b5a4fe666d585f0090c2696d51c305d36 | 4,679 | py | Python | core.py | Datacket/Invado | 20ca439d9a3151fd97e85c87e6dc264152410aea | [
"MIT"
] | 1 | 2019-01-16T19:56:44.000Z | 2019-01-16T19:56:44.000Z | core.py | Datacket/Invado | 20ca439d9a3151fd97e85c87e6dc264152410aea | [
"MIT"
] | null | null | null | core.py | Datacket/Invado | 20ca439d9a3151fd97e85c87e6dc264152410aea | [
"MIT"
] | null | null | null | import json
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors as pycurse
from datetime import datetime
from model_animal_tracking import *
import tensorflow as tf
from io import StringIO
from datetime import timedelta
from flask import Flask,jsonify,request
from sklearn.preprocessing import OneHotEncoder
app=Flask(__name__)
@app.route("/save",methods=["POST"])
def reply():
    """Persist a wildlife-sighting report into the DETAILS table.

    Query params: lat, lon (coordinates), tos (time of sighting, one of
    M/A/E/N per the original notes), dos ('YYYY-MM-DD HH:MM:SS'), spec
    (species name). Returns JSON {"Status": 200} on success or
    {"Status": "<error>"} on any database failure.
    """
    lat=request.args.get('lat',None)
    lon=request.args.get('lon',None)
    tos=request.args.get('tos',None)
    dos=request.args.get('dos')
    print(dos)
    # Validates the timestamp format up front; raises ValueError on bad input.
    datetime.strptime(dos,'%Y-%m-%d %H:%M:%S')
    dos=str(dos).split(' ')[0]  # keep only the date part
    mos=int(dos.split('-')[1])  # month of sighting
    spec=request.args.get('spec',None)
    try:
        # NOTE(review): hard-coded DB credentials; move to configuration.
        conn=pymysql.connect(host="127.0.0.1",user="root",db='details',password="891998",cursorclass=pycurse.DictCursor)
        with conn.cursor() as cur:
            # Parameterized query: the driver escapes the request-supplied
            # values, closing the SQL-injection hole of the old .format().
            sql="INSERT INTO DETAILS (date,lat,lon,tos,spec,mos) VALUES(%s,%s,%s,%s,%s,%s)"
            cur.execute(sql, tuple(map(str, [dos, lat, lon, tos, spec, mos])))
        conn.commit()
        return jsonify({"Status":200})
    except Exception as e:
        return jsonify({"Status":str(e)})
def lat_long(tup, list_long_lang, radius):
    """Collect at most 15 points from ``list_long_lang`` that lie within
    ``radius`` km of ``tup``, preserving input order."""
    nearby = []
    for point in list_long_lang:
        if is_short_enough(edis(tup, point), radius):
            nearby.append(point)
        if len(nearby) == 15:
            break
    return nearby
def edis(X, Y):
    """Euclidean distance between two (lat, lon) coordinate pairs."""
    dx = X[0] - Y[0]
    dy = X[1] - Y[1]
    return np.sqrt(np.square(dx) + np.square(dy))
def is_short_enough(deg_dist, radius):
    """True when the (approximate) km distance is strictly below ``radius``."""
    # NOTE(review): cos(deg_dist) * 110 looks dubious as a degrees->km
    # conversion (deg_dist * 110 would be the usual flat approximation).
    # Behavior is preserved here pending confirmation of intent.
    return bool(np.cos(deg_dist) * 110 < radius)
from tqdm import tqdm
@app.route("/",methods=["GET"])
def get():
    """Predict likely species sightings near (lat, lon) for each time of day.

    Retrains a WildlifeCraziness model from all rows of DETAILS on every
    request, then returns one prediction per time-of-day code in "MEAN"
    (presumably Morning/Evening/Afternoon/Night -- TODO confirm) plus up to
    15 nearby previously-sighted points under the "lat_ln" key.
    """
    centre=list(map(float,[request.args.get('lat',None),request.args.get('lon',None)]))
    date=request.args.get('dos',None)
    mos=int(date.split('-')[1])
    print("Hello world!")
    # NOTE(review): 'if True:' is a leftover of a removed try/except (see the
    # commented-out handler at the bottom of this function).
    if True:
        # NOTE(review): hard-coded DB credentials; move to configuration.
        conn=pymysql.connect(host="127.0.0.1",user="root",db='details',password="891998",cursorclass=pycurse.DictCursor)
        with conn.cursor() as curr:
            sql="SELECT * FROM DETAILS"
            curr.execute(sql)
            result=curr.fetchall()
        latitude=[]
        longitude=[]
        print("Hello world!")
        for i in tqdm(result):
            latitude.append(i['lat'])
            longitude.append(i['lon'])
        l=list(zip(latitude,longitude))
        # Up to 15 previously-sighted points within a 5 km radius.
        lt_ln=lat_long(centre,l,5)
        df=pd.DataFrame(result)
        # Normalize species labels; "cobra" is folded into "snake".
        df["spec"] = df["spec"].apply(lambda x : x.lower())
        df["spec"] = df["spec"].apply(lambda x : "snake" if x == "cobra" else x)
        spec_copy = df["spec"].copy()
        df["spec"]=df["spec"].apply(str.lower).astype("category").cat.codes
        df["tos"]=df["tos"].astype("category").cat.codes
        # One-hot encode species (targets) and time-of-sighting (features).
        oh1=OneHotEncoder().fit(np.array(df["spec"]).reshape(-1,1))
        l=oh1.transform(np.array(df["spec"]).reshape(-1,1)).toarray()
        #l=l[:,1:]
        oh2=OneHotEncoder().fit(np.array(df["tos"]).reshape(-1,1))
        l2=oh2.transform(np.array(df["tos"]).reshape(-1,1)).toarray()
        #l2=l2[:,1:]
        # Feature matrix: lat, lon, month, one-hot time-of-sighting.
        s2=np.concatenate([np.array(df["lat"]).reshape(-1,1),np.array(df["lon"]).reshape(-1,1),np.array(df["mos"]).reshape(-1,1),l2],axis=1)
        # NOTE(review): the model is trained from scratch on every request.
        wlc=WildlifeCraziness(s2.shape[1],l.shape[1])
        wlc.load_dataset(s2,l)
        print("Hello World!!")
        wlc.fit()
        print("World")
        dat=[np.array(centre[0]).reshape(-1,1),np.array(centre[1]).reshape(-1,1),np.array(mos).reshape(-1,1)]
        test={}
        for i in "MEAN":
            #dat.append(np.array(l2.transform(i)).reshape(-1,1))
            # Hand-rolled one-hot for the time-of-day code; assumed to match
            # the category ordering produced by oh2 above -- TODO confirm.
            if i == 'A':
                arr = [1, 0, 0, 0]
            elif i == 'E':
                arr = [0, 1, 0, 0]
            elif i == 'M':
                arr = [0, 0, 1, 0]
            else:
                arr = [0, 0, 0, 1]
            l=sorted(set(spec_copy))
            #print (l)
            #print(np.concatenate([np.array(dat).reshape(-1,1),np.array(arr).reshape(-1,1)]).shape)
            prediction=wlc.predict(np.concatenate([np.array(dat).reshape(-1,1),np.array(arr).reshape(-1,1)]).T, l)
            test[i]=prediction
        test["lat_ln"]=lt_ln
        return jsonify(test)
        # l2 as JSON
    #except Exception as e:
    #    return jsonify({"Status":str(e)})
# Launch the Flask dev server; NOTE(review): debug=True and 0.0.0.0 binding
# are unsafe outside local development.
app.run(host="0.0.0.0",port=10400,debug=True)
| 38.669421 | 158 | 0.574268 | import json
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors as pycurse
from datetime import datetime
from model_animal_tracking import *
import tensorflow as tf
from io import StringIO
from datetime import timedelta
from flask import Flask,jsonify,request
from sklearn.preprocessing import OneHotEncoder
app=Flask(__name__)
@app.route("/save",methods=["POST"])
def reply():
    """Persist one sighting (lat, lon, tos, dos, spec) into the DETAILS table."""
    lat=request.args.get('lat',None)
    lon=request.args.get('lon',None)
    tos=request.args.get('tos',None)
    dos=request.args.get('dos')
    print(dos)
    # Parsing validates the 'YYYY-MM-DD HH:MM:SS' format (raises on bad input).
    dt1=datetime.strptime(dos,'%Y-%m-%d %H:%M:%S')
    dos=str(dos).split(' ')[0]
    # Month of sighting, taken from the date part.
    mos=int(dos.split('-')[1])
    spec=request.args.get('spec',None)
    dt2=datetime.now()
    try:
        # NOTE(review): hard-coded credentials; move to configuration.
        conn=pymysql.connect(host="127.0.0.1",user="root",db='details',password="891998",cursorclass=pycurse.DictCursor)
        with conn.cursor() as cur:
            # NOTE(review): request values are interpolated into the SQL
            # string -- injection-prone; prefer parameterized execute().
            sql="INSERT INTO DETAILS (date,lat,lon,tos,spec,mos) VALUES(\'{}\',{},{},\'{}\',\'{}\',{})".format(*list(map(str,[dos,lat,lon,tos,spec,mos])))
            cur.execute(sql)
        conn.commit()
        return jsonify({"Status":200})
    except Exception as e:
        return jsonify({"Status":str(e)})
    # NOTE(review): unreachable -- both branches above return first.
    var=model.fit(list(map(str,[lat,lon,tos,spec,mos])))
def lat_long(tup, list_long_lang, radius):
    """Return up to 15 points from list_long_lang within radius km of tup."""
    fres = []
    for l in list_long_lang:
        dis_for_l = edis(tup, l)
        if is_short_enough(dis_for_l, radius):
            fres.append(l)
        if len(fres) == 15:
            break
    return fres
def edis(X, Y):
    """Euclidean distance between two (lat, lon) coordinate pairs."""
    x1, y1, x2, y2 = X[0], X[1], Y[0], Y[1]
    return np.sqrt(np.square(x1 - x2) + np.square(y1 - y2))
def is_short_enough(deg_dist, radius):
    """True when the approximate km distance is strictly below radius."""
    # NOTE(review): cos(deg_dist) * 110 looks dubious as a degrees->km
    # conversion -- confirm intent.
    dist_in_km = np.cos(deg_dist) * 110
    return True if dist_in_km < radius else False
from tqdm import tqdm
@app.route("/",methods=["GET"])
def get():
    """Predict likely species sightings near (lat, lon) per time-of-day code.

    Retrains a WildlifeCraziness model from all DETAILS rows on every request
    and returns predictions for each code in "MEAN" plus nearby past
    sightings under "lat_ln".
    """
    centre=list(map(float,[request.args.get('lat',None),request.args.get('lon',None)]))
    date=request.args.get('dos',None)
    mos=int(date.split('-')[1])
    print("Hello world!")
    if True:
        # NOTE(review): hard-coded DB credentials; move to configuration.
        conn=pymysql.connect(host="127.0.0.1",user="root",db='details',password="891998",cursorclass=pycurse.DictCursor)
        with conn.cursor() as curr:
            sql="SELECT * FROM DETAILS"
            curr.execute(sql)
            result=curr.fetchall()
        latitude=[]
        longitude=[]
        print("Hello world!")
        for i in tqdm(result):
            latitude.append(i['lat'])
            longitude.append(i['lon'])
        l=list(zip(latitude,longitude))
        # Up to 15 previously-sighted points within a 5 km radius.
        lt_ln=lat_long(centre,l,5)
        df=pd.DataFrame(result)
        # Normalize species labels; "cobra" is folded into "snake".
        df["spec"] = df["spec"].apply(lambda x : x.lower())
        df["spec"] = df["spec"].apply(lambda x : "snake" if x == "cobra" else x)
        spec_copy = df["spec"].copy()
        df["spec"]=df["spec"].apply(str.lower).astype("category").cat.codes
        df["tos"]=df["tos"].astype("category").cat.codes
        oh1=OneHotEncoder().fit(np.array(df["spec"]).reshape(-1,1))
        l=oh1.transform(np.array(df["spec"]).reshape(-1,1)).toarray()
        oh2=OneHotEncoder().fit(np.array(df["tos"]).reshape(-1,1))
        l2=oh2.transform(np.array(df["tos"]).reshape(-1,1)).toarray()
        # Feature matrix: lat, lon, month, one-hot time-of-sighting.
        s2=np.concatenate([np.array(df["lat"]).reshape(-1,1),np.array(df["lon"]).reshape(-1,1),np.array(df["mos"]).reshape(-1,1),l2],axis=1)
        wlc=WildlifeCraziness(s2.shape[1],l.shape[1])
        wlc.load_dataset(s2,l)
        print("Hello World!!")
        wlc.fit()
        print("World")
        dat=[np.array(centre[0]).reshape(-1,1),np.array(centre[1]).reshape(-1,1),np.array(mos).reshape(-1,1)]
        test={}
        for i in "MEAN":
            # Hand-rolled one-hot for the time-of-day code; assumed to match
            # the oh2 category ordering -- TODO confirm.
            if i == 'A':
                arr = [1, 0, 0, 0]
            elif i == 'E':
                arr = [0, 1, 0, 0]
            elif i == 'M':
                arr = [0, 0, 1, 0]
            else:
                arr = [0, 0, 0, 1]
            l=sorted(set(spec_copy))
            prediction=wlc.predict(np.concatenate([np.array(dat).reshape(-1,1),np.array(arr).reshape(-1,1)]).T, l)
            test[i]=prediction
        test["lat_ln"]=lt_ln
        return jsonify(test)
# Launch the Flask dev server; NOTE(review): debug=True is unsafe in production.
app.run(host="0.0.0.0",port=10400,debug=True)
| true | true |
f7ff59edcbbc1b3a40cd7af030b4b13c846cc2b2 | 831 | py | Python | easysite/urls.py | Tian-rg/easysite | 6a34cb373e43c263e98dceae47f41c99b28803f1 | [
"MIT"
] | null | null | null | easysite/urls.py | Tian-rg/easysite | 6a34cb373e43c263e98dceae47f41c99b28803f1 | [
"MIT"
] | null | null | null | easysite/urls.py | Tian-rg/easysite | 6a34cb373e43c263e98dceae47f41c99b28803f1 | [
"MIT"
] | null | null | null | """easysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Mount the siteframe app under /easysite/ and expose the Django admin.
urlpatterns = [
    url(r'^easysite/', include('siteframe.urls')),
    url(r'^admin/', admin.site.urls),
]
| 33.24 | 79 | 0.695548 | from django.conf.urls import url, include
from django.contrib import admin
# Mount the siteframe app under /easysite/ and expose the Django admin.
urlpatterns = [
    url(r'^easysite/', include('siteframe.urls')),
    url(r'^admin/', admin.site.urls),
]
| true | true |
f7ff5a63ffd654ae3bce5b760918ad35855d4f53 | 2,521 | py | Python | pyatv/protocols/dmap/tags.py | Jacobs4/pyatv | 52956adf3b79198be52cc03649f3ddeee19f9e6c | [
"MIT"
] | 532 | 2017-02-01T19:23:28.000Z | 2022-03-29T09:57:39.000Z | pyatv/protocols/dmap/tags.py | Jacobs4/pyatv | 52956adf3b79198be52cc03649f3ddeee19f9e6c | [
"MIT"
] | 1,639 | 2017-02-01T19:22:04.000Z | 2022-03-31T17:26:40.000Z | pyatv/protocols/dmap/tags.py | bdraco/pyatv | 9541d21e6101c60866d832626be97bf962774cd5 | [
"MIT"
] | 102 | 2017-02-02T01:42:13.000Z | 2022-02-26T08:49:34.000Z | """Util functions for extracting and constructing DMAP data."""
import binascii
import plistlib
def read_str(data, start, length):
    """Decode ``length`` bytes at ``start`` in ``data`` as a UTF-8 string."""
    chunk = data[start : start + length]
    return chunk.decode("utf-8")
def read_uint(data, start, length):
    """Read ``length`` bytes at ``start`` as a big-endian unsigned integer."""
    chunk = data[start : start + length]
    return int.from_bytes(chunk, byteorder="big")
def read_bool(data, start, length):
    """Interpret the unsigned integer at ``start`` as a boolean (== 1)."""
    # Inlined big-endian uint decode; True only for the exact value 1.
    return int.from_bytes(data[start : start + length], byteorder="big") == 1
def read_bplist(data, start, length):
    """Parse ``length`` bytes at ``start`` as a binary property list."""
    chunk = data[start : start + length]
    # pylint: disable=no-member
    return plistlib.loads(chunk, fmt=plistlib.FMT_BINARY)
def read_bytes(data, start, length):
    """Render ``length`` bytes at ``start`` as a lowercase '0x...' hex string."""
    hex_text = binascii.hexlify(data[start : start + length]).decode("ascii")
    return "0x" + hex_text
# pylint: disable=unused-argument
def read_ignore(data, start, length):
    """Use this to ignore data for all input (always returns None)."""
def uint8_tag(name, value):
    """Create a DMAP tag holding ``value`` as one unsigned big-endian byte."""
    payload = value.to_bytes(1, byteorder="big")
    return name.encode("utf-8") + b"\x00\x00\x00\x01" + payload
def uint16_tag(name, value):
    """Create a DMAP tag holding ``value`` as a 2-byte big-endian integer."""
    payload = value.to_bytes(2, byteorder="big")
    return name.encode("utf-8") + b"\x00\x00\x00\x02" + payload
def uint32_tag(name, value):
    """Create a DMAP tag holding a four-byte unsigned integer."""
    payload = value.to_bytes(4, byteorder="big")
    return name.encode("utf-8") + b"\x00\x00\x00\x04" + payload
def uint64_tag(name, value):
    """Create a DMAP tag holding an eight-byte unsigned integer."""
    payload = value.to_bytes(8, byteorder="big")
    return name.encode("utf-8") + b"\x00\x00\x00\x08" + payload
def bool_tag(name, value):
    """Create a DMAP tag holding a boolean encoded as a single byte."""
    payload = b"\x01" if value else b"\x00"
    return name.encode("utf-8") + b"\x00\x00\x00\x01" + payload
def raw_tag(name, value):
    """Create a DMAP tag wrapping raw bytes behind a 4-byte length prefix."""
    header = name.encode("utf-8") + len(value).to_bytes(4, byteorder="big")
    return header + value
def string_tag(name, value):
    """Create a DMAP tag with string data.

    The 4-byte length prefix must count the encoded UTF-8 *bytes*, not the
    number of characters; using ``len(value)`` would under-report the length
    for any string containing multi-byte (non-ASCII) characters and corrupt
    the tag framing. Encode once and measure the encoded payload.
    """
    encoded = value.encode("utf-8")
    return (
        name.encode("utf-8")
        + len(encoded).to_bytes(4, byteorder="big")
        + encoded
    )
def container_tag(name, data):
    """Create a DMAP container tag; framing is identical to a raw tag."""
    return name.encode("utf-8") + len(data).to_bytes(4, byteorder="big") + data
| 28.325843 | 87 | 0.641412 |
import binascii
import plistlib
def read_str(data, start, length):
return data[start : start + length].decode("utf-8")
def read_uint(data, start, length):
return int.from_bytes(data[start : start + length], byteorder="big")
def read_bool(data, start, length):
return read_uint(data, start, length) == 1
def read_bplist(data, start, length):
# pylint: disable=no-member
return plistlib.loads(data[start : start + length], fmt=plistlib.FMT_BINARY)
def read_bytes(data, start, length):
return "0x" + binascii.hexlify(data[start : start + length]).decode("ascii")
# pylint: disable=unused-argument
def read_ignore(data, start, length):
def uint8_tag(name, value):
return (
name.encode("utf-8") + b"\x00\x00\x00\x01" + value.to_bytes(1, byteorder="big")
)
def uint16_tag(name, value):
return (
name.encode("utf-8") + b"\x00\x00\x00\x02" + value.to_bytes(2, byteorder="big")
)
def uint32_tag(name, value):
return (
name.encode("utf-8") + b"\x00\x00\x00\x04" + value.to_bytes(4, byteorder="big")
)
def uint64_tag(name, value):
return (
name.encode("utf-8") + b"\x00\x00\x00\x08" + value.to_bytes(8, byteorder="big")
)
def bool_tag(name, value):
return name.encode("utf-8") + b"\x00\x00\x00\x01" + (b"\x01" if value else b"\x00")
def raw_tag(name, value):
return name.encode("utf-8") + len(value).to_bytes(4, byteorder="big") + value
def string_tag(name, value):
return (
name.encode("utf-8")
+ len(value).to_bytes(4, byteorder="big")
+ value.encode("utf-8")
)
def container_tag(name, data):
return raw_tag(name, data) # Same as raw
| true | true |
f7ff5a712024609213b7c353c77109c35588f344 | 1,254 | py | Python | pop_music_highlighter/lib.py | ka5par/MIR | ca8d9ee84435299f680b158d9c92c2b6e47682b3 | [
"MIT"
] | null | null | null | pop_music_highlighter/lib.py | ka5par/MIR | ca8d9ee84435299f680b158d9c92c2b6e47682b3 | [
"MIT"
] | null | null | null | pop_music_highlighter/lib.py | ka5par/MIR | ca8d9ee84435299f680b158d9c92c2b6e47682b3 | [
"MIT"
] | 1 | 2021-05-08T11:47:59.000Z | 2021-05-08T11:47:59.000Z | import os.path
import numpy as np
import librosa
from pydub import AudioSegment
def chunk(incoming, n_chunk):
    """Split each item of a (batch, time, feat) array into n_chunk equal
    time slices and stack all slices into one array.

    Any remainder (time not divisible by n_chunk) is dropped, matching
    integer-division chunking.
    """
    seg_len = incoming.shape[1] // n_chunk
    segments = [
        incoming[b, k * seg_len:(k + 1) * seg_len, :]
        for b in range(incoming.shape[0])
        for k in range(n_chunk)
    ]
    return np.array(segments)
def audio_read(f):
    """Load an audio file from the ``data/`` directory.

    Returns a tuple of (waveform, log-scaled mel spectrogram with a leading
    batch axis, integer duration in seconds).
    """
    path = "data" + os.path.sep + f.name
    y, sr = librosa.core.load(path, sr=22050)
    duration = librosa.core.get_duration(y=y, sr=sr)
    mel = librosa.feature.melspectrogram(y, sr=sr, n_fft=2048, hop_length=512, n_mels=128)
    features = np.expand_dims(np.transpose(np.log(1 + 10000 * mel)), axis=0)
    return y, features, int(duration)
def positional_encoding(batch_size, n_pos, d_pos):
    """Build a sinusoidal positional-encoding table of shape
    (batch_size, n_pos, d_pos).

    Row 0 is an all-zero vector (padding-token position); for every other
    position, even dimensions carry sin and odd dimensions carry cos of the
    scaled position.
    """
    table = np.array([
        np.zeros(d_pos) if pos == 0
        else [pos / np.power(10000, 2 * (dim // 2) / d_pos) for dim in range(d_pos)]
        for pos in range(n_pos)
    ])
    table[1:, 0::2] = np.sin(table[1:, 0::2])  # even dims -> sin
    table[1:, 1::2] = np.cos(table[1:, 1::2])  # odd dims -> cos
    return np.tile(table, [batch_size, 1, 1])
| 32.153846 | 88 | 0.652313 | import os.path
import numpy as np
import librosa
from pydub import AudioSegment
def chunk(incoming, n_chunk):
input_length = incoming.shape[1]
chunk_length = input_length // n_chunk
outputs = []
for i in range(incoming.shape[0]):
for j in range(n_chunk):
outputs.append(incoming[i, j*chunk_length:(j+1)*chunk_length, :])
outputs = np.array(outputs)
return outputs
def audio_read(f):
y, sr = librosa.core.load("data" + os.path.sep + f.name, sr=22050)
d = librosa.core.get_duration(y=y, sr=sr)
S = librosa.feature.melspectrogram(y, sr=sr, n_fft=2048, hop_length=512, n_mels=128)
S = np.transpose(np.log(1+10000*S))
S = np.expand_dims(S, axis=0)
return y, S, int(d)
def positional_encoding(batch_size, n_pos, d_pos):
position_enc = np.array([
[pos / np.power(10000, 2 * (j // 2) / d_pos) for j in range(d_pos)]
if pos != 0 else np.zeros(d_pos) for pos in range(n_pos)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2])
position_enc = np.tile(position_enc, [batch_size, 1, 1])
return position_enc
| true | true |
f7ff5adcc7c106d8d1d071e95c0447f18f2e02b7 | 1,295 | py | Python | tensorflow/contrib/estimator/python/estimator/hooks.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/contrib/estimator/python/estimator/hooks.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/contrib/estimator/python/estimator/hooks.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""hooks python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import hooks
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
hooks.__all__ = [s for s in dir(hooks) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.hooks import *
| 39.242424 | 86 | 0.742085 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import hooks
_HAS_DYNAMIC_ATTRIBUTES = True
hooks.__all__ = [s for s in dir(hooks) if not s.startswith('__')]
from tensorflow_estimator.contrib.estimator.python.estimator.hooks import *
| true | true |
f7ff5bd285006ad39957ebd8922f565cba69c8d1 | 4,682 | py | Python | ocpl_ros/scripts/load_collision_objects.py | JeroenDM/ocpl | 51c19f2670327e17fabf5920ddad2c681f2bef4d | [
"MIT"
] | null | null | null | ocpl_ros/scripts/load_collision_objects.py | JeroenDM/ocpl | 51c19f2670327e17fabf5920ddad2c681f2bef4d | [
"MIT"
] | 1 | 2021-01-17T09:12:51.000Z | 2021-01-17T09:12:51.000Z | ocpl_ros/scripts/load_collision_objects.py | JeroenDM/ocpl | 51c19f2670327e17fabf5920ddad2c681f2bef4d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import rospy
import moveit_commander
from geometry_msgs.msg import Vector3, Quaternion, Pose, PoseStamped
def remove_all_objects(scene):
    """Delete every known collision object from the MoveIt planning scene."""
    for obj_name in scene.get_known_object_names():
        scene.remove_world_object(obj_name)
def create_3r_spheres():
    """Build the 3R-robot sphere scene: two spheres in the world frame.

    Returns (object types, stamped poses, radii) in parallel lists.
    """
    centers = [Vector3(2.0, 0.55, 0), Vector3(1.9, -1, 0)]
    poses = []
    for center in centers:
        stamped = PoseStamped()
        stamped.header.frame_id = "world"
        stamped.pose = Pose(position=center, orientation=Quaternion(w=1))
        poses.append(stamped)
    return ["sphere", "sphere"], poses, [0.2, 0.4]
def create_case_1_2018():
    """Build benchmark case 1 (2018): five axis-aligned boxes.

    Returns (object types, stamped poses, box dimensions) in parallel lists.
    """
    centers = [(1.5, 1.75), (3.5, 2.1), (2, 3.45), (0.1, 2.1), (0.6, 1.25)]
    poses = []
    for x, y in centers:
        stamped = PoseStamped()
        stamped.header.frame_id = "world"
        stamped.pose = Pose(position=Vector3(x, y, 0), orientation=Quaternion(w=1))
        poses.append(stamped)
    return (
        ["box"] * 5,
        poses,
        [(1, 1.5, 1), (1, 2.2, 1), (4, 0.5, 1), (0.2, 2.2, 1), (0.8, 0.5, 1)],
    )
def create_case_2_2018():
    """Build benchmark case 2 (2018): two large boxes forming a narrow passage.

    Returns (object types, stamped poses, box dimensions) in parallel lists.
    """
    gap = 0.5  # free corridor width between the two boxes
    poses = []
    for y in (-1, gap + 1):
        stamped = PoseStamped()
        stamped.header.frame_id = "world"
        stamped.pose = Pose(position=Vector3(6, y, 0), orientation=Quaternion(w=1))
        poses.append(stamped)
    return ["box", "box"], poses, [(4, 2, 1), (4, 2, 1)]
def create_case_3_2018():
    """Build benchmark case 3 (2018): five thin boxes forming walls.

    Returns (object types, stamped poses, box dimensions) in parallel lists.
    """
    centers = [(2.5, -1.1), (2.5, 3.1), (1.1, 0.25), (2.6, 2.0), (4.1, 0.25)]
    poses = []
    for x, y in centers:
        stamped = PoseStamped()
        stamped.header.frame_id = "world"
        stamped.pose = Pose(position=Vector3(x, y, 0), orientation=Quaternion(w=1))
        poses.append(stamped)
    return (
        ["box"] * 5,
        poses,
        [(5, 0.2, 1), (5, 0.2, 1), (0.2, 2.5, 1), (0.2, 2, 1), (0.2, 2.5, 1)],
    )
def create_case_teapot():
    """Build the teapot scene: four boxes around the workpiece.

    Returns (object types, stamped poses, box dimensions) in parallel lists.
    """
    specs = [
        ((0.9, 0.0, 1.1), (0.4, 1.0, 0.05)),
        ((0.9, 0.0, 0.25), (0.4, 1.0, 0.5)),
        ((0.9, -0.475, 0.8), (0.4, 0.05, 0.6)),
        ((0.9, 0.475, 0.8), (0.4, 0.05, 0.6)),
    ]
    poses = []
    sizes = []
    for (x, y, z), dims in specs:
        stamped = PoseStamped()
        stamped.header.frame_id = "world"
        stamped.pose = Pose(position=Vector3(x, y, z), orientation=Quaternion(w=1))
        poses.append(stamped)
        sizes.append(dims)
    return (
        ["box"] * 4,
        poses,
        sizes,
    )
# Registry of available collision scenes: maps the name given on the
# command line to the function that builds that scene's objects.
factories = {
    "3r_spheres": create_3r_spheres,
    "case_1_2018": create_case_1_2018,
    "case_2_2018": create_case_2_2018,
    "case_3_2018": create_case_3_2018,
    "teapot": create_case_teapot,
}
if __name__ == "__main__":
rospy.init_node("load_collision_objects")
default_scene = "3r_spheres"
selected_scene = ""
if len(sys.argv) < 2:
print("Using default collision scene {}".format(default_scene))
selected_scene = default_scene
else:
if sys.argv[1] in factories:
selected_scene = sys.argv[1]
else:
rospy.loginfo("Could find a scene with name {}".format(sys.argv[1]))
scene = moveit_commander.PlanningSceneInterface()
rospy.sleep(1.0)
remove_all_objects(scene)
types, poses, sizes = factories[selected_scene]()
for i, t, p, s in zip(range(len(poses)), types, poses, sizes):
if t == "box":
scene.add_box("box_{}".format(i), p, size=s)
elif t == "sphere":
scene.add_sphere("sphere_{}".format(i), p, s)
rospy.loginfo("Done!")
| 32.971831 | 95 | 0.614481 |
import sys
import rospy
import moveit_commander
from geometry_msgs.msg import Vector3, Quaternion, Pose, PoseStamped
def remove_all_objects(scene):
for name in scene.get_known_object_names():
scene.remove_world_object(name)
def create_3r_spheres():
pose_s = PoseStamped()
pose_s.header.frame_id = "world"
pose_s.pose = Pose(position=Vector3(2.0, 0.55, 0), orientation=Quaternion(w=1))
pose_2 = PoseStamped()
pose_2.header.frame_id = "world"
pose_2.pose = Pose(position=Vector3(1.9, -1, 0), orientation=Quaternion(w=1))
return ["sphere", "sphere"], [pose_s, pose_2], [0.2, 0.4]
def create_case_1_2018():
poses = [PoseStamped(), PoseStamped(), PoseStamped(), PoseStamped(), PoseStamped()]
for pose in poses:
pose.header.frame_id = "world"
poses[0].pose = Pose(position=Vector3(1.5, 1.75, 0), orientation=Quaternion(w=1))
poses[1].pose = Pose(position=Vector3(3.5, 2.1, 0), orientation=Quaternion(w=1))
poses[2].pose = Pose(position=Vector3(2, 3.45, 0), orientation=Quaternion(w=1))
poses[3].pose = Pose(position=Vector3(0.1, 2.1, 0), orientation=Quaternion(w=1))
poses[4].pose = Pose(position=Vector3(0.6, 1.25, 0), orientation=Quaternion(w=1))
return (
["box"] * 5,
poses,
[(1, 1.5, 1), (1, 2.2, 1), (4, 0.5, 1), (0.2, 2.2, 1), (0.8, 0.5, 1)],
)
def create_case_2_2018():
small_passage_width = 0.5
pose_1 = PoseStamped()
pose_1.header.frame_id = "world"
pose_1.pose = Pose(position=Vector3(6, -1, 0), orientation=Quaternion(w=1))
pose_2 = PoseStamped()
pose_2.header.frame_id = "world"
pose_2.pose = Pose(
position=Vector3(6, small_passage_width + 1, 0), orientation=Quaternion(w=1)
)
return ["box", "box"], [pose_1, pose_2], [(4, 2, 1), (4, 2, 1)]
def create_case_3_2018():
poses = [PoseStamped(), PoseStamped(), PoseStamped(), PoseStamped(), PoseStamped()]
for pose in poses:
pose.header.frame_id = "world"
poses[0].pose = Pose(position=Vector3(2.5, -1.1, 0), orientation=Quaternion(w=1))
poses[1].pose = Pose(position=Vector3(2.5, 3.1, 0), orientation=Quaternion(w=1))
poses[2].pose = Pose(position=Vector3(1.1, 0.25, 0), orientation=Quaternion(w=1))
poses[3].pose = Pose(position=Vector3(2.6, 2.0, 0), orientation=Quaternion(w=1))
poses[4].pose = Pose(position=Vector3(4.1, 0.25, 0), orientation=Quaternion(w=1))
return (
["box"] * 5,
poses,
[(5, 0.2, 1), (5, 0.2, 1), (0.2, 2.5, 1), (0.2, 2, 1), (0.2, 2.5, 1)],
)
def create_case_teapot():
names = ["box"] * 4
poses = [PoseStamped(), PoseStamped(), PoseStamped(), PoseStamped()]
for pose in poses:
pose.header.frame_id = "world"
poses[0].pose = Pose(position=Vector3(0.9, 0.0, 1.1), orientation=Quaternion(w=1))
poses[1].pose = Pose(position=Vector3(0.9, 0.0, 0.25), orientation=Quaternion(w=1))
poses[2].pose = Pose(position=Vector3(0.9, -0.475, 0.8), orientation=Quaternion(w=1))
poses[3].pose = Pose(position=Vector3(0.9, 0.475, 0.8), orientation=Quaternion(w=1))
sizes = [None] * 4
sizes[0] = (0.4, 1.0, 0.05)
sizes[1] = (0.4, 1.0, 0.5)
sizes[2] = (0.4, 0.05, 0.6)
sizes[3] = (0.4, 0.05, 0.6)
return (
names,
poses,
sizes,
)
factories = {
"3r_spheres": create_3r_spheres,
"case_1_2018": create_case_1_2018,
"case_2_2018": create_case_2_2018,
"case_3_2018": create_case_3_2018,
"teapot": create_case_teapot,
}
if __name__ == "__main__":
rospy.init_node("load_collision_objects")
default_scene = "3r_spheres"
selected_scene = ""
if len(sys.argv) < 2:
print("Using default collision scene {}".format(default_scene))
selected_scene = default_scene
else:
if sys.argv[1] in factories:
selected_scene = sys.argv[1]
else:
rospy.loginfo("Could find a scene with name {}".format(sys.argv[1]))
scene = moveit_commander.PlanningSceneInterface()
rospy.sleep(1.0)
remove_all_objects(scene)
types, poses, sizes = factories[selected_scene]()
for i, t, p, s in zip(range(len(poses)), types, poses, sizes):
if t == "box":
scene.add_box("box_{}".format(i), p, size=s)
elif t == "sphere":
scene.add_sphere("sphere_{}".format(i), p, s)
rospy.loginfo("Done!")
| true | true |
f7ff5c0c0af4865e0ed1f0d1983e8b1a0aee5ddd | 1,209 | py | Python | instagram/migrations/0003_comment.py | MungaiKeren/The_Gram_Master | 10dfd8756d11f9e34793428d75c1d3e19a1a2dc0 | [
"MIT"
] | 2 | 2019-10-13T08:36:51.000Z | 2021-03-30T06:30:38.000Z | instagram/migrations/0003_comment.py | MungaiKeren/The_Gram_Master | 10dfd8756d11f9e34793428d75c1d3e19a1a2dc0 | [
"MIT"
] | 5 | 2020-02-12T03:14:17.000Z | 2021-09-08T01:20:44.000Z | instagram/migrations/0003_comment.py | MungaiKeren/The_Gram_Master | 10dfd8756d11f9e34793428d75c1d3e19a1a2dc0 | [
"MIT"
] | 1 | 2021-02-25T17:07:57.000Z | 2021-02-25T17:07:57.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2019-10-14 12:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``Comment`` model: a user comment attached to an image."""

    # Requires the user model and the previous instagram migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('instagram', '0002_auto_20191014_1546'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total_comments', models.IntegerField(default=0)),
                ('comment', models.CharField(max_length=200)),
                # Set automatically when the comment is first saved.
                ('date', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('image', models.ForeignKey(default='pic_folder/eat_us.jpg', on_delete=django.db.models.deletion.CASCADE, to='instagram.Image')),
            ],
            options={
                # Newest comments first by default.
                'ordering': ['-date'],
            },
        ),
    ]
| 36.636364 | 145 | 0.622002 |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('instagram', '0002_auto_20191014_1546'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total_comments', models.IntegerField(default=0)),
('comment', models.CharField(max_length=200)),
('date', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('image', models.ForeignKey(default='pic_folder/eat_us.jpg', on_delete=django.db.models.deletion.CASCADE, to='instagram.Image')),
],
options={
'ordering': ['-date'],
},
),
]
| true | true |
f7ff5ca356091412e006b8365159bcec1a5147e3 | 6,509 | py | Python | esmond/api/tests/test_translator.py | esnet/esmond-test | 8aaec580e8fa8a27bddddc0fd91fd3e96730515f | [
"BSD-3-Clause-LBNL"
] | null | null | null | esmond/api/tests/test_translator.py | esnet/esmond-test | 8aaec580e8fa8a27bddddc0fd91fd3e96730515f | [
"BSD-3-Clause-LBNL"
] | null | null | null | esmond/api/tests/test_translator.py | esnet/esmond-test | 8aaec580e8fa8a27bddddc0fd91fd3e96730515f | [
"BSD-3-Clause-LBNL"
] | null | null | null |
from django.test import TestCase
from esmond.poll import IfRefTranslator
from esmond.api.tests.test_correlator import MockOID
EXAMPLE_DATA = {
'IfRefTranslator':
{'check': [('ifPhysAddress.1', '0c:a4:02:50:74:01'),
('ifPhysAddress.2', '8c:90:d3:90:99:69'),
('ifPhysAddress.3', '00:00:00:00:00:00'),
('ifPhysAddress.4', '00:00:00:00:00:00'),
('ifPhysAddress.5', '8c:90:d3:90:98:cc'),
('ifPhysAddress.6', '8c:90:d3:90:99:59'),
('ifPhysAddress.7', '8c:90:d3:90:99:58'),
('ifPhysAddress.8', '8c:90:d3:8a:47:6c'),
('ifPhysAddress.9', '8c:90:d3:8a:47:6d'),
('ifPhysAddress.10', '8c:90:d3:8a:47:6a'),
('ifPhysAddress.11', '0c:a4:02:f4:16:50'),
('ifPhysAddress.12', '0c:a4:02:f4:16:50'),
('ifPhysAddress.13', '0c:a4:02:50:74:01'),
('ifPhysAddress.14', '0c:a4:02:50:74:01'),
('ifPhysAddress.15', '8c:90:d3:8a:47:72'),
('ifPhysAddress.16', '8c:90:d3:8a:47:73'),
('ifPhysAddress.17', '8c:90:d3:8a:47:74'),
('ifPhysAddress.18', '8c:90:d3:8a:47:6f'),
('ifPhysAddress.19', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.20', '8c:90:d3:8a:47:71'),
('ifPhysAddress.21', '8c:90:d3:8a:47:71'),
('ifPhysAddress.22', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.23', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.24', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.25', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.26', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.27', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.28', '8c:90:d3:8a:47:69'),
('ifPhysAddress.29', '8c:90:d3:8a:47:69'),
('ifPhysAddress.30', '0c:a4:02:f4:16:50'),
('ifPhysAddress.35684352', '8c:90:d3:90:99:69'),
('ifPhysAddress.69238784', '8c:90:d3:90:98:cc'),
('ifPhysAddress.102793216', '8c:90:d3:90:99:59'),
('ifPhysAddress.136347648', '8c:90:d3:90:99:58'),
('ifPhysAddress.169902080', '0c:a4:02:f4:16:50'),
('ifPhysAddress.337674240', '8c:90:d3:8a:47:69'),
('ifPhysAddress.337707008', '8c:90:d3:8a:47:6a'),
('ifPhysAddress.337739776', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.337772544', '8c:90:d3:8a:47:6c'),
('ifPhysAddress.337805312', '8c:90:d3:8a:47:6d'),
('ifPhysAddress.337838080', '8c:90:d3:8a:47:6e'),
('ifPhysAddress.337870848', '8c:90:d3:8a:47:6f'),
('ifPhysAddress.337903616', '8c:90:d3:8a:47:70'),
('ifPhysAddress.337936384', '8c:90:d3:8a:47:71'),
('ifPhysAddress.337969152', '8c:90:d3:8a:47:72'),
('ifPhysAddress.338001920', '8c:90:d3:8a:47:73'),
('ifPhysAddress.338034688', '8c:90:d3:8a:47:74'),
('ifPhysAddress.369131520', '8c:90:d3:a9:47:f5'),
('ifPhysAddress.402685952', '8c:90:d3:bf:78:8a'),
('ifSpeed.1', 0), # make sure other OIDS pass through OK
('ifSpeed.2', 4294967295),
],
'data': [('ifPhysAddress.1', '\x0c\xa4\x02Pt\x01'),
('ifPhysAddress.2', '\x8c\x90\xd3\x90\x99i'),
('ifPhysAddress.3', '\x00\x00\x00\x00\x00\x00'),
('ifPhysAddress.4', '\x00\x00\x00\x00\x00\x00'),
('ifPhysAddress.5', '\x8c\x90\xd3\x90\x98\xcc'),
('ifPhysAddress.6', '\x8c\x90\xd3\x90\x99Y'),
('ifPhysAddress.7', '\x8c\x90\xd3\x90\x99X'),
('ifPhysAddress.8', '\x8c\x90\xd3\x8aGl'),
('ifPhysAddress.9', '\x8c\x90\xd3\x8aGm'),
('ifPhysAddress.10', '\x8c\x90\xd3\x8aGj'),
('ifPhysAddress.11', '\x0c\xa4\x02\xf4\x16P'),
('ifPhysAddress.12', '\x0c\xa4\x02\xf4\x16P'),
('ifPhysAddress.13', '\x0c\xa4\x02Pt\x01'),
('ifPhysAddress.14', '\x0c\xa4\x02Pt\x01'),
('ifPhysAddress.15', '\x8c\x90\xd3\x8aGr'),
('ifPhysAddress.16', '\x8c\x90\xd3\x8aGs'),
('ifPhysAddress.17', '\x8c\x90\xd3\x8aGt'),
('ifPhysAddress.18', '\x8c\x90\xd3\x8aGo'),
('ifPhysAddress.19', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.20', '\x8c\x90\xd3\x8aGq'),
('ifPhysAddress.21', '\x8c\x90\xd3\x8aGq'),
('ifPhysAddress.22', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.23', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.24', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.25', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.26', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.27', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.28', '\x8c\x90\xd3\x8aGi'),
('ifPhysAddress.29', '\x8c\x90\xd3\x8aGi'),
('ifPhysAddress.30', '\x0c\xa4\x02\xf4\x16P'),
('ifPhysAddress.35684352', '\x8c\x90\xd3\x90\x99i'),
('ifPhysAddress.69238784', '\x8c\x90\xd3\x90\x98\xcc'),
('ifPhysAddress.102793216', '\x8c\x90\xd3\x90\x99Y'),
('ifPhysAddress.136347648', '\x8c\x90\xd3\x90\x99X'),
('ifPhysAddress.169902080', '\x0c\xa4\x02\xf4\x16P'),
('ifPhysAddress.337674240', '\x8c\x90\xd3\x8aGi'),
('ifPhysAddress.337707008', '\x8c\x90\xd3\x8aGj'),
('ifPhysAddress.337739776', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.337772544', '\x8c\x90\xd3\x8aGl'),
('ifPhysAddress.337805312', '\x8c\x90\xd3\x8aGm'),
('ifPhysAddress.337838080', '\x8c\x90\xd3\x8aGn'),
('ifPhysAddress.337870848', '\x8c\x90\xd3\x8aGo'),
('ifPhysAddress.337903616', '\x8c\x90\xd3\x8aGp'),
('ifPhysAddress.337936384', '\x8c\x90\xd3\x8aGq'),
('ifPhysAddress.337969152', '\x8c\x90\xd3\x8aGr'),
('ifPhysAddress.338001920', '\x8c\x90\xd3\x8aGs'),
('ifPhysAddress.338034688', '\x8c\x90\xd3\x8aGt'),
('ifPhysAddress.369131520', '\x8c\x90\xd3\xa9G\xf5'),
('ifPhysAddress.402685952', '\x8c\x90\xd3\xbfx\x8a'),
('ifSpeed.1', 0), # make sure other OIDs pass through OK
('ifSpeed.2', 4294967295),
]
},
}
class TestTranslators(TestCase):
    def test_translators(self):
        """Each translator must reproduce its expected key/value pairs."""
        cases = (
            (IfRefTranslator, 'IfRefTranslator'),
        )
        for translator_cls, oid_name in cases:
            translated = translator_cls().translate(EXAMPLE_DATA[oid_name]['data'])
            self.assertEqual(sorted(EXAMPLE_DATA[oid_name]['check']),
                             sorted(translated))
| 52.491935 | 70 | 0.538024 |
from django.test import TestCase
from esmond.poll import IfRefTranslator
from esmond.api.tests.test_correlator import MockOID
EXAMPLE_DATA = {
'IfRefTranslator':
{'check': [('ifPhysAddress.1', '0c:a4:02:50:74:01'),
('ifPhysAddress.2', '8c:90:d3:90:99:69'),
('ifPhysAddress.3', '00:00:00:00:00:00'),
('ifPhysAddress.4', '00:00:00:00:00:00'),
('ifPhysAddress.5', '8c:90:d3:90:98:cc'),
('ifPhysAddress.6', '8c:90:d3:90:99:59'),
('ifPhysAddress.7', '8c:90:d3:90:99:58'),
('ifPhysAddress.8', '8c:90:d3:8a:47:6c'),
('ifPhysAddress.9', '8c:90:d3:8a:47:6d'),
('ifPhysAddress.10', '8c:90:d3:8a:47:6a'),
('ifPhysAddress.11', '0c:a4:02:f4:16:50'),
('ifPhysAddress.12', '0c:a4:02:f4:16:50'),
('ifPhysAddress.13', '0c:a4:02:50:74:01'),
('ifPhysAddress.14', '0c:a4:02:50:74:01'),
('ifPhysAddress.15', '8c:90:d3:8a:47:72'),
('ifPhysAddress.16', '8c:90:d3:8a:47:73'),
('ifPhysAddress.17', '8c:90:d3:8a:47:74'),
('ifPhysAddress.18', '8c:90:d3:8a:47:6f'),
('ifPhysAddress.19', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.20', '8c:90:d3:8a:47:71'),
('ifPhysAddress.21', '8c:90:d3:8a:47:71'),
('ifPhysAddress.22', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.23', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.24', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.25', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.26', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.27', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.28', '8c:90:d3:8a:47:69'),
('ifPhysAddress.29', '8c:90:d3:8a:47:69'),
('ifPhysAddress.30', '0c:a4:02:f4:16:50'),
('ifPhysAddress.35684352', '8c:90:d3:90:99:69'),
('ifPhysAddress.69238784', '8c:90:d3:90:98:cc'),
('ifPhysAddress.102793216', '8c:90:d3:90:99:59'),
('ifPhysAddress.136347648', '8c:90:d3:90:99:58'),
('ifPhysAddress.169902080', '0c:a4:02:f4:16:50'),
('ifPhysAddress.337674240', '8c:90:d3:8a:47:69'),
('ifPhysAddress.337707008', '8c:90:d3:8a:47:6a'),
('ifPhysAddress.337739776', '8c:90:d3:8a:47:6b'),
('ifPhysAddress.337772544', '8c:90:d3:8a:47:6c'),
('ifPhysAddress.337805312', '8c:90:d3:8a:47:6d'),
('ifPhysAddress.337838080', '8c:90:d3:8a:47:6e'),
('ifPhysAddress.337870848', '8c:90:d3:8a:47:6f'),
('ifPhysAddress.337903616', '8c:90:d3:8a:47:70'),
('ifPhysAddress.337936384', '8c:90:d3:8a:47:71'),
('ifPhysAddress.337969152', '8c:90:d3:8a:47:72'),
('ifPhysAddress.338001920', '8c:90:d3:8a:47:73'),
('ifPhysAddress.338034688', '8c:90:d3:8a:47:74'),
('ifPhysAddress.369131520', '8c:90:d3:a9:47:f5'),
('ifPhysAddress.402685952', '8c:90:d3:bf:78:8a'),
('ifSpeed.1', 0),
('ifSpeed.2', 4294967295),
],
'data': [('ifPhysAddress.1', '\x0c\xa4\x02Pt\x01'),
('ifPhysAddress.2', '\x8c\x90\xd3\x90\x99i'),
('ifPhysAddress.3', '\x00\x00\x00\x00\x00\x00'),
('ifPhysAddress.4', '\x00\x00\x00\x00\x00\x00'),
('ifPhysAddress.5', '\x8c\x90\xd3\x90\x98\xcc'),
('ifPhysAddress.6', '\x8c\x90\xd3\x90\x99Y'),
('ifPhysAddress.7', '\x8c\x90\xd3\x90\x99X'),
('ifPhysAddress.8', '\x8c\x90\xd3\x8aGl'),
('ifPhysAddress.9', '\x8c\x90\xd3\x8aGm'),
('ifPhysAddress.10', '\x8c\x90\xd3\x8aGj'),
('ifPhysAddress.11', '\x0c\xa4\x02\xf4\x16P'),
('ifPhysAddress.12', '\x0c\xa4\x02\xf4\x16P'),
('ifPhysAddress.13', '\x0c\xa4\x02Pt\x01'),
('ifPhysAddress.14', '\x0c\xa4\x02Pt\x01'),
('ifPhysAddress.15', '\x8c\x90\xd3\x8aGr'),
('ifPhysAddress.16', '\x8c\x90\xd3\x8aGs'),
('ifPhysAddress.17', '\x8c\x90\xd3\x8aGt'),
('ifPhysAddress.18', '\x8c\x90\xd3\x8aGo'),
('ifPhysAddress.19', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.20', '\x8c\x90\xd3\x8aGq'),
('ifPhysAddress.21', '\x8c\x90\xd3\x8aGq'),
('ifPhysAddress.22', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.23', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.24', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.25', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.26', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.27', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.28', '\x8c\x90\xd3\x8aGi'),
('ifPhysAddress.29', '\x8c\x90\xd3\x8aGi'),
('ifPhysAddress.30', '\x0c\xa4\x02\xf4\x16P'),
('ifPhysAddress.35684352', '\x8c\x90\xd3\x90\x99i'),
('ifPhysAddress.69238784', '\x8c\x90\xd3\x90\x98\xcc'),
('ifPhysAddress.102793216', '\x8c\x90\xd3\x90\x99Y'),
('ifPhysAddress.136347648', '\x8c\x90\xd3\x90\x99X'),
('ifPhysAddress.169902080', '\x0c\xa4\x02\xf4\x16P'),
('ifPhysAddress.337674240', '\x8c\x90\xd3\x8aGi'),
('ifPhysAddress.337707008', '\x8c\x90\xd3\x8aGj'),
('ifPhysAddress.337739776', '\x8c\x90\xd3\x8aGk'),
('ifPhysAddress.337772544', '\x8c\x90\xd3\x8aGl'),
('ifPhysAddress.337805312', '\x8c\x90\xd3\x8aGm'),
('ifPhysAddress.337838080', '\x8c\x90\xd3\x8aGn'),
('ifPhysAddress.337870848', '\x8c\x90\xd3\x8aGo'),
('ifPhysAddress.337903616', '\x8c\x90\xd3\x8aGp'),
('ifPhysAddress.337936384', '\x8c\x90\xd3\x8aGq'),
('ifPhysAddress.337969152', '\x8c\x90\xd3\x8aGr'),
('ifPhysAddress.338001920', '\x8c\x90\xd3\x8aGs'),
('ifPhysAddress.338034688', '\x8c\x90\xd3\x8aGt'),
('ifPhysAddress.369131520', '\x8c\x90\xd3\xa9G\xf5'),
('ifPhysAddress.402685952', '\x8c\x90\xd3\xbfx\x8a'),
('ifSpeed.1', 0),
('ifSpeed.2', 4294967295),
]
},
}
class TestTranslators(TestCase):
def test_translators(self):
for translator, oid_name in (
(IfRefTranslator, 'IfRefTranslator'),
):
t = translator()
results = sorted(t.translate(EXAMPLE_DATA[oid_name]['data']))
self.assertEqual(sorted(EXAMPLE_DATA[oid_name]['check']), results)
| true | true |
f7ff5cad272e07a73c8415f5d0bab66dc20bb6e8 | 11,678 | py | Python | quidel_covidtest/delphi_quidel_covidtest/pull.py | benjaminysmith/covidcast-indicators | b1474cd68a1497166fefe4beffd4d5ff867b9a61 | [
"MIT"
] | null | null | null | quidel_covidtest/delphi_quidel_covidtest/pull.py | benjaminysmith/covidcast-indicators | b1474cd68a1497166fefe4beffd4d5ff867b9a61 | [
"MIT"
] | null | null | null | quidel_covidtest/delphi_quidel_covidtest/pull.py | benjaminysmith/covidcast-indicators | b1474cd68a1497166fefe4beffd4d5ff867b9a61 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Simply downloads email attachments.
Uses this handy package: https://pypi.org/project/imap-tools/
"""
import io
from os.path import join
import os
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from imap_tools import MailBox, A, AND
def get_from_email(start_date, end_date, mail_server,
                   account, sender, password):
    """
    Get raw data from email account.

    Args:
        start_date: datetime.datetime
            pull data from email received from the start date
        end_date: datetime.datetime
            pull data from email received on/before the end date
        mail_server: str
        account: str
            email account to receive new data
        sender: str
            email account of the sender
        password: str
            password of the datadrop email
    output:
        df: pd.DataFrame
            concatenation of all Sars test attachments in the date range
        time_flag: datetime.datetime or None
            date of the most recent email that contributed data
    """
    time_flag = None
    columns = ['SofiaSerNum', 'TestDate', 'Facility', 'City',
               'State', 'Zip', 'PatientAge', 'Result1', 'Result2',
               'OverallResult', 'County', 'FacilityType', 'Assay',
               'SCO1', 'SCO2', 'CLN', 'CSN', 'InstrType',
               'StorageDate', 'ResultId', 'SarsTestNumber']
    # Collect each attachment as its own frame and concatenate once at the
    # end: DataFrame.append in a loop is deprecated in pandas and quadratic.
    # The empty seed frame preserves the column set when no email matches.
    dfs = [pd.DataFrame(columns=columns)]
    with MailBox(mail_server).login(account, password, 'INBOX') as mailbox:
        for search_date in [start_date + timedelta(days=x)
                            for x in range((end_date - start_date).days + 1)]:
            for message in mailbox.fetch(A(AND(date=search_date.date(), from_=sender))):
                for att in message.attachments:
                    # Only consider covid tests
                    if "Sars" not in att.filename:
                        continue
                    print("Pulling data received on %s"%search_date.date())
                    dfs.append(pd.read_excel(io.BytesIO(att.payload)))
                    time_flag = search_date
    return pd.concat(dfs), time_flag
def fix_zipcode(df):
    """Normalize ZIP+4 codes ("12345-6789") in column ``Zip`` to 5-digit
    integers, storing the result in a new ``zip`` column."""
    cleaned = []
    n_fixed = 0
    for raw in df['Zip'].values:
        if isinstance(raw, str) and '-' in raw:
            # ZIP+4 format: keep only the 5-digit prefix.
            cleaned.append(int(raw.split('-')[0]))
            n_fixed += 1
        else:
            # Plain zipcode, possibly stored as float or numeric string.
            cleaned.append(int(float(raw)))
    df['zip'] = cleaned
    return df
def fix_date(df):
    """
    Remove invalid dates and select correct test date to use.
    Quidel Covid Test are labeled with Test Date and Storage Date. In principle,
    the TestDate should reflect when the test was performed and the StorageDate
    when the test was logged in the MyVirena cloud storage device. We expect
    that the test date should precede the storage date by several days. However,
    in the actual data the test date can be far earlier than the storage date
    and the test date can also occur after the storage date.
    - For most of the cases, use test date as the timestamp
    - Remove tests with a storage date which is earlier than the test date
    - If the storage date is 90 days later than the test date, the storage
      will be adopted instead
    """
    # Default timestamp is the test date (inserted as the third column).
    df.insert(2, "timestamp", df["TestDate"])
    # Drop rows where the test was (impossibly) performed after storage.
    mask = df["TestDate"] <= df["StorageDate"]
    print("Removing %.2f%% of unusual data" % ((len(df) - np.sum(mask)) * 100 / len(df)))
    df = df[mask]
    # For tests logged more than 90 days after the test date, the test date
    # is considered unreliable; fall back to the storage date.
    mask = df["StorageDate"] - df["TestDate"] > pd.Timedelta(days=90)
    print("Fixing %.2f%% of outdated data" % (np.sum(mask) * 100 / len(df)))
    # Writing through .values mutates the underlying array in place,
    # bypassing pandas' chained-assignment machinery — intentional here.
    df["timestamp"].values[mask] = df["StorageDate"].values[mask]
    return df
def preprocess_new_data(start_date, end_date, mail_server, account,
                        sender, password, test_mode):
    """
    Pull and pre-process Quidel Covid Test data from datadrop email.

    Drop unnecessary columns. Temporarily consider the positive rate
    sensor only which is related to number of total tests and number
    of positive tests.

    Args:
        start_date: datetime.datetime
            pull data from email received from the start date
        end_date: datetime.datetime
            pull data from email received on/before the end date
        mail_server: str
        account: str
            email account to receive new data
        sender: str
            email account of the sender
        password: str
            password of the datadrop email
        test_mode: bool
            pull raw data from a local fixture instead of email
    output:
        df: pd.DataFrame
            columns: timestamp, zip, totalTest, numUniqueDevices, positiveTest
        time_flag: datetime.date:
            the actual pull end date on which we successfully pull the data
    """
    if test_mode:
        test_data_dir = "./test_data/test_data.xlsx"
        df, time_flag = pd.read_excel(test_data_dir), datetime(2020, 8, 17)
    else:
        # Get new data from email
        df, time_flag = get_from_email(start_date, end_date, mail_server,
                                       account, sender, password)
    # No new data can be pulled
    if time_flag is None:
        return df, time_flag
    # Fix some of the zipcodes that are 9 digit instead of 5 digit
    df = fix_zipcode(df)
    # Create a timestamp column according to StorageDate and TestDate
    df = fix_date(df)
    # Compute overallPositive: count of "positive" results per (timestamp, zip)
    overall_pos = df[df["OverallResult"] == "positive"].groupby(
        by=["timestamp", "zip"],
        as_index=False)['OverallResult'].count()
    overall_pos = overall_pos.rename(columns={"OverallResult": "positiveTest"})
    # Compute overallTotal: count of all results per (timestamp, zip)
    overall_total = df.groupby(
        by=["timestamp", "zip"],
        as_index=False)['OverallResult'].count()
    overall_total = overall_total.rename(columns={"OverallResult": "totalTest"})
    # Compute numUniqueDevices. The dict-based "nested renamer" form
    # .agg({"SofiaSerNum": "nunique"}) was removed in pandas 1.0
    # (SpecificationError); use named aggregation instead.
    num_unique_devices = df.groupby(
        by=["timestamp", "zip"], as_index=False).agg(
        numUniqueDevices=("SofiaSerNum", "nunique"))
    df_merged = overall_total.merge(
        num_unique_devices, on=["timestamp", "zip"], how="left"
    ).merge(
        overall_pos, on=["timestamp", "zip"], how="left"
    ).fillna(0).drop_duplicates()
    return df_merged, time_flag
def check_intermediate_file(cache_dir, pull_start_date):
    """Return (cached DataFrame, resume date) if a cache csv exists.

    Cache files are named ``pulled_until_<YYYYMMDD>.csv``; the pull resumes
    one day after that stamp. With no cache file, returns
    (None, pull_start_date) unchanged.
    """
    for fname in os.listdir(cache_dir):
        if ".csv" not in fname:
            continue
        stamp = fname.split("_")[2].split(".")[0]
        pull_start_date = datetime.strptime(stamp, '%Y%m%d') + timedelta(days=1)
        cached = pd.read_csv(os.path.join(cache_dir, fname),
                             sep=",", parse_dates=["timestamp"])
        return cached, pull_start_date
    return None, pull_start_date
def pull_quidel_covidtest(params):
    """
    Pull the quidel covid test data and decide whether to combine the new
    data with stored historical records in ./cache.

    Parameters:
        params: dict
            including all the information read from params.json
        end_from_today_minus: int
            report data until - X days
        export_day_range: int
            number of dates to report
    Returns:
        DataFrame:
            A data frame containing the pre-processed data with columns:
            timestamp, numUniqueDevices, positiveTest, totalTest
        datetime.datetime
            the first date of the report
        datetime.datetime
            the last date of the report
    """
    cache_dir = params["cache_dir"]
    mail_server = params["mail_server"]
    account = params["account"]
    password = params["password"]
    sender = params["sender"]
    test_mode = (params["mode"] == "test")
    # pull new data only that has not been ingested
    previous_df, pull_start_date = check_intermediate_file(
        cache_dir,
        datetime.strptime(params["pull_start_date"], '%Y-%m-%d'))
    if params["pull_end_date"] == "":
        pull_end_date = datetime.today()
    else:
        pull_end_date = datetime.strptime(params["pull_end_date"], '%Y-%m-%d')
    # Pull data from the email at 5 digit zipcode level
    # Use _end_date to check the most recent date that we received data
    df, _end_date = preprocess_new_data(
        pull_start_date, pull_end_date, mail_server,
        account, sender, password, test_mode)
    # Utilize previously stored data. DataFrame.append() was removed in
    # pandas 2.0; pd.concat is the supported equivalent.
    if previous_df is not None:
        df = pd.concat([previous_df, df]).groupby(
            ["timestamp", "zip"]).sum().reset_index()
    return df, _end_date
def check_export_end_date(input_export_end_date, _end_date,
                          end_from_today_minus):
    """
    Update the export_end_date according to the data received.

    The default export end date is the last pull date minus
    ``end_from_today_minus`` days; a requested date (if given) is honored
    only when it falls before that default.

    Parameter:
        input_export_end_date: str
            read from params ("" means no explicit request)
        _end_date: datetime.datetime
            updated according the data received
        end_from_today_minus: int
            report data until - X days
    Returns:
        datetime.datetime
            export data until which date
    """
    default_end = _end_date - timedelta(days=end_from_today_minus)
    if input_export_end_date == "":
        return default_end
    requested = datetime.strptime(input_export_end_date, '%Y-%m-%d')
    return min(requested, default_end)
def check_export_start_date(export_start_date, export_end_date,
                            export_day_range):
    """
    Update export_start_date so the window covers at most
    ``export_day_range`` days ending at ``export_end_date``, and never
    starts before 2020-05-26 (first date with usable data).

    Parameters:
        export_start_date: str
            Read from params ("" means use the earliest allowed date)
        export_end_date: datetime.datetime
            Calculated according to the data received
        export_day_range: int
            Number of days to report
    Returns:
        datetime.datetime
            export data from which date
    """
    earliest_allowed = datetime(2020, 5, 26)
    if export_start_date == "":
        start = earliest_allowed
    else:
        start = datetime.strptime(export_start_date, '%Y-%m-%d')
    # Clamp the window length to export_day_range days.
    if (export_end_date - start).days > export_day_range:
        start = export_end_date - timedelta(days=export_day_range)
    return max(start, earliest_allowed)
def update_cache_file(df, _end_date, cache_dir):
    """
    Update cache file. Remove the old one, export the new one.

    Parameter:
        df: pd.DataFrame
            Pre-processed file at ZipCode level
        _end_date:
            The most recent date when the raw data is received
        cache_dir:
            ./cache where the cache file is stored
    """
    for fn in os.listdir(cache_dir):
        if ".csv" in fn:
            os.remove(join(cache_dir, fn))
    # Build the filename before joining: the original applied "%" formatting
    # to the joined path, which would break if cache_dir contained a "%".
    filename = "pulled_until_%s.csv" % _end_date.strftime("%Y%m%d")
    df.to_csv(join(cache_dir, filename), index=False)
| 37.309904 | 120 | 0.626135 |
import io
from os.path import join
import os
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from imap_tools import MailBox, A, AND
def get_from_email(start_date, end_date, mail_server,
account, sender, password):
time_flag = None
df = pd.DataFrame(columns=['SofiaSerNum', 'TestDate', 'Facility', 'City',
'State', 'Zip', 'PatientAge', 'Result1', 'Result2',
'OverallResult', 'County', 'FacilityType', 'Assay',
'SCO1', 'SCO2', 'CLN', 'CSN', 'InstrType',
'StorageDate', 'ResultId', 'SarsTestNumber'])
with MailBox(mail_server).login(account, password, 'INBOX') as mailbox:
for search_date in [start_date + timedelta(days=x)
for x in range((end_date - start_date).days + 1)]:
for message in mailbox.fetch(A(AND(date=search_date.date(), from_=sender))):
for att in message.attachments:
name = att.filename
if "Sars" not in name:
continue
print("Pulling data received on %s"%search_date.date())
toread = io.BytesIO()
toread.write(att.payload)
toread.seek(0)
newdf = pd.read_excel(toread)
df = df.append(newdf)
time_flag = search_date
return df, time_flag
def fix_zipcode(df):
zipcode5 = []
fixnum = 0
for zipcode in df['Zip'].values:
if isinstance(zipcode, str) and '-' in zipcode:
zipcode5.append(int(zipcode.split('-')[0]))
fixnum += 1
else:
zipcode = int(float(zipcode))
zipcode5.append(zipcode)
df['zip'] = zipcode5
return df
def fix_date(df):
df.insert(2, "timestamp", df["TestDate"])
mask = df["TestDate"] <= df["StorageDate"]
print("Removing %.2f%% of unusual data" % ((len(df) - np.sum(mask)) * 100 / len(df)))
df = df[mask]
mask = df["StorageDate"] - df["TestDate"] > pd.Timedelta(days=90)
print("Fixing %.2f%% of outdated data" % (np.sum(mask) * 100 / len(df)))
df["timestamp"].values[mask] = df["StorageDate"].values[mask]
return df
def preprocess_new_data(start_date, end_date, mail_server, account,
sender, password, test_mode):
if test_mode:
test_data_dir = "./test_data/test_data.xlsx"
df, time_flag = pd.read_excel(test_data_dir), datetime(2020, 8, 17)
else:
df, time_flag = get_from_email(start_date, end_date, mail_server,
account, sender, password)
if time_flag is None:
return df, time_flag
df = fix_zipcode(df)
df = fix_date(df)
overall_pos = df[df["OverallResult"] == "positive"].groupby(
by=["timestamp", "zip"],
as_index=False)['OverallResult'].count()
overall_pos["positiveTest"] = overall_pos["OverallResult"]
overall_pos.drop(labels="OverallResult", axis="columns", inplace=True)
overall_total = df.groupby(
by=["timestamp", "zip"],
as_index=False)['OverallResult'].count()
overall_total["totalTest"] = overall_total["OverallResult"]
overall_total.drop(labels="OverallResult", axis="columns", inplace=True)
numUniqueDevices = df.groupby(
by=["timestamp", "zip"],
as_index=False)["SofiaSerNum"].agg({"SofiaSerNum": "nunique"}).rename(
columns={"SofiaSerNum": "numUniqueDevices"}
)
df_merged = overall_total.merge(
numUniqueDevices, on=["timestamp", "zip"], how="left"
).merge(
overall_pos, on=["timestamp", "zip"], how="left"
).fillna(0).drop_duplicates()
return df_merged, time_flag
def check_intermediate_file(cache_dir, pull_start_date):
for filename in os.listdir(cache_dir):
if ".csv" in filename:
pull_start_date = datetime.strptime(filename.split("_")[2].split(".")[0],
'%Y%m%d') + timedelta(days=1)
previous_df = pd.read_csv(os.path.join(cache_dir, filename),
sep=",", parse_dates=["timestamp"])
return previous_df, pull_start_date
return None, pull_start_date
def pull_quidel_covidtest(params):
cache_dir = params["cache_dir"]
mail_server = params["mail_server"]
account = params["account"]
password = params["password"]
sender = params["sender"]
test_mode = (params["mode"] == "test")
previous_df, pull_start_date = check_intermediate_file(
cache_dir,
datetime.strptime(params["pull_start_date"], '%Y-%m-%d'))
if params["pull_end_date"] == "":
pull_end_date = datetime.today()
else:
pull_end_date = datetime.strptime(params["pull_end_date"], '%Y-%m-%d')
df, _end_date = preprocess_new_data(
pull_start_date, pull_end_date, mail_server,
account, sender, password, test_mode)
if previous_df is not None:
df = previous_df.append(df).groupby(["timestamp", "zip"]).sum().reset_index()
return df, _end_date
def check_export_end_date(input_export_end_date, _end_date,
end_from_today_minus):
export_end_date = _end_date - timedelta(days=end_from_today_minus)
if input_export_end_date != "":
input_export_end_date = datetime.strptime(input_export_end_date, '%Y-%m-%d')
if input_export_end_date < export_end_date:
return input_export_end_date
return export_end_date
def check_export_start_date(export_start_date, export_end_date,
export_day_range):
if export_start_date == "":
export_start_date = datetime(2020, 5, 26)
else:
export_start_date = datetime.strptime(export_start_date, '%Y-%m-%d')
if (export_end_date - export_start_date).days > export_day_range:
export_start_date = export_end_date - timedelta(days=export_day_range)
if export_start_date < datetime(2020, 5, 26):
return datetime(2020, 5, 26)
return export_start_date
def update_cache_file(df, _end_date, cache_dir):
for fn in os.listdir(cache_dir):
if ".csv" in fn:
os.remove(join(cache_dir, fn))
df.to_csv(join(cache_dir, "pulled_until_%s.csv") % _end_date.strftime("%Y%m%d"), index=False)
| true | true |
f7ff5e2660887a9dab526dde6ed630940a403a2e | 488 | py | Python | interviewbit-python/anti-diagonals.py | sanketg186/100DaysOfCode | d44c7d4ac7b3fcf0cb91ab027a4cca133e433209 | [
"MIT"
] | null | null | null | interviewbit-python/anti-diagonals.py | sanketg186/100DaysOfCode | d44c7d4ac7b3fcf0cb91ab027a4cca133e433209 | [
"MIT"
] | null | null | null | interviewbit-python/anti-diagonals.py | sanketg186/100DaysOfCode | d44c7d4ac7b3fcf0cb91ab027a4cca133e433209 | [
"MIT"
] | null | null | null | # Anti Diagonals
# Input:
# 1 2 3
# 4 5 6
# 7 8 9
# Return the following :
# [
# [1],
# [2, 4],
# [3, 5, 7],
# [6, 8],
# [9]
# ]
class Solution:
    # @param A : list of list of integers
    # @return a list of list of integers
    def diagonal(self, A):
        """Group the matrix elements into anti-diagonals.

        Cells (i, j) with the same i + j lie on the same anti-diagonal,
        so an r x c matrix has r + c - 1 of them.
        """
        n_rows, n_cols = len(A), len(A[0])
        diagonals = [[] for _ in range(n_rows + n_cols - 1)]
        for i, row in enumerate(A):
            for j, value in enumerate(row):
                diagonals[i + j].append(value)
        return diagonals
| 16.266667 | 43 | 0.438525 |
class Solution:
def diagonal(self, A):
r = len(A)
c = len(A[0])
res = [[] for i in range(0,r+c-1)]
for i in range(0,r):
for j in range(0,c):
res[i+j].append(A[i][j])
return res
| true | true |
f7ff5f0c7aab140b1cc2fc163f8c182922aff288 | 8,919 | py | Python | spring-2019-models/scripts/variables.py | ual/ual_model_workspace | 2debccedb176d1fe5476f6c8c84d86f411612019 | [
"BSD-3-Clause"
] | 1 | 2019-02-20T00:10:49.000Z | 2019-02-20T00:10:49.000Z | spring-2019-models/scripts/variables.py | ual/ual_model_workspace | 2debccedb176d1fe5476f6c8c84d86f411612019 | [
"BSD-3-Clause"
] | 1 | 2019-02-06T00:50:20.000Z | 2019-02-06T00:50:20.000Z | spring-2019-models/scripts/variables.py | ual/ual_model_workspace | 2debccedb176d1fe5476f6c8c84d86f411612019 | [
"BSD-3-Clause"
] | 1 | 2019-02-20T00:22:40.000Z | 2019-02-20T00:22:40.000Z | import orca
from urbansim.utils import misc
#########################
# ZONES VARIABLES #
#########################
# these are primarily used for calculating skim-based
# acccessibilities
@orca.column('zones', cache=True)
def total_jobs(jobs, zones):
return jobs.zone_id_work.groupby(
jobs.zone_id_work).count().reindex(zones.index).fillna(0)
@orca.column('zones')
def sum_residential_units(parcels, buildings, zones):
s = buildings.residential_units.groupby(
buildings.parcel_id).sum().groupby(parcels.zone_id).sum()
return s.reindex(zones.index).fillna(0)
@orca.column('zones', cache=True)
def sum_persons(households, buildings, parcels, zones):
s = households.persons.groupby(
households.building_id).sum().groupby(
buildings.parcel_id).sum().groupby(parcels.zone_id).sum()
return s.reindex(zones.index).fillna(0)
@orca.column('zones', cache=True)
def sum_income(households, buildings, parcels, zones):
s = households.income.groupby(
households.building_id).sum().groupby(
buildings.parcel_id).sum().groupby(parcels.zone_id).sum()
return s.reindex(zones.index).fillna(0)
@orca.column('zones', cache=True)
def avg_income(households, buildings, parcels, zones):
s = households.income.groupby(
households.building_id).mean().groupby(
buildings.parcel_id).mean().groupby(parcels.zone_id).mean()
return s.reindex(zones.index).fillna(0)
############################
# small drive network vars #
############################
@orca.column('parcels')
def node_id_small(parcels, netsmall):
idssmall_parcel = netsmall.get_node_ids(parcels.x, parcels.y)
return idssmall_parcel
@orca.column('rentals')
def node_id_small(rentals, netsmall):
idssmall_rentals = netsmall.get_node_ids(
rentals.longitude, rentals.latitude)
return idssmall_rentals
@orca.column('buildings')
def node_id_small(parcels, buildings):
return misc.reindex(parcels.node_id_small, buildings.parcel_id)
@orca.column('units')
def node_id_small(buildings, units):
return misc.reindex(buildings.node_id_small, units.building_id)
@orca.column('households')
def node_id_small(units, households):
return misc.reindex(units.node_id_small, households.unit_id)
@orca.column('persons')
def node_id_small(households, persons):
return misc.reindex(households.node_id_small, persons.household_id)
@orca.column('jobs')
def node_id_small(buildings, jobs):
return misc.reindex(buildings.node_id_small, jobs.building_id)
###########################
# walk network vars #
###########################
@orca.column('parcels')
def node_id_walk(parcels, netwalk):
idswalk_parcel = netwalk.get_node_ids(parcels.x, parcels.y)
return idswalk_parcel
@orca.column('rentals')
def node_id_walk(rentals, netwalk):
idswalk_rentals = netwalk.get_node_ids(rentals.longitude, rentals.latitude)
return idswalk_rentals
@orca.column('buildings')
def node_id_walk(parcels, buildings):
return misc.reindex(parcels.node_id_walk, buildings.parcel_id)
@orca.column('units')
def node_id_walk(buildings, units):
return misc.reindex(buildings.node_id_walk, units.building_id)
@orca.column('households')
def node_id_walk(units, households):
return misc.reindex(units.node_id_walk, households.unit_id)
@orca.column('persons')
def node_id_walk(households, persons):
return misc.reindex(households.node_id_walk, persons.household_id)
@orca.column('jobs')
def node_id_walk(buildings, jobs):
return misc.reindex(buildings.node_id_walk, jobs.building_id)
###############################
# WLCM dummy columns #
###############################
@orca.column('jobs')
def sector_retail(jobs):
return jobs['sector_id'].isin([44, 45]).astype(int)
@orca.column('jobs')
def sector_healthcare(jobs):
return jobs['sector_id'].isin([62]).astype(int)
@orca.column('jobs')
def sector_tech(jobs):
return jobs['sector_id'].isin([51, 54]).astype(int)
@orca.column('jobs')
def sector_food_and_hosp(jobs):
return jobs['sector_id'].isin([72]).astype(int)
@orca.column('jobs')
def sector_mfg(jobs):
return jobs['sector_id'].isin([31, 32, 33]).astype(int)
@orca.column('jobs')
def sector_edu_serv(jobs):
return jobs['sector_id'].isin([61]).astype(int)
@orca.column('jobs')
def sector_oth_serv(jobs):
return jobs['sector_id'].isin([81]).astype(int)
@orca.column('jobs')
def sector_constr(jobs):
return jobs['sector_id'].isin([23]).astype(int)
@orca.column('jobs')
def sector_gov(jobs):
return jobs['sector_id'].isin([92]).astype(int)
@orca.column('jobs')
def sector_fire(jobs):
return jobs['sector_id'].isin([52, 53]).astype(int)
@orca.column('jobs')
def sector_whlsale(jobs):
return jobs['sector_id'].isin([42]).astype(int)
@orca.column('jobs')
def sector_admin(jobs):
return jobs['sector_id'].isin([56]).astype(int)
@orca.column('jobs')
def sector_transport(jobs):
return jobs['sector_id'].isin([48]).astype(int)
@orca.column('jobs')
def sector_arts(jobs):
return jobs['sector_id'].isin([71]).astype(int)
@orca.column('jobs')
def sector_util(jobs):
return jobs['sector_id'].isin([22]).astype(int)
@orca.column('jobs')
def parcel_id(jobs, buildings):
return misc.reindex(
buildings.parcel_id, jobs.building_id)
@orca.column('persons')
def no_higher_ed(persons):
return (persons['edu'] < 21).astype(int)
@orca.column('persons')
def age_under_45(persons):
return (persons['age'] < 45).astype(int)
@orca.column('households')
def hh_inc_under_25k(households):
return ((
households['income'] < 25000) & (
households['income'] > 10)).astype(int)
@orca.column('households')
def hh_inc_25_to_75k(households):
    # Income band [25k, 75k). The original upper bound compared household
    # *size* (`persons`) against 75000 — always true — so the band was
    # effectively unbounded above; compare income instead.
    return ((
        households['income'] >= 25000) & (
        households['income'] < 75000)).astype(int)
@orca.column('households')
def hh_inc_75_to_200k(households):
return ((
households['income'] >= 75000) & (
households['income'] < 200000)).astype(int)
# cols for WLCM interaction terms
@orca.column('jobs')
def zone_id_work(jobs, parcels):
return misc.reindex(
parcels.zone_id, jobs.parcel_id)
@orca.column('persons')
def zone_id_home(persons, households, units, buildings, parcels):
return misc.reindex(
orca.merge_tables(
households, [households, units, buildings, parcels],
columns=['zone_id'])['zone_id'],
persons.household_id).astype(float)
#########################################
# Auto ownership dummy columns #
#########################################
@orca.column('households')
def tenure_1(households):
return (households['tenure'] == 1).astype(int)
@orca.column('households')
def tenure_2(households):
return (households['tenure'] == 2).astype(int)
@orca.column('households')
def tenure_3(households):
return (households['tenure'] == 3).astype(int)
@orca.column('households')
def tenure_4(households):
return (households['tenure'] == 4).astype(int)
@orca.column('households')
def single_family_int(households):
return households['single_family'].astype(int)
@orca.column('households')
def building_type_2(households):
return (households['building_type'] == 2).astype(int)
###########################
# TOD choice dummy vars #
###########################
@orca.column('households')
def hh_inc_150kplus(households):
    # Dummy: household income of at least $150,000 (>= replaces the
    # equivalent "> 150000 or == 150000" pair).
    return (households['income'] >= 150000).astype(int)
@orca.column('persons')
def lessGED(persons):
return(persons['edu'] < 16).astype(int)
@orca.column('persons')
def GED(persons):
return(persons['edu'].isin([16,17])).astype(int)
@orca.column('persons')
def somebach(persons):
    # NOTE(review): this predicate is byte-identical to GED above
    # (edu in {16, 17}); "some bachelor" presumably targets a different
    # edu range — confirm against the edu coding scheme.
    return(persons['edu'].isin([16,17])).astype(int)
@orca.column('persons')
def Assoc(persons):
return(persons['edu'].isin([20])).astype(int)
@orca.column('persons')
def Bach(persons):
return(persons['edu'].isin([21])).astype(int)
@orca.column('persons')
def female(persons):
return (persons['sex'] - 1)
@orca.column('persons')
def white(persons):
return(persons['race_id'].isin([1.0])).astype(int)
@orca.column('persons')
def minority(persons):
return(persons['white'].isin([0.0])).astype(int)
@orca.column('persons')
def age_16less25(persons):
return((persons.age.between(16,25,inclusive = False)) | (persons.age==16)).astype(int)
@orca.column('households')
def hh_size_1per(households):
return(households.persons.isin([1.0])).astype(int)
@orca.column('jobs')
def finance(jobs):
return jobs['sector_id'].isin([52]).astype(int)
@orca.column('jobs')
def info(jobs):
return jobs['sector_id'].isin([51]).astype(int)
@orca.column('jobs')
def scitech(jobs):
return jobs['sector_id'].isin([54]).astype(int)
| 25.266289 | 90 | 0.666442 | import orca
from urbansim.utils import misc
parcel_id).sum().groupby(parcels.zone_id).sum()
return s.reindex(zones.index).fillna(0)
@orca.column('zones', cache=True)
def sum_income(households, buildings, parcels, zones):
s = households.income.groupby(
households.building_id).sum().groupby(
buildings.parcel_id).sum().groupby(parcels.zone_id).sum()
return s.reindex(zones.index).fillna(0)
@orca.column('zones', cache=True)
def avg_income(households, buildings, parcels, zones):
s = households.income.groupby(
households.building_id).mean().groupby(
buildings.parcel_id).mean().groupby(parcels.zone_id).mean()
return s.reindex(zones.index).fillna(0)
sons')
def node_id_small(households, persons):
return misc.reindex(households.node_id_small, persons.household_id)
@orca.column('jobs')
def node_id_small(buildings, jobs):
return misc.reindex(buildings.node_id_small, jobs.building_id)
unit_id)
@orca.column('persons')
def node_id_walk(households, persons):
return misc.reindex(households.node_id_walk, persons.household_id)
@orca.column('jobs')
def node_id_walk(buildings, jobs):
return misc.reindex(buildings.node_id_walk, jobs.building_id)
a.column('jobs')
def sector_fire(jobs):
return jobs['sector_id'].isin([52, 53]).astype(int)
@orca.column('jobs')
def sector_whlsale(jobs):
return jobs['sector_id'].isin([42]).astype(int)
@orca.column('jobs')
def sector_admin(jobs):
return jobs['sector_id'].isin([56]).astype(int)
@orca.column('jobs')
def sector_transport(jobs):
return jobs['sector_id'].isin([48]).astype(int)
@orca.column('jobs')
def sector_arts(jobs):
return jobs['sector_id'].isin([71]).astype(int)
@orca.column('jobs')
def sector_util(jobs):
return jobs['sector_id'].isin([22]).astype(int)
@orca.column('jobs')
def parcel_id(jobs, buildings):
return misc.reindex(
buildings.parcel_id, jobs.building_id)
@orca.column('persons')
def no_higher_ed(persons):
return (persons['edu'] < 21).astype(int)
@orca.column('persons')
def age_under_45(persons):
return (persons['age'] < 45).astype(int)
@orca.column('households')
def hh_inc_under_25k(households):
return ((
households['income'] < 25000) & (
households['income'] > 10)).astype(int)
@orca.column('households')
def hh_inc_25_to_75k(households):
return ((
households['income'] >= 25000) & (
households['persons'] < 75000)).astype(int)
@orca.column('households')
def hh_inc_75_to_200k(households):
return ((
households['income'] >= 75000) & (
households['income'] < 200000)).astype(int)
@orca.column('jobs')
def zone_id_work(jobs, parcels):
return misc.reindex(
parcels.zone_id, jobs.parcel_id)
@orca.column('persons')
def zone_id_home(persons, households, units, buildings, parcels):
return misc.reindex(
orca.merge_tables(
households, [households, units, buildings, parcels],
columns=['zone_id'])['zone_id'],
persons.household_id).astype(float)
| true | true |
f7ff60911bd3729c133306dd12a054998bd9199f | 4,317 | py | Python | llvmpy/src/MC/__init__.py | KennethNielsen/llvmpy | 70c5957cfd10f1e32a44f28dcb9a4dc72d499c2e | [
"BSD-3-Clause"
] | 140 | 2015-01-07T20:58:12.000Z | 2022-01-21T17:02:21.000Z | llvmpy/src/MC/__init__.py | KennethNielsen/llvmpy | 70c5957cfd10f1e32a44f28dcb9a4dc72d499c2e | [
"BSD-3-Clause"
] | 19 | 2015-01-15T14:45:49.000Z | 2020-09-04T14:58:23.000Z | llvmpy/src/MC/__init__.py | KennethNielsen/llvmpy | 70c5957cfd10f1e32a44f28dcb9a4dc72d499c2e | [
"BSD-3-Clause"
] | 12 | 2015-01-12T01:49:32.000Z | 2020-07-10T22:30:38.000Z | #this file is not processed unless the llvm library is
#version 3.4 or higher. see llvmpy/__init__.py for details.
from binding import *
from ..namespace import llvm
from ..Support.StringRefMemoryObject import MemoryObject
from ..Support.raw_ostream import raw_ostream
from src.ADT.StringRef import StringRef
MCSubtargetInfo = llvm.Class()
MCDisassembler = llvm.Class()
MCInst = llvm.Class()
MCOperand = llvm.Class()
MCExpr = llvm.Class()
MCAsmInfo = llvm.Class()
MCRegisterInfo = llvm.Class()
MCInstrInfo = llvm.Class()
MCInstrAnalysis = llvm.Class()
MCInstPrinter = llvm.Class()
MCInstrDesc = llvm.Class()
TargetSubtargetInfo = llvm.Class(MCSubtargetInfo)
TargetInstrInfo = llvm.Class(MCInstrInfo)
TargetRegisterInfo = llvm.Class(MCRegisterInfo)
@MCInstrDesc
class MCInstrDesc:
    # Binding declaration for llvm::MCInstrDesc (per-opcode instruction
    # metadata). Attributes here describe the C++ API; the binding layer
    # generates the actual wrappers.
    _include_ = "llvm/MC/MCInstrDesc.h"
    # Target-specific flags, exposed to Python as a plain int.
    TSFlags = Attr(getter=cast(Uint64, int), setter=cast(int, Uint64))
    getFlags = Method(cast(Unsigned, int))
    getOpcode = Method(cast(Unsigned, int))
    def _ret_bool():
        # Helper: declares a no-argument method returning bool.
        return Method(cast(Bool, bool))
    # Instruction-classification predicates from llvm::MCInstrDesc.
    isReturn = _ret_bool()
    isCall = _ret_bool()
    isBarrier = _ret_bool()
    isBranch = _ret_bool()
    isTerminator = _ret_bool()
    isIndirectBranch = _ret_bool()
    isConditionalBranch = _ret_bool()
    isUnconditionalBranch = _ret_bool()
@MCSubtargetInfo
class MCSubtargetInfo:
pass
@TargetSubtargetInfo
class TargetSubtargetInfo:
_include_ = 'llvm/Target/TargetSubtargetInfo.h'
@MCExpr
class MCExpr:
_include_ = "llvm/MC/MCExpr.h"
ExprKind = Enum('Binary', 'Constant', 'SymbolRef', 'Unary', 'Target')
getKind = Method(ExprKind)
@MCOperand
class MCOperand:
_include_ = "llvm/MC/MCInst.h"
isValid = Method(cast(Bool, bool))
isReg = Method(cast(Bool, bool))
isImm = Method(cast(Bool, bool))
isFPImm = Method(cast(Bool, bool))
isExpr = Method(cast(Bool, bool))
isInst = Method(cast(Bool, bool))
getReg = Method(cast(Unsigned, int))
getImm = Method(cast(Int64, int))
getFPImm = Method(cast(Double, float))
getExpr = Method(const(ownedptr(MCExpr)))
@MCInst
class MCInst:
_include_ = "llvm/MC/MCInst.h"
new = Constructor()
size = Method(cast(Size_t, int))
getNumOperands = Method(cast(Unsigned, int))
getOperand = Method(const(ref(MCOperand)), cast(int, Unsigned))
getOpcode = Method(cast(Unsigned, int))
MCOperand.getInst = Method(const(ownedptr(MCInst)))
@MCAsmInfo
class MCAsmInfo:
_include_ = "llvm/MC/MCAsmInfo.h"
getAssemblerDialect = Method(cast(Unsigned, int))
getMinInstAlignment = Method(cast(Unsigned, int))
isLittleEndian = Method(cast(Bool, bool))
@MCRegisterInfo
class MCRegisterInfo:
_include_ = "llvm/MC/MCRegisterInfo.h"
getName = Method(cast(ConstCharPtr, str), cast(int, Unsigned))
@TargetRegisterInfo
class TargetRegisterInfo:
_include_ = "llvm/Target/TargetRegisterInfo.h"
@MCInstrInfo
class MCInstrInfo:
_include_ = "llvm/MC/MCInstrInfo.h"
get = Method(const(ref(MCInstrDesc)), cast(int, Unsigned))
@TargetInstrInfo
class TargetInstrInfo:
_include_ = 'llvm/Target/TargetInstrInfo.h'
@MCInstrAnalysis
class MCInstrAnalysis:
_include_ = "llvm/MC/MCInstrAnalysis.h"
def _take_mcinst_ret_bool():
return Method(cast(Bool, bool), const(ref(MCInst)))
isBranch = _take_mcinst_ret_bool()
isConditionalBranch = _take_mcinst_ret_bool()
isUnconditionalBranch = _take_mcinst_ret_bool()
isIndirectBranch = _take_mcinst_ret_bool()
isCall = _take_mcinst_ret_bool()
isReturn = _take_mcinst_ret_bool()
isTerminator = _take_mcinst_ret_bool()
@MCInstPrinter
class MCInstPrinter:
_include_ = "llvm/MC/MCInstPrinter.h"
printInst = Method(Void,
const(ptr(MCInst)), #MI
ref(raw_ostream), #OS
cast(str, StringRef) #Annot
)
@MCDisassembler
class MCDisassembler:
    # Binding declaration for llvm::MCDisassembler: decodes raw bytes from
    # a MemoryObject into an MCInst.
    _include_ = "llvm/MC/MCDisassembler.h"
    # Decode outcome; SoftFail means the instruction decoded but is
    # potentially invalid per the LLVM API.
    DecodeStatus = Enum('Fail', 'SoftFail', 'Success')
    # Custom wrapper (hand-written C++ shim): args are the output MCInst,
    # the byte source, and the address to decode at.
    getInstruction = CustomMethod('MCDisassembler_getInstruction',
                                  PyObjectPtr,
                                  ref(MCInst),
                                  ref(MemoryObject),
                                  cast(int, Uint64)
                                  )
| 27.322785 | 73 | 0.67987 |
from binding import *
from ..namespace import llvm
from ..Support.StringRefMemoryObject import MemoryObject
from ..Support.raw_ostream import raw_ostream
from src.ADT.StringRef import StringRef
MCSubtargetInfo = llvm.Class()
MCDisassembler = llvm.Class()
MCInst = llvm.Class()
MCOperand = llvm.Class()
MCExpr = llvm.Class()
MCAsmInfo = llvm.Class()
MCRegisterInfo = llvm.Class()
MCInstrInfo = llvm.Class()
MCInstrAnalysis = llvm.Class()
MCInstPrinter = llvm.Class()
MCInstrDesc = llvm.Class()
TargetSubtargetInfo = llvm.Class(MCSubtargetInfo)
TargetInstrInfo = llvm.Class(MCInstrInfo)
TargetRegisterInfo = llvm.Class(MCRegisterInfo)
@MCInstrDesc
class MCInstrDesc:
_include_ = "llvm/MC/MCInstrDesc.h"
TSFlags = Attr(getter=cast(Uint64, int), setter=cast(int, Uint64))
getFlags = Method(cast(Unsigned, int))
getOpcode = Method(cast(Unsigned, int))
def _ret_bool():
return Method(cast(Bool, bool))
isReturn = _ret_bool()
isCall = _ret_bool()
isBarrier = _ret_bool()
isBranch = _ret_bool()
isTerminator = _ret_bool()
isIndirectBranch = _ret_bool()
isConditionalBranch = _ret_bool()
isUnconditionalBranch = _ret_bool()
@MCSubtargetInfo
class MCSubtargetInfo:
pass
@TargetSubtargetInfo
class TargetSubtargetInfo:
_include_ = 'llvm/Target/TargetSubtargetInfo.h'
@MCExpr
class MCExpr:
_include_ = "llvm/MC/MCExpr.h"
ExprKind = Enum('Binary', 'Constant', 'SymbolRef', 'Unary', 'Target')
getKind = Method(ExprKind)
@MCOperand
class MCOperand:
_include_ = "llvm/MC/MCInst.h"
isValid = Method(cast(Bool, bool))
isReg = Method(cast(Bool, bool))
isImm = Method(cast(Bool, bool))
isFPImm = Method(cast(Bool, bool))
isExpr = Method(cast(Bool, bool))
isInst = Method(cast(Bool, bool))
getReg = Method(cast(Unsigned, int))
getImm = Method(cast(Int64, int))
getFPImm = Method(cast(Double, float))
getExpr = Method(const(ownedptr(MCExpr)))
@MCInst
class MCInst:
_include_ = "llvm/MC/MCInst.h"
new = Constructor()
size = Method(cast(Size_t, int))
getNumOperands = Method(cast(Unsigned, int))
getOperand = Method(const(ref(MCOperand)), cast(int, Unsigned))
getOpcode = Method(cast(Unsigned, int))
MCOperand.getInst = Method(const(ownedptr(MCInst)))
@MCAsmInfo
class MCAsmInfo:
_include_ = "llvm/MC/MCAsmInfo.h"
getAssemblerDialect = Method(cast(Unsigned, int))
getMinInstAlignment = Method(cast(Unsigned, int))
isLittleEndian = Method(cast(Bool, bool))
@MCRegisterInfo
class MCRegisterInfo:
_include_ = "llvm/MC/MCRegisterInfo.h"
getName = Method(cast(ConstCharPtr, str), cast(int, Unsigned))
@TargetRegisterInfo
class TargetRegisterInfo:
_include_ = "llvm/Target/TargetRegisterInfo.h"
@MCInstrInfo
class MCInstrInfo:
_include_ = "llvm/MC/MCInstrInfo.h"
get = Method(const(ref(MCInstrDesc)), cast(int, Unsigned))
@TargetInstrInfo
class TargetInstrInfo:
_include_ = 'llvm/Target/TargetInstrInfo.h'
@MCInstrAnalysis
class MCInstrAnalysis:
_include_ = "llvm/MC/MCInstrAnalysis.h"
def _take_mcinst_ret_bool():
return Method(cast(Bool, bool), const(ref(MCInst)))
isBranch = _take_mcinst_ret_bool()
isConditionalBranch = _take_mcinst_ret_bool()
isUnconditionalBranch = _take_mcinst_ret_bool()
isIndirectBranch = _take_mcinst_ret_bool()
isCall = _take_mcinst_ret_bool()
isReturn = _take_mcinst_ret_bool()
isTerminator = _take_mcinst_ret_bool()
@MCInstPrinter
class MCInstPrinter:
_include_ = "llvm/MC/MCInstPrinter.h"
printInst = Method(Void,
const(ptr(MCInst)),
ref(raw_ostream),
cast(str, StringRef)
)
@MCDisassembler
class MCDisassembler:
_include_ = "llvm/MC/MCDisassembler.h"
DecodeStatus = Enum('Fail', 'SoftFail', 'Success')
getInstruction = CustomMethod('MCDisassembler_getInstruction',
PyObjectPtr,
ref(MCInst),
ref(MemoryObject),
cast(int, Uint64)
)
| true | true |
f7ff637ca69f18fad1d65b1ecc8628716b6b449e | 8,645 | py | Python | code/src/main/python/analysis/helpers/ast_utils.py | anonfse/COSAL_Anonymized | 709906294fd775131f3e019862bbdd554d83773d | [
"Unlicense"
] | null | null | null | code/src/main/python/analysis/helpers/ast_utils.py | anonfse/COSAL_Anonymized | 709906294fd775131f3e019862bbdd554d83773d | [
"Unlicense"
] | 1 | 2021-11-03T08:28:31.000Z | 2021-11-03T08:28:31.000Z | code/src/main/python/analysis/helpers/ast_utils.py | anonfse/COSAL_Anonymized | 709906294fd775131f3e019862bbdd554d83773d | [
"Unlicense"
] | 1 | 2022-03-22T14:24:13.000Z | 2022-03-22T14:24:13.000Z | import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "COSAL"
import ast
import astor
import astpretty
import asttokens
import keyword
import textwrap
import tokenize
from io import StringIO
__KEYWORDS = None
IGNORE_TERMS = {"init", "args", "kwargs", "kwds", "self"}
def get_keywords():
  """Return the cached set of Python keywords merged with IGNORE_TERMS."""
  global __KEYWORDS
  if __KEYWORDS is None:
    # Build once on first use; IGNORE_TERMS is read-only here.
    __KEYWORDS = set(keyword.kwlist) | IGNORE_TERMS
  return __KEYWORDS
def get_future_import_str():
  """Import line that backports the py3 print() function to py2 sources."""
  return "from __future__ import print_function"
def parse(source_code, use_ast_tokens=False):
  """Parse *source_code* into an AST.

  On a SyntaxError the source is retried with a __future__
  print_function import prepended (helps py2-style sources); None is
  returned if it still fails.  With use_ast_tokens=True the tree is
  built via asttokens and carries the ASTTokens object as
  ``asttokens_obj``.
  """
  try:
    tree = ast.parse(source_code)
  except SyntaxError:
    source_code = "{}\n{}".format(get_future_import_str(), source_code)
    try:
      tree = ast.parse(source_code)
    except SyntaxError:
      return None
  if not use_ast_tokens:
    return tree
  tokenized = asttokens.ASTTokens(source_code, tree=tree)
  tokenized.tree.asttokens_obj = tokenized
  return tokenized.tree
def parse_function(source_code, use_ast_tokens=False):
  """Parse *source_code* and return its first top-level function.

  :param source_code: Source code as string
  :param use_ast_tokens: If true use `asttokens` library to parse code.
  :return: the first top-level ast.FunctionDef, or None when parsing
           fails or no function definition exists at module level
  """
  module = parse(source_code, use_ast_tokens=use_ast_tokens)
  if not module:
    return None
  for stmt in module.body:
    if isinstance(stmt, ast.FunctionDef):
      return stmt
  return None
def update_function_name(func_node, name):
  """Return a renamed copy of a function AST.

  :param func_node: instance of `ast.FunctionDef`
  :param name: new name for the function
  :return: fresh ast.FunctionDef sharing args/body with *func_node*
  """
  decorators = getattr(func_node, "decorator_list", [])
  return ast.FunctionDef(
      name=name,
      args=func_node.args,
      body=func_node.body,
      decorator_list=decorators,
  )
def convert_ast_to_code(node, asttokens_obj=None):
  """Render an AST node back into source text.

  :param node: AST node to render
  :param asttokens_obj: optional `asttokens.ASTTokens`; when given, the
      node's original text span is returned instead of astor's
      re-rendering.
  :return: source code string
  """
  if not asttokens_obj:
    return astor.to_source(node)
  return asttokens_obj.get_text(node)
def get_func_node(func_obj, use_ast_tokens=False):
  """Coerce *func_obj* into an `ast.FunctionDef`.

  Fixes a py2 leftover: the old membership test
  ``type(func_obj) in [str, unicode]`` raised NameError on Python 3,
  where `unicode` no longer exists, before the intended RuntimeError
  could ever be reached.

  :param func_obj: Instance of `ast.FunctionDef` or a string
  :param use_ast_tokens: If true use `asttokens` library to parse code.
  :return: Instance of `ast.FunctionDef`
  :raises RuntimeError: for any other input type
  """
  if isinstance(func_obj, ast.FunctionDef):
    return func_obj
  if isinstance(func_obj, str):
    return parse_function(func_obj, use_ast_tokens=use_ast_tokens)
  raise RuntimeError("@COSAL: Unsupported function type for function '%s'!!" % type(func_obj).__name__)
def get_function_body_as_str(func_obj, as_lst=False, use_ast_tokens=False):
  """Return the statements of a function's body as source text.

  :param func_obj: Instance of `ast.FunctionDef` or a string
  :param as_lst: when True, return one string per body statement
  :param use_ast_tokens: If true use `asttokens` library to parse code.
  :return: list of strings, a single stripped string, or None when
           there is no function or the body is empty
  """
  func_node = get_func_node(func_obj, use_ast_tokens=use_ast_tokens)
  if not func_node or not func_node.body:
    return None
  body = [convert_ast_to_code(stmt) for stmt in func_node.body]
  if as_lst:
    return body
  return "".join(body).strip()
def get_func_name(func_obj, use_ast_tokens=False):
  """Return the declared name of a function, or None when unavailable.

  :param func_obj: Instance of `ast.FunctionDef` or a string
  :param use_ast_tokens: If true use `asttokens` library to parse code.
  :return: function name string or None
  """
  func_node = get_func_node(func_obj, use_ast_tokens=use_ast_tokens)
  return func_node.name if func_node else None
def get_arg_names(func_obj, use_ast_tokens=False):
  """Return the positional argument names of a function, or None.

  Fixes two issues: on Python 3 the entries of ``args.args`` are
  `ast.arg` nodes whose name lives in ``.arg`` (the old ``.id`` access
  raised AttributeError), and the `use_ast_tokens` flag is now
  forwarded to `get_func_node` instead of being silently dropped.

  :param func_obj: Instance of `ast.FunctionDef` or a string
  :param use_ast_tokens: If true use `asttokens` library to parse code.
  :return: list of argument names, or None when there are none
  """
  func_node = get_func_node(func_obj, use_ast_tokens=use_ast_tokens)
  if func_node.args and func_node.args.args:
    return [arg.arg for arg in func_node.args.args]
  return None
def pretty_print(node, indent=' '*2, show_offsets=False):
  """
  Pretty print node
  :param node: Instance of `ast.Node`
  :param indent: Number of spaces to indent
  :param show_offsets: Show offsets. Boolean
  :return:
  """
  # Thin wrapper over the third-party `astpretty` formatter; output
  # goes straight to stdout.
  astpretty.pprint(node, indent=indent, show_offsets=show_offsets)
def parse_print(node, annotate_fields=True, include_attributes=True):
  """Print `ast.dump()` of *node* to stdout.

  :param node: instance of `ast.AST`
  :param annotate_fields: include field names in the dump
  :param include_attributes: include lineno/col_offset attributes
  """
  dumped = ast.dump(node, annotate_fields, include_attributes)
  print(dumped)
def has_return_statement(func_obj):
  """Check whether the function contains any `return` statement.

  :param func_obj: Instance of `ast.FunctionDef` or a string
  :return: True / False
  """
  func_node = get_func_node(func_obj)
  for descendant in ast.walk(func_node):
    if isinstance(descendant, ast.Return):
      return True
  return False
def create_function(name, args, body):
  """Build an undecorated function-definition node.

  :param name: function name
  :param args: `ast.arguments` node
  :param body: list of statement nodes
  :return: Instance of `ast.FunctionDef`
  """
  node = ast.FunctionDef(
      name=name,
      args=args,
      body=body,
      decorator_list=[],
  )
  return node
def create_return_statement(var_name=None):
  """Build a return-statement node.

  :param var_name: name of the variable to return; a falsy value
      produces a bare ``return``
  :return: Instance of `ast.Return`
  """
  if not var_name:
    return ast.Return()
  return ast.Return(value=ast.Name(id=var_name))
def remove_comments_and_docstrings(source):
  """
  Returns 'source' minus comments and docstrings.

  Fix: `tokenize.generate_tokens` consumes *text* lines, so the source
  is wrapped in StringIO unchanged; the old ``source.encode("utf-8")``
  produced bytes and made StringIO raise TypeError on Python 3.
  """
  io_obj = StringIO(source)
  out = ""
  prev_toktype = tokenize.INDENT
  last_lineno = -1
  last_col = 0
  for tok in tokenize.generate_tokens(io_obj.readline):
    token_type = tok[0]
    token_string = tok[1]
    start_line, start_col = tok[2]
    end_line, end_col = tok[3]
    # The following two conditionals preserve indentation.
    # This is necessary because we're not using tokenize.untokenize()
    # (because it spits out code with copious amounts of oddly-placed
    # whitespace).
    if start_line > last_lineno:
      last_col = 0
    if start_col > last_col:
      out += (" " * (start_col - last_col))
    # Remove comments:
    if token_type == tokenize.COMMENT:
      pass
    # This series of conditionals removes docstrings:
    elif token_type == tokenize.STRING:
      if prev_toktype != tokenize.INDENT:
        # This is likely a docstring; double-check we're not inside an operator:
        if prev_toktype != tokenize.NEWLINE:
          # Note regarding NEWLINE vs NL: The tokenize module
          # differentiates between newlines that start a new statement
          # and newlines inside of operators such as parens, brackets,
          # and curly braces. Newlines inside of operators are
          # NEWLINE and newlines that start new code are NL.
          # Catch whole-module docstrings:
          if start_col > 0:
            # Unlabelled indentation means we're inside an operator
            out += token_string
          # Note regarding the INDENT token: The tokenize module does
          # not label indentation inside of an operator (parens,
          # brackets, and curly braces) as actual indentation.
    else:
      out += token_string
    prev_toktype = token_type
    last_col = end_col
    last_lineno = end_line
  return out
def get_comments(source_code):
  """
  Return the text of every ``#`` comment in *source_code*.

  Fix: StringIO must receive str on Python 3 — the old
  ``source_code.encode("utf-8")`` produced bytes and raised TypeError.

  :param source_code: Source code as text
  :return: Comments in source code as a list (``#`` stripped)
  """
  io_obj = StringIO(source_code)
  comments = []
  for tok in tokenize.generate_tokens(io_obj.readline):
    token_type = tok[0]
    token_string = tok[1]
    if token_type == tokenize.COMMENT:
      comments.append(token_string.replace("#", " ").strip())
  return comments
def get_docstring(node):
  """Return the docstring for *node*, or None.

  `ast.get_docstring` raises TypeError for node types that cannot
  carry a docstring; that case is mapped to None as well.

  :param node: AST node (module, class or function)
  :return: docstring text or None
  """
  try:
    doc = ast.get_docstring(node)
  except TypeError:
    doc = None
  return doc
def format_code(source_code):
  """Strip common leading whitespace from *source_code*.

  :param source_code: Source code as str
  :return: dedented source code as str
  """
  dedented = textwrap.dedent(source_code)
  return dedented
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "COSAL"
import ast
import astor
import astpretty
import asttokens
import keyword
import textwrap
import tokenize
from io import StringIO
__KEYWORDS = None
IGNORE_TERMS = {"init", "args", "kwargs", "kwds", "self"}
def get_keywords():
global __KEYWORDS, IGNORE_TERMS
if __KEYWORDS is None:
__KEYWORDS = set(keyword.kwlist).union(IGNORE_TERMS)
return __KEYWORDS
def get_future_import_str():
return "from __future__ import print_function"
def parse(source_code, use_ast_tokens=False):
try:
tree = ast.parse(source_code)
except SyntaxError:
source_code = "%s\n%s" % (get_future_import_str(), source_code)
try:
tree = ast.parse(source_code)
except SyntaxError as e:
return None
if use_ast_tokens:
ast_tokenized = asttokens.ASTTokens(source_code, tree=tree)
ast_tokenized.tree.asttokens_obj = ast_tokenized
return ast_tokenized.tree
return tree
def parse_function(source_code, use_ast_tokens=False):
parsed = parse(source_code, use_ast_tokens=use_ast_tokens)
if parsed and len(parsed.body) > 0:
for node in parsed.body:
if isinstance(node, ast.FunctionDef):
return node
return None
def update_function_name(func_node, name):
return ast.FunctionDef(
name=name,
args=func_node.args,
body=func_node.body,
decorator_list=func_node.decorator_list if hasattr(func_node, 'decorator_list') else []
)
def convert_ast_to_code(node, asttokens_obj=None):
if asttokens_obj:
return asttokens_obj.get_text(node)
return astor.to_source(node)
def get_func_node(func_obj, use_ast_tokens=False):
if type(func_obj) == ast.FunctionDef:
return func_obj
elif type(func_obj) in [str, unicode]:
return parse_function(func_obj, use_ast_tokens=use_ast_tokens)
else:
raise RuntimeError("@COSAL: Unsupported function type for function '%s'!!" % type(func_obj).__name__)
def get_function_body_as_str(func_obj, as_lst=False, use_ast_tokens=False):
func_node = get_func_node(func_obj, use_ast_tokens=use_ast_tokens)
if not func_node or not func_node.body:
return None
body = []
for stmt in func_node.body:
body.append(convert_ast_to_code(stmt))
if as_lst:
return body
return "".join(body).strip()
def get_func_name(func_obj, use_ast_tokens=False):
func_node = get_func_node(func_obj, use_ast_tokens=use_ast_tokens)
if not func_node:
return None
return func_node.name
def get_arg_names(func_obj, use_ast_tokens=False):
func_node = get_func_node(func_obj)
if func_node.args and func_node.args.args:
return [arg.id for arg in func_node.args.args]
return None
def pretty_print(node, indent=' '*2, show_offsets=False):
astpretty.pprint(node, indent=indent, show_offsets=show_offsets)
def parse_print(node, annotate_fields=True, include_attributes=True):
print(ast.dump(node, annotate_fields, include_attributes))
def has_return_statement(func_obj):
func_node = get_func_node(func_obj)
return any(isinstance(node, ast.Return) for node in ast.walk(func_node))
def create_function(name, args, body):
return ast.FunctionDef(name=name, args=args, body=body, decorator_list=[])
def create_return_statement(var_name=None):
if var_name:
return ast.Return(value=ast.Name(id=var_name))
else:
return ast.Return()
def remove_comments_and_docstrings(source):
io_obj = StringIO(source.encode("utf-8"))
out = ""
prev_toktype = tokenize.INDENT
last_lineno = -1
last_col = 0
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
start_line, start_col = tok[2]
end_line, end_col = tok[3]
ltext = tok[4]
# (because it spits out code with copious amounts of oddly-placed
# whitespace).
if start_line > last_lineno:
last_col = 0
if start_col > last_col:
out += (" " * (start_col - last_col))
# Remove comments:
if token_type == tokenize.COMMENT:
pass
# This series of conditionals removes docstrings:
elif token_type == tokenize.STRING:
if prev_toktype != tokenize.INDENT:
# This is likely a docstring; double-check we're not inside an operator:
if prev_toktype != tokenize.NEWLINE:
if start_col > 0:
out += token_string
# Note regarding the INDENT token: The tokenize module does
# not label indentation inside of an operator (parens,
# brackets, and curly braces) as actual indentation.
# For example:
# def foo():
# "The spaces before this docstring are tokenize.INDENT"
# test = [
# "The spaces before this string do not get a token"
# ]
else:
out += token_string
prev_toktype = token_type
last_col = end_col
last_lineno = end_line
return out
def get_comments(source_code):
io_obj = StringIO(source_code.encode("utf-8"))
comments = []
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
if token_type == tokenize.COMMENT:
comments.append(token_string.replace("#", " ").strip())
return comments
def get_docstring(node):
try:
return ast.get_docstring(node)
except TypeError:
return None
def format_code(source_code):
return textwrap.dedent(source_code) | true | true |
f7ff63d694a47b6fa751918e37b20339dee82a76 | 5,155 | py | Python | doex/rcbd.py | rohitsanj/doe | d1fe3629dfe3fb789dfe42b072c2682581a9ae90 | [
"BSD-3-Clause"
] | 11 | 2020-10-15T12:11:00.000Z | 2022-01-17T06:45:36.000Z | doex/rcbd.py | rohitsanj/doe | d1fe3629dfe3fb789dfe42b072c2682581a9ae90 | [
"BSD-3-Clause"
] | 16 | 2020-10-15T12:39:11.000Z | 2020-11-03T17:37:09.000Z | doex/rcbd.py | rohitsanj/doe | d1fe3629dfe3fb789dfe42b072c2682581a9ae90 | [
"BSD-3-Clause"
] | 1 | 2020-10-15T13:31:23.000Z | 2020-10-15T13:31:23.000Z | import numpy as np
from .utils import p_value, create_anova_table, multiple_comparisons
class RandomizedCompleteBlockDesign:
    """Randomized complete block design (two-way ANOVA without interaction).
    *data* is a 2-D table laid out as (treatments x blocks); the constructor
    partitions total variability into treatment, block and error sums of
    squares, computes F statistics and p-values, and prints the ANOVA table.
    """
    def __init__(self, data):
        # Observations arranged as one row per treatment, one column per block.
        self.data = np.array(data)
        n_treatments, n_blocks = self.data.shape
        # The missing-values subclass sets num_missing before delegating here;
        # each estimated cell costs one error degree of freedom.
        if hasattr(self, "num_missing"):
            num_missing = self.num_missing
        else:
            num_missing = 0
        # N = total number of observations.
        N = 0
        for entry in self.data:
            N += len(entry)
        # Correction factor CF = (grand total)^2 / N.
        self.correction_factor = np.square(np.sum(self.data)) / N
        # Calculate Sum of Squares
        self.row_totals = np.sum(self.data, axis=1)
        self.ss_treatments = np.sum(np.square(self.row_totals)) / n_blocks
        self.ss_treatments = self.ss_treatments - self.correction_factor
        self.column_totals = np.sum(self.data, axis=0)
        self.ss_blocks = np.sum(np.square(self.column_totals)) / n_treatments
        self.ss_blocks = self.ss_blocks - self.correction_factor
        self.ss_total = np.sum(np.square(self.data)) - self.correction_factor
        # Error SS = variability not explained by treatments or blocks.
        self.ss_error = self.ss_total - (self.ss_treatments + self.ss_blocks)
        # Calculate Degrees of Freedom
        self.dof_treatments = n_treatments - 1
        self.dof_blocks = n_blocks - 1
        self.dof_total = N - 1
        self.dof_error = self.dof_total - (self.dof_treatments + self.dof_blocks + num_missing)
        # Calculate Mean Sum of Squares
        self.mss_treatments = self.ss_treatments / self.dof_treatments
        self.mss_blocks = self.ss_blocks / self.dof_blocks
        self.mss_error = self.ss_error / self.dof_error
        # F ratios test treatment/block effects against error;
        # p-values come from the project's .utils helper.
        self.f_treatments = self.mss_treatments / self.mss_error
        self.f_blocks = self.mss_blocks / self.mss_error
        self.p_treatments = p_value(self.f_treatments, self.dof_treatments, self.dof_error)
        self.p_blocks = p_value(self.f_blocks, self.dof_blocks, self.dof_error)
        # Display results
        self.table = self._create_table()
        print(self.table)
    def multiple_comparisons(self):
        """Print pairwise comparisons of all treatments (delegates to .utils)."""
        # Multiple comparisons
        n_treatments, _ = self.data.shape
        print(
            multiple_comparisons(
                list(range(1, n_treatments + 1)),
                self.data,
                self.dof_error,
                np.sqrt(self.mss_error),
            )
        )
    def _create_table(self):
        """Build the ANOVA table (source, dof, SS, MSS, F, p) for display."""
        table = create_anova_table()
        rows = [
            [
                "Treatments",
                self.dof_treatments,
                self.ss_treatments,
                self.mss_treatments,
                self.f_treatments,
                self.p_treatments,
            ],
            [
                "Blocks",
                self.dof_blocks,
                self.ss_blocks,
                self.mss_blocks,
                self.f_blocks,
                self.p_blocks,
            ],
            ["Error", self.dof_error, self.ss_error, self.mss_error, "", ""],
            ["Total", self.dof_total, self.ss_total, "", "", ""],
        ]
        for row in rows:
            table.add_row(row)
        return table
# An RCBD analysis is a two-way ANOVA, hence the alias.
TwoWayANOVA = RandomizedCompleteBlockDesign
class RandomizedCompleteBlockDesign_MissingValues(RandomizedCompleteBlockDesign):
    """RCBD that first replaces up to two missing (NaN) cells with their
    least-squares estimates, then runs the normal analysis; each estimate
    removes one error degree of freedom via self.num_missing.
    """
    def __init__(self, data):
        self.data = np.array(data)
        n_treatments, n_blocks = self.data.shape
        # Count and locate the NaN cells, then fill them in place.
        self.num_missing = np.count_nonzero(np.isnan(self.data))
        missing_locations = np.argwhere(np.isnan(self.data))
        self.handle_missing(self.data, missing_locations)
        print("Data after adjusting for {} missing value(s)".format(self.num_missing))
        print(self.data)
        # Continue with RCBD analysis
        super().__init__(self.data)
    def handle_missing(self, data, locations):
        """Dispatch to the 1- or 2-value estimator; other counts are errors."""
        if len(locations) == 1:
            return self._missing_1_value(data, locations[0])
        elif len(locations) == 2:
            return self._missing_2_values(data, locations)
        else:
            raise Exception("Data must have either 1 or 2 missing values")
    def _missing_1_value(self, data, location):
        """Estimate one missing cell: y = (r*B + k*T - G) / ((r-1)(k-1))."""
        k, r = data.shape  # k treatments, r replications
        i, j = location
        # Grand, row and column totals over the non-NaN entries.
        G = np.nansum(data)
        treatments_sum = np.nansum(data[i, :])
        blocks_sum = np.nansum(data[:, j])
        self.data[i, j] = (r * blocks_sum + k * treatments_sum - G) / ((r - 1) * (k - 1))
    def _missing_2_values(self, data, locations):
        """Jointly estimate two missing cells by solving the coupled
        least-squares equations for both positions at once."""
        k, r = data.shape  # k treatments, r replications
        y1_loc, y2_loc = locations
        i, j = y1_loc
        m, j_1 = y2_loc
        G = np.nansum(data)
        Ti = np.nansum(data[i, :])
        Tm = np.nansum(data[m, :])
        Bj = np.nansum(data[:, j])
        Bj_1 = np.nansum(data[:, j_1])
        y1_estimate = ((k - 1) * (r - 1) * (k * Ti + r * Bj - G) - (k * Tm + r * Bj_1 - G)) / (
            np.square(r - 1) * np.square(k - 1) - 1
        )
        y2_estimate = ((k - 1) * (r - 1) * (k * Tm + r * Bj_1 - G) - (k * Ti + r * Bj - G)) / (
            np.square(r - 1) * np.square(k - 1) - 1
        )
        self.data[y1_loc[0], y1_loc[1]] = y1_estimate
        self.data[y2_loc[0], y2_loc[1]] = y2_estimate
| 32.21875 | 95 | 0.576528 | import numpy as np
from .utils import p_value, create_anova_table, multiple_comparisons
class RandomizedCompleteBlockDesign:
def __init__(self, data):
self.data = np.array(data)
n_treatments, n_blocks = self.data.shape
if hasattr(self, "num_missing"):
num_missing = self.num_missing
else:
num_missing = 0
N = 0
for entry in self.data:
N += len(entry)
self.correction_factor = np.square(np.sum(self.data)) / N
self.row_totals = np.sum(self.data, axis=1)
self.ss_treatments = np.sum(np.square(self.row_totals)) / n_blocks
self.ss_treatments = self.ss_treatments - self.correction_factor
self.column_totals = np.sum(self.data, axis=0)
self.ss_blocks = np.sum(np.square(self.column_totals)) / n_treatments
self.ss_blocks = self.ss_blocks - self.correction_factor
self.ss_total = np.sum(np.square(self.data)) - self.correction_factor
self.ss_error = self.ss_total - (self.ss_treatments + self.ss_blocks)
self.dof_treatments = n_treatments - 1
self.dof_blocks = n_blocks - 1
self.dof_total = N - 1
self.dof_error = self.dof_total - (self.dof_treatments + self.dof_blocks + num_missing)
self.mss_treatments = self.ss_treatments / self.dof_treatments
self.mss_blocks = self.ss_blocks / self.dof_blocks
self.mss_error = self.ss_error / self.dof_error
self.f_treatments = self.mss_treatments / self.mss_error
self.f_blocks = self.mss_blocks / self.mss_error
self.p_treatments = p_value(self.f_treatments, self.dof_treatments, self.dof_error)
self.p_blocks = p_value(self.f_blocks, self.dof_blocks, self.dof_error)
self.table = self._create_table()
print(self.table)
def multiple_comparisons(self):
n_treatments, _ = self.data.shape
print(
multiple_comparisons(
list(range(1, n_treatments + 1)),
self.data,
self.dof_error,
np.sqrt(self.mss_error),
)
)
def _create_table(self):
table = create_anova_table()
rows = [
[
"Treatments",
self.dof_treatments,
self.ss_treatments,
self.mss_treatments,
self.f_treatments,
self.p_treatments,
],
[
"Blocks",
self.dof_blocks,
self.ss_blocks,
self.mss_blocks,
self.f_blocks,
self.p_blocks,
],
["Error", self.dof_error, self.ss_error, self.mss_error, "", ""],
["Total", self.dof_total, self.ss_total, "", "", ""],
]
for row in rows:
table.add_row(row)
return table
TwoWayANOVA = RandomizedCompleteBlockDesign
class RandomizedCompleteBlockDesign_MissingValues(RandomizedCompleteBlockDesign):
def __init__(self, data):
self.data = np.array(data)
n_treatments, n_blocks = self.data.shape
self.num_missing = np.count_nonzero(np.isnan(self.data))
missing_locations = np.argwhere(np.isnan(self.data))
self.handle_missing(self.data, missing_locations)
print("Data after adjusting for {} missing value(s)".format(self.num_missing))
print(self.data)
super().__init__(self.data)
def handle_missing(self, data, locations):
if len(locations) == 1:
return self._missing_1_value(data, locations[0])
elif len(locations) == 2:
return self._missing_2_values(data, locations)
else:
raise Exception("Data must have either 1 or 2 missing values")
def _missing_1_value(self, data, location):
k, r = data.shape
i, j = location
G = np.nansum(data)
treatments_sum = np.nansum(data[i, :])
blocks_sum = np.nansum(data[:, j])
self.data[i, j] = (r * blocks_sum + k * treatments_sum - G) / ((r - 1) * (k - 1))
def _missing_2_values(self, data, locations):
k, r = data.shape
y1_loc, y2_loc = locations
i, j = y1_loc
m, j_1 = y2_loc
G = np.nansum(data)
Ti = np.nansum(data[i, :])
Tm = np.nansum(data[m, :])
Bj = np.nansum(data[:, j])
Bj_1 = np.nansum(data[:, j_1])
y1_estimate = ((k - 1) * (r - 1) * (k * Ti + r * Bj - G) - (k * Tm + r * Bj_1 - G)) / (
np.square(r - 1) * np.square(k - 1) - 1
)
y2_estimate = ((k - 1) * (r - 1) * (k * Tm + r * Bj_1 - G) - (k * Ti + r * Bj - G)) / (
np.square(r - 1) * np.square(k - 1) - 1
)
self.data[y1_loc[0], y1_loc[1]] = y1_estimate
self.data[y2_loc[0], y2_loc[1]] = y2_estimate
| true | true |
f7ff646590489831f35fa9fe7ca9c0fe9f2f76be | 592 | py | Python | ProjectEuler_plus/euler_042.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | [
"MIT"
] | null | null | null | ProjectEuler_plus/euler_042.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | [
"MIT"
] | null | null | null | ProjectEuler_plus/euler_042.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from math import sqrt
# (n * (n + 1)) / 2 -> n ** 2 + n - (2 * x)
# Solved with quadratic equation
# https://en.wikipedia.org/wiki/Quadratic_equation
# Project Euler+ #42: for each query t read from stdin, decide whether t
# is a triangular number, i.e. t = n(n+1)/2 for some positive integer n.
# Solving n^2 + n - 2t = 0 gives n = (sqrt(8t + 1) - 1) / 2.
for _ in range(int(input().strip())):
    t = int(input().strip())
    # d = 2n when t is triangular.  NOTE(review): float sqrt is exact only
    # while 8t + 1 fits a double, so extremely large t may misclassify.
    d = (sqrt(4 * 2 * t + 1) - 1)
    if d.is_integer():
        print(int(d) // 2)
    else:
        print(-1)
def e42():
    """Alternative solver for the same problem using exact integer math.

    Reads the query count and each value n from stdin; prints the
    triangular index `root` when n == root*(root+1)//2, otherwise -1.
    """
    for _ in range(int(input().strip())):
        n = int(input().strip())
        # Candidate index from the float sqrt; the exact integer check below
        # guards against it being off.  NOTE(review): for huge n, int(sqrt(...))
        # could round a whole unit low and miss — math.isqrt would be exact.
        root = int(sqrt(2 * n))
        if (root * (root + 1)) // 2 == n:
            print(root)
        else:
            print(-1)
| 21.925926 | 52 | 0.489865 |
import sys
from math import sqrt
for _ in range(int(input().strip())):
t = int(input().strip())
d = (sqrt(4 * 2 * t + 1) - 1)
if d.is_integer():
print(int(d) // 2)
else:
print(-1)
def e42():
for _ in range(int(input().strip())):
n = int(input().strip())
root = int(sqrt(2 * n))
if (root * (root + 1)) // 2 == n:
print(root)
else:
print(-1)
| true | true |
f7ff649ffed8e7a7e3e3be786fab36a5e6e14591 | 2,601 | py | Python | src/decisionengine/framework/dataspace/datasources/sqlalchemy_ds/utils.py | moibenko/decisionengine | 4c458e0c225ec2ce1e82d56e752724983331b7d1 | [
"Apache-2.0"
] | null | null | null | src/decisionengine/framework/dataspace/datasources/sqlalchemy_ds/utils.py | moibenko/decisionengine | 4c458e0c225ec2ce1e82d56e752724983331b7d1 | [
"Apache-2.0"
] | null | null | null | src/decisionengine/framework/dataspace/datasources/sqlalchemy_ds/utils.py | moibenko/decisionengine | 4c458e0c225ec2ce1e82d56e752724983331b7d1 | [
"Apache-2.0"
] | null | null | null | # SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
"""
Code not written by us
"""
import os
import sqlalchemy
import structlog
from decisionengine.framework.modules.logging_configDict import LOGGERNAME
__all__ = ["orm_as_dict", "clone_model", "add_engine_pidguard"]
def orm_as_dict(obj):
"""Based on : https://stackoverflow.com/a/37350445"""
return {c.key: getattr(obj, c.key) for c in sqlalchemy.inspect(obj).mapper.column_attrs}
def clone_model(model, **kwargs):
"""Based on https://stackoverflow.com/a/55991358"""
# will raise AttributeError if data not loaded
try:
model.sequence_id # taskmanager doesn't have an 'id' column
except AttributeError:
model.id # pylint: disable=pointless-statement
table = model.__table__
non_pk_columns = [k for k in table.columns.keys() if k not in table.primary_key]
data = {c: getattr(model, c) for c in non_pk_columns}
data.update(kwargs)
return model.__class__(**data)
def add_engine_pidguard(engine):
"""
Based on
https://stackoverflow.com/questions/62920507/using-sqlalchemy-connection-pooling-queues-with-python-multiprocessing
"""
structlog.getLogger(LOGGERNAME).debug(f"setting up add_engine_pidguard for {engine}")
@sqlalchemy.event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
"""
Based on
https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#foreign-key-support
https://docs.sqlalchemy.org/en/14/core/pooling.html#using-connection-pools-with-multiprocessing-or-os-fork
"""
if "sqlite" in str(type(dbapi_connection)):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.execute("PRAGMA busy_timeout=5000") # permit retrys for 5 seconds only
cursor.close()
connection_record.info["pid"] = os.getpid()
@sqlalchemy.event.listens_for(engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
"""
Based on
https://docs.sqlalchemy.org/en/14/core/pooling.html#using-connection-pools-with-multiprocessing-or-os-fork
"""
pid = os.getpid()
if connection_record.info["pid"] != pid:
connection_record.connection = connection_proxy.connection = None
raise sqlalchemy.exc.DisconnectionError(
f"Connection record belongs to pid {connection_record.info['pid']}, attempting to check out in pid {pid}"
)
| 36.633803 | 121 | 0.688966 |
import os
import sqlalchemy
import structlog
from decisionengine.framework.modules.logging_configDict import LOGGERNAME
__all__ = ["orm_as_dict", "clone_model", "add_engine_pidguard"]
def orm_as_dict(obj):
return {c.key: getattr(obj, c.key) for c in sqlalchemy.inspect(obj).mapper.column_attrs}
def clone_model(model, **kwargs):
try:
model.sequence_id
except AttributeError:
model.id # pylint: disable=pointless-statement
table = model.__table__
non_pk_columns = [k for k in table.columns.keys() if k not in table.primary_key]
data = {c: getattr(model, c) for c in non_pk_columns}
data.update(kwargs)
return model.__class__(**data)
def add_engine_pidguard(engine):
structlog.getLogger(LOGGERNAME).debug(f"setting up add_engine_pidguard for {engine}")
@sqlalchemy.event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
if "sqlite" in str(type(dbapi_connection)):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.execute("PRAGMA busy_timeout=5000") # permit retrys for 5 seconds only
cursor.close()
connection_record.info["pid"] = os.getpid()
@sqlalchemy.event.listens_for(engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
pid = os.getpid()
if connection_record.info["pid"] != pid:
connection_record.connection = connection_proxy.connection = None
raise sqlalchemy.exc.DisconnectionError(
f"Connection record belongs to pid {connection_record.info['pid']}, attempting to check out in pid {pid}"
)
| true | true |
f7ff675f73a1e7d04318c420bc49893e895219be | 2,943 | py | Python | tests/test_class_oelint_vars_appendop.py | QuakeSaver/oelint-adv | e03617b51c7ebdeb8ea245eb61da3e3e03195b37 | [
"BSD-2-Clause"
] | null | null | null | tests/test_class_oelint_vars_appendop.py | QuakeSaver/oelint-adv | e03617b51c7ebdeb8ea245eb61da3e3e03195b37 | [
"BSD-2-Clause"
] | null | null | null | tests/test_class_oelint_vars_appendop.py | QuakeSaver/oelint-adv | e03617b51c7ebdeb8ea245eb61da3e3e03195b37 | [
"BSD-2-Clause"
] | null | null | null | import pytest
from base import TestBaseClass
class TestClassOelintVarAppendOp(TestBaseClass):
    """Checks for the 'oelint.vars.appendop' rule.
    The fixtures below combine an append/prepend operator (+=, =+, .=, =.)
    with a weak assignment (?= or ??=) on the same variable; the 'good'
    fixtures use the safe VAR_append / VAR_prepend spelling instead.
    """
    # Bad fixtures: operator + weak default on the same variable,
    # so the rule must fire exactly once per recipe.
    @pytest.mark.parametrize('id', ['oelint.vars.appendop'])
    @pytest.mark.parametrize('occurrence', [1])
    @pytest.mark.parametrize('input',
                             [
                                 {
                                     'oelint_adv_test.bb':
                                     '''
A ??= "1"
A += "2"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
B += "B"
B ?= "A"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
C ??= "1"
C .= "2"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
D .= "2"
D ?= "1"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
F ??= "1"
F =+ "2"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
G =+ "B"
G ?= "A"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
H ??= "1"
H =. "2"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
I =. "2"
I ?= "1"
'''
                                 },
                             ],
                             )
    def test_bad(self, input, id, occurrence):
        """Each flagged recipe must report the rule exactly once."""
        self.check_for_id(self._create_args(input), id, occurrence)
    # Good fixtures: the *_append / *_prepend spelling never triggers
    # the rule, regardless of the weak assignment.
    @pytest.mark.parametrize('id', ['oelint.vars.appendop'])
    @pytest.mark.parametrize('occurrence', [0])
    @pytest.mark.parametrize('input',
                             [
                                 {
                                     'oelint_adv_test.bb':
                                     '''
A ??= "1"
A_append = "2"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
B_append = "B"
B ?= "A"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
C ??= "1"
C_append = "2"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
D_append = "2"
D ?= "1"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
F ??= "1"
F_prepend = "2"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
G_prepend = "B"
G ?= "A"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
H ??= "1"
H_prepend = "2"
'''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
I_prepend = "2"
I ?= "1"
'''
                                 },
                             ],
                             )
    def test_good(self, input, id, occurrence):
        """The safe *_append/*_prepend spellings must not be flagged."""
        self.check_for_id(self._create_args(input), id, occurrence)
| 21.639706 | 67 | 0.285423 | import pytest
from base import TestBaseClass
class TestClassOelintVarAppendOp(TestBaseClass):
@pytest.mark.parametrize('id', ['oelint.vars.appendop'])
@pytest.mark.parametrize('occurrence', [1])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
A ??= "1"
A += "2"
'''
},
{
'oelint_adv_test.bb':
'''
B += "B"
B ?= "A"
'''
},
{
'oelint_adv_test.bb':
'''
C ??= "1"
C .= "2"
'''
},
{
'oelint_adv_test.bb':
'''
D .= "2"
D ?= "1"
'''
},
{
'oelint_adv_test.bb':
'''
F ??= "1"
F =+ "2"
'''
},
{
'oelint_adv_test.bb':
'''
G =+ "B"
G ?= "A"
'''
},
{
'oelint_adv_test.bb':
'''
H ??= "1"
H =. "2"
'''
},
{
'oelint_adv_test.bb':
'''
I =. "2"
I ?= "1"
'''
},
],
)
def test_bad(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
@pytest.mark.parametrize('id', ['oelint.vars.appendop'])
@pytest.mark.parametrize('occurrence', [0])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
A ??= "1"
A_append = "2"
'''
},
{
'oelint_adv_test.bb':
'''
B_append = "B"
B ?= "A"
'''
},
{
'oelint_adv_test.bb':
'''
C ??= "1"
C_append = "2"
'''
},
{
'oelint_adv_test.bb':
'''
D_append = "2"
D ?= "1"
'''
},
{
'oelint_adv_test.bb':
'''
F ??= "1"
F_prepend = "2"
'''
},
{
'oelint_adv_test.bb':
'''
G_prepend = "B"
G ?= "A"
'''
},
{
'oelint_adv_test.bb':
'''
H ??= "1"
H_prepend = "2"
'''
},
{
'oelint_adv_test.bb':
'''
I_prepend = "2"
I ?= "1"
'''
},
],
)
def test_good(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
| true | true |
f7ff6802205c2f38671825e0135c21da6f9045a2 | 921 | py | Python | pytglib/api/functions/set_chat_description.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/functions/set_chat_description.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/functions/set_chat_description.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
class SetChatDescription(Object):
    """
    Changes information about a chat. Available for basic groups, supergroups, and channels. Requires can_change_info rights

    Attributes:
        ID (:obj:`str`): ``SetChatDescription``

    Args:
        chat_id (:obj:`int`):
            Identifier of the chat
        description (:obj:`str`):
            New chat description; 0-255 characters

    Returns:
        Ok

    Raises:
        :class:`telegram.Error`
    """
    # TDLib method name used on the wire.
    ID = "setChatDescription"

    def __init__(self, chat_id, description, extra=None, **kwargs):
        # `extra` carries caller-supplied correlation data; extra kwargs ignored.
        self.extra = extra
        self.chat_id = chat_id  # int
        self.description = description  # str

    @staticmethod
    def read(q: dict, *args) -> "SetChatDescription":
        # Rebuild the request object from a raw TDLib dict payload.
        return SetChatDescription(q.get('chat_id'), q.get('description'))
| 24.891892 | 125 | 0.618893 |
from ..utils import Object
class SetChatDescription(Object):
ID = "setChatDescription"
def __init__(self, chat_id, description, extra=None, **kwargs):
self.extra = extra
self.chat_id = chat_id
self.description = description
@staticmethod
def read(q: dict, *args) -> "SetChatDescription":
chat_id = q.get('chat_id')
description = q.get('description')
return SetChatDescription(chat_id, description)
| true | true |
f7ff69dd8a529c57831e0f5e1a8547468a95d4f4 | 27,749 | py | Python | tf2onnx/rewriter/custom_rnn_rewriter.py | anttisaukko/tensorflow-onnx | 1341bdf476df6023b75bc6b3c6e4cda00cc58a29 | [
"MIT"
] | null | null | null | tf2onnx/rewriter/custom_rnn_rewriter.py | anttisaukko/tensorflow-onnx | 1341bdf476df6023b75bc6b3c6e4cda00cc58a29 | [
"MIT"
] | null | null | null | tf2onnx/rewriter/custom_rnn_rewriter.py | anttisaukko/tensorflow-onnx | 1341bdf476df6023b75bc6b3c6e4cda00cc58a29 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
tf2onnx.rewriter.custom_rnn_rewriter - custom rnn support
"""
from __future__ import division
from __future__ import print_function
import logging
import sys
from onnx import helper, onnx_pb
import numpy as np
from tf2onnx.graph import Graph, Node
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx.rewriter.loop_rewriter_base import LoopRewriterBase, Context
from tf2onnx.rewriter.rnn_utils import is_tensor_array_gather_op, is_tensor_array_write_op, \
is_placeholder_op, make_onnx_node
from tf2onnx.rewriter.rnn_utils import BodyGraphDict, REWRITER_RESULT, SubGraphMetadata
from tf2onnx.tfonnx import utils
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("tf2onnx.rewriter.custom_rnn_rewriter")
INVLAID_INPUT_ID = "invalid:0"
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,broad-except,protected-access
class CustomRnnContext(Context):
    """Loop-rewrite context extended with dynamic_rnn-specific bookkeeping."""

    def __init__(self):
        # py2-compatible super call (file imports __future__ division/print_function)
        super(CustomRnnContext, self).__init__()
        # scope prefix of the rnn (the parent of the while context)
        self.rnn_scope = None
        # loop vars that are neither time/iteration counters nor tensor arrays
        self.other_loop_vars = {}
        # TensorArrayProp entries consumed / produced by the loop body
        self.output_tas = []
        self.input_tas = []
        # the dedicated time and iteration-counter loop variables
        self.time_var = None
        self.iteration_var = None
class TensorArrayProp(object):
    """Record of the graph output ids that describe one TensorArray
    (its index feed, its data feed and its read/gather output)."""

    def __init__(self):
        # all three ids are filled in later by the parsing passes
        self.index_input_id = self.data_input_id = self.output_id = None
class ScanProperties(object):
    """Bundle of the input/output id lists needed to assemble an ONNX Scan node."""

    def __init__(self, initial_state_and_scan_inputs, loop_state_inputs,
                 loop_state_outputs, loop_scan_inputs, loop_scan_outputs):
        # ids wired into the Scan node from the outer graph
        self.initial_state_and_scan_inputs = initial_state_and_scan_inputs
        # state ids of the loop body (carried across iterations)
        self.loop_state_inputs = loop_state_inputs
        self.loop_state_outputs = loop_state_outputs
        # per-iteration scan ids of the loop body
        self.loop_scan_inputs = loop_scan_inputs
        self.loop_scan_outputs = loop_scan_outputs
class CustomRnnRewriter(LoopRewriterBase):
    def __init__(self, g):
        """Create the rewriter over graph `g` and build the TF pattern that
        matches the per-step read of a dynamic_rnn input TensorArray:
        TensorArrayReadV3(Enter(TensorArrayV3), <index>, Enter(TensorArrayScatterV3))."""
        super(CustomRnnRewriter, self).__init__(g)
        self.rnn_input_pattern = \
            OpTypePattern('TensorArrayReadV3', name='ta_read', inputs=[
                OpTypePattern("Enter", name="ta_enter", inputs=[
                    OpTypePattern("TensorArrayV3")
                ]),
                OpTypePattern('*'),
                OpTypePattern("Enter", name="ta_scatter_enter", inputs=[
                    OpTypePattern("TensorArrayScatterV3", name="ta_input_scatter")
                ]),
            ])
    def create_context(self):
        """Return a fresh CustomRnnContext (invoked by the base class per while loop)."""
        return CustomRnnContext()
    def run(self):
        """Entry point: delegate to the base-class while-loop rewrite driver."""
        log.debug("enter custom rnn rewriter")
        return self.run_internal()
def _get_rnn_scope_name(self, while_scope_name):
parts = while_scope_name.split('/')
rnn_scope = '/'.join(parts[0:-2]) + "/"
log.debug("found rnn scope %s", rnn_scope)
return rnn_scope
    def _parse_rnn_loop(self, context):
        """Classify the while loop's Enter variables and decide whether this loop
        was generated by dynamic_rnn / bidirectional_rnn.

        Recognized by:
        1. patterns from _time_step in dynamic_rnn: tensor array read / write
        2. patterns from control_flow_ops.while_loop in dynamic_rnn:
           cond: time < loop_bound; loop_vars: (time, output_ta, state);
           the time var is literally named "time"; the iteration counter is
           added by control flow.

        Notes:
        1. the iteration counter does not exist in tf1.4 or earlier versions
        2. if dynamic_rnn's first input is not consumed, the output ta does not exist

        Side effects: fills context.time_var / iteration_var / other_loop_vars.
        Returns True when the loop looks like a dynamic_rnn loop.
        """
        time_name = context.rnn_scope + "time"
        ta_array_name_prefix = context.rnn_scope + "dynamic_rnn/output_"
        iteration_counter_name = context.while_context_scope + "iteration_counter"
        found_time = False
        is_rnn_out_ta = True
        for enter_name, val in context.loop_variables.items():
            enter_input_node = self.g.get_node_by_name(val.enter_input_id)
            if val.is_tensor_array:
                # every tensor-array loop var must be a dynamic_rnn output ta
                ta_name = enter_input_node.get_attr("tensor_array_name").s.decode("utf-8")
                if not ta_name.startswith(ta_array_name_prefix):
                    is_rnn_out_ta = False
            elif enter_input_node.name == time_name:
                found_time = True
                context.time_var = val
            elif enter_input_node.name == iteration_counter_name:
                context.iteration_var = val
            else:
                # plain state variables (e.g. cell state)
                context.other_loop_vars[enter_name] = val
        if not (found_time and is_rnn_out_ta):
            log.debug("this should not be a dynamic_rnn loop, found_time: %s, is_rnn_out_ta: %s",
                      found_time, is_rnn_out_ta)
            return False
        return True
    def need_rewrite(self, context):
        """Decide whether this while loop should be rewritten as an ONNX Scan.

        Populates context (rnn scope, time var, input/output tensor arrays) as a
        side effect; returns False when the loop does not look like dynamic_rnn
        or has no tensor-array inputs/outputs at all."""
        context.rnn_scope = self._get_rnn_scope_name(context.while_context_scope)
        if not self._parse_rnn_loop(context):
            log.debug("skip the loop due to parse_rnn_loop failed")
            return False
        self._parse_time_var(context)
        self._parse_output_ta(context)
        self._parse_input_ta(context)
        if not (context.input_tas or context.output_tas):
            log.debug("this should not be a dynamic_rnn loop, no ta input or output are found")
            return False
        return True
    def rewrite(self, context):
        """Replace the matched while loop with an ONNX Scan node.

        Steps: cut the cell out of the graph, adapt its inputs/outputs, create
        the Scan node, register the body-graph metadata (consumed later by
        CustomRnnLateRewriter) and reconnect the Scan outputs. On any exception
        the registered body-graph info is rolled back and FAIL is returned."""
        log.debug("enter rewrite function")
        scan_node = None
        try:
            # detach the loop body (cell) from the surrounding graph
            to_remove = self._cut_off_connection_for_cell(context)
            all_nodes = self.g.get_nodes()
            for n in set(to_remove):
                if n in all_nodes:
                    all_nodes.remove(n)
            self.g.set_nodes(all_nodes)
            scan_props, nodes_to_append = self._compose_cell_inputs_and_outputs(context)
            scan_node = self._create_scan_node(context, scan_props)
            if not scan_node:
                log.error("failed to create scan node during rewrite")
                return REWRITER_RESULT.FAIL
            nodes_to_append.append(scan_node)
            # record the body graph so the late rewriter can attach it as Scan's "body"
            _ = self._extract_and_register_cell_graph_info(context, scan_props, scan_node)
            to_append = self._connect_scan_with_output(context, scan_node)
            nodes_to_append.extend(to_append)
            all_nodes = self.g.get_nodes()
            all_nodes.extend(nodes_to_append)
            self.g.set_nodes(all_nodes)
            return REWRITER_RESULT.OK
        except Exception as ex:
            # undo the body-graph registration so a failed rewrite leaves no stale entry
            if scan_node and BodyGraphDict.has_body_graph_info(scan_node.name):
                BodyGraphDict.pop_body_graph_info(scan_node.name)
                log.error("remove scan node body graph from dict")
            log.error("rewrite failed, due to exception: %s", ex)
            return REWRITER_RESULT.FAIL
    def _parse_time_var(self, context):
        """Debug-log the time loop variable's ids and shapes (no graph changes)."""
        time_var = context.time_var
        log.debug("time var %s - enter input id (%s) shape: %s, output (%s) shape: %s", time_var.enter_name,
                  time_var.enter_input_id, self.g.get_shape(time_var.enter_input_id),
                  time_var.switch_true_identity_output_id, self.g.get_shape(time_var.switch_true_identity_output_id))
    def _parse_output_ta(self, context):
        """Collect a TensorArrayProp for every tensor-array loop variable
        (the dynamic_rnn output tas) into context.output_tas."""
        for enter_name, loop_var in context.loop_variables.items():
            if not loop_var.is_tensor_array:
                continue
            output_ta = TensorArrayProp()
            output_ta.data_input_id = loop_var.next_iteration_input_id
            output_ta.index_input_id = loop_var.ta_index_id
            if loop_var.exit_output_id:
                exit_consumers = self.g.find_output_consumers(loop_var.exit_output_id)
                # NOTE(review): assumes at least one TensorArrayGather consumer exists
                # on the Exit output — raises IndexError otherwise; confirm intended.
                ta_gather_node = [n for n in exit_consumers if is_tensor_array_gather_op(n)][0]
                output_ta.output_id = ta_gather_node.output[0]
            context.output_tas.append(output_ta)
            log.debug("output ta %s - data input (%s) shape: %s, output (%s) shape: %s", enter_name,
                      output_ta.data_input_id, self.g.get_shape(output_ta.data_input_id),
                      output_ta.output_id, self.g.get_shape(output_ta.output_id))
    def _parse_input_ta(self, context):
        """Match the rnn input pattern inside this loop's scope and collect a
        TensorArrayProp per input tensor array into context.input_tas."""
        matcher = GraphMatcher(self.rnn_input_pattern, allow_reorder=True)
        match_results = list(matcher.match_ops(self.g.get_nodes()))
        # keep only matches belonging to this rnn's scope
        match_results = [r for r in match_results if r.get_op("ta_input_scatter").name.startswith(context.rnn_scope)]
        for match in match_results:
            ta_input_scatter = match.get_op("ta_input_scatter")
            # the 3rd input of scatter is the value
            input_ta = TensorArrayProp()
            # dynamic_rnn specific approach.
            input_ta.data_input_id = ta_input_scatter.input[2]
            ta_read_node = match.get_op("ta_read")
            input_ta.index_input_id = ta_read_node.input[1]
            input_ta.output_id = match.get_op("ta_read").output[0]
            context.input_tas.append(input_ta)
            log.debug("input ta %s - data input (%s) shape: %s, output (%s) shape: %s", ta_read_node.name,
                      input_ta.data_input_id, self.g.get_shape(input_ta.data_input_id),
                      input_ta.output_id, self.g.get_shape(input_ta.output_id))
    def _cut_off_connection_for_cell(self, context):
        """Sever the loop body (cell) from the while-loop plumbing.

        Returns the nodes that must be deleted from the graph; ending edges are
        redirected to INVLAID_INPUT_ID instead of being deleted."""
        nodes_to_remove = []
        all_vars = [context.time_var]
        all_vars += [val for _, val in context.other_loop_vars.items()]
        for val in all_vars:
            # remove the node to cut off a starting node of the cell (e.g. loop body).
            nodes_to_remove.append(self.g.get_node_by_name(val.switch_true_identity_output_id))
            # connect NextIteration to an invalid node, to cut off an ending node of the cell.
            next_iter_nodes = [n for n in self.g.get_nodes() if n.type == "NextIteration"]
            self.g.replace_all_inputs(next_iter_nodes, val.next_iteration_input_id, INVLAID_INPUT_ID)
        for input_ta in context.input_tas:
            # remove the node to cut off connection between scan_input and the cell.
            nodes_to_remove.append(self.g.get_node_by_name(input_ta.output_id))
        for output_ta in context.output_tas:
            # remove the node to cut off connection between scan_output and the cell.
            ta_write_nodes = [n for n in self.g.get_nodes() if is_tensor_array_write_op(n)]
            self.g.replace_all_inputs(ta_write_nodes, output_ta.data_input_id, INVLAID_INPUT_ID)
        return nodes_to_remove
    def _compose_cell_inputs_and_outputs(self, context):
        """Adapt all state/scan inputs and outputs for the Scan node.

        Each state input and scan input gets a Reshape that prepends a fake
        batch dim of size 1 (via _adapt_scan_sequence_input_or_output).
        Returns (ScanProperties, new nodes to append to the graph)."""
        log.debug("_compose_cell_inputs_and_outputs")
        nodes_to_append = []
        loop_state_inputs = []
        loop_state_outputs = []
        initial_state_and_scan_inputs = []
        # change time shape to {1} since current Scan does not support scalar state
        time_var, to_append = self._adapt_time_var_as_workaround(context.time_var)
        nodes_to_append.extend(to_append)
        log.debug("prepare cell state inputs")
        vars_to_iterate = [time_var] + [val for _, val in context.other_loop_vars.items()]
        for var in vars_to_iterate:
            nodes = self._adapt_scan_sequence_input_or_output("input", var.enter_input_id, False)
            var.enter_input_id = nodes[-1].output[0]
            nodes_to_append.extend(nodes)
            loop_state_inputs.append(var.switch_true_identity_output_id)
            loop_state_outputs.append(var.next_iteration_input_id)
            initial_state_and_scan_inputs.append(var.enter_input_id)
        log.debug("prepare cell scan inputs")
        loop_scan_inputs = []
        for input_ta in context.input_tas:
            nodes = self._adapt_scan_sequence_input_or_output("input_ta", input_ta.data_input_id, False)
            input_ta.data_input_id = nodes[-1].output[0]
            nodes_to_append.extend(nodes)
            loop_scan_inputs.append(input_ta.output_id)
            initial_state_and_scan_inputs.append(input_ta.data_input_id)
        log.debug("prepare cell scan outputs")
        loop_scan_outputs = []
        for output_ta in context.output_tas:
            loop_scan_outputs.append(output_ta.data_input_id)
        scan_props = ScanProperties(initial_state_and_scan_inputs, loop_state_inputs, loop_state_outputs,
                                    loop_scan_inputs, loop_scan_outputs)
        return scan_props, nodes_to_append
    def _create_scan_node(self, context, scan_props):
        """Build the ONNX Scan node and register shapes/dtypes of its outputs.

        Output layout: [time state, other states..., scan outputs...]. Scan
        outputs get shape [batch, time] + per-step shape, where batch/time come
        from the last scan input's shape."""
        log.debug("create scan node")
        # here we did not give the sequence_length, because
        # current batch size is 1, not original batch size
        # original seq_length will be used by the loop body of Scan op.
        scan_node = make_onnx_node(self.g, "Scan", [""] + scan_props.initial_state_and_scan_inputs,
                                   attr={"num_scan_inputs": len(scan_props.loop_scan_inputs)},
                                   output_count=len(scan_props.loop_state_outputs + scan_props.loop_scan_outputs),
                                   skip_conversion=True)
        # the first state var is time-iterator.
        index = 0
        time_input_shape = self.g.get_shape(scan_node.input[1])
        time_input_dtype = self.g.get_dtype(scan_node.input[1])
        log.debug("_create_scan_node - set scan state_output shape for %s[%s]:%s",
                  scan_node.name, index, time_input_shape)
        self.g.set_shape(scan_node.output[index], time_input_shape)
        self.g.set_dtype(scan_node.output[index], time_input_dtype)
        index += 1
        # for other state vars
        state_input_shape = self.g.get_shape(scan_node.input[2])
        state_input_dtype = self.g.get_dtype(scan_node.input[2])
        for i in range(len(scan_props.loop_state_outputs) - 1):
            log.debug("_create_scan_node - set scan state_output shape for %s[%s]:%s",
                      scan_node.name, index, state_input_shape)
            self.g.set_shape(scan_node.output[index], state_input_shape)
            self.g.set_dtype(scan_node.output[index], state_input_dtype)
            index += 1
        last_scan_input_shape = self.g.get_shape(scan_node.input[-1])
        batch = last_scan_input_shape[0]  # should be 1
        time = last_scan_input_shape[1]
        for i in range(len(scan_props.loop_scan_outputs)):
            scan_out_dtype = self.g.get_dtype(scan_props.loop_scan_outputs[i])
            scan_output_shape = [batch, time] + self.g.get_shape(scan_props.loop_scan_outputs[i])
            log.debug("scan output [%s] has shape %s, batch:%s, time: %s",
                      scan_props.loop_scan_outputs[i], scan_output_shape, batch, time)
            log.debug("_create_scan_node - set scan scan_output shape for %s[%s]:%s",
                      scan_node.name, index, scan_output_shape)
            self.g.set_shape(scan_node.output[index], scan_output_shape)
            self.g.set_dtype(scan_node.output[index], scan_out_dtype)
            index += 1
        return scan_node
    def _extract_and_register_cell_graph_info(self, context, scan_props, scan_node):
        """Locate the cell (loop body) subgraph and register its metadata under
        the scan node's name in BodyGraphDict for the late rewriter."""
        log.debug("_extract_cell_graph_nodes")
        sub_graph_inputs = scan_props.loop_state_inputs + scan_props.loop_scan_inputs
        sub_graph_outputs = scan_props.loop_state_outputs + scan_props.loop_scan_outputs
        body_graph_meta = SubGraphMetadata(self.g, sub_graph_inputs, sub_graph_outputs,
                                           scan_props.initial_state_and_scan_inputs)
        # according to input and output, find the body graph
        nodes, enter_nodes = self.find_subgraph(body_graph_meta, self.g)
        other_enter_input_ids = []
        for enter_node in enter_nodes:
            # bypass the Enter: connect Enter's output to Enter's input
            self.g.replace_all_inputs(self.g.get_nodes(), enter_node.output[0], enter_node.input[0])
            nodes = self.g._extract_sub_graph_nodes(self.g.get_node_by_name(enter_node.input[0]))
            # if the enter target subgraph contains a placeholder, keep record of that as cell boundary
            has_placeholder = None
            for n in nodes:
                if is_placeholder_op(n):
                    has_placeholder = True
                    break
            # if there is a placeholder in the Enter's input graph, the Enter's input
            # nodes are treated as the cell's input; otherwise the input graph is part of the cell graph.
            if has_placeholder is True:
                log.debug("Enter input id [%s] is a subgraph containing placeholder, so make it cell boundary",
                          enter_node.input[0])
                other_enter_input_ids.append(enter_node.input[0])
        body_graph_meta.other_enter_input_ids = other_enter_input_ids
        log.debug("add body graph meta data into store")
        BodyGraphDict.add_body_graph_info(scan_node.name, body_graph_meta)
        return nodes
    def _connect_scan_with_output(self, context, scan_node):
        """Rewire original loop outputs to the Scan node's outputs.

        Each consumed output is routed through a Reshape that strips the fake
        batch dim; index walks the Scan outputs in state-then-scan order,
        skipping index 0 (the time iterator)."""
        log.debug("connect scan output with the graph")
        index = 1  # ignore the 1st input (time-iterator)
        nodes_to_append = []
        for _, val in context.other_loop_vars.items():
            var_output_id = val.exit_output_id
            if var_output_id:
                nodes = self._adapt_scan_sequence_input_or_output("state_output_reshape",
                                                                  scan_node.output[index], True)
                nodes_to_append.extend(nodes)
                self.g.replace_all_inputs(self.g.get_nodes(), var_output_id, nodes[-1].output[0])
            index += 1
        for output_ta in context.output_tas:
            ta_final_output_id = output_ta.output_id
            if ta_final_output_id:
                nodes = self._adapt_scan_sequence_input_or_output("scan_output_reshape",
                                                                  scan_node.output[index], True)
                nodes_to_append.extend(nodes)
                self.g.replace_all_inputs(self.g.get_nodes(), ta_final_output_id, nodes[-1].output[0])
            index += 1
        return nodes_to_append
def _adapt_scan_sequence_input_or_output(self, target_name, input_id, handle_output=False):
nodes_to_add = []
shape_node = make_onnx_node(self.g, "Shape", [input_id])
nodes_to_add.append(shape_node)
inferred_shape = self.g.get_shape(input_id)
if handle_output is True:
# handle output:
# if required dim values don't contain more than one -1,
# just use a const for Reshape's shape input.
if inferred_shape is not None and inferred_shape[1:].count(-1) <= 1:
new_shape_node = self.g.make_const(utils.make_name(target_name + "_target_shape"),
np.array(inferred_shape[1:], dtype=np.int64))
else:
# otherwise, get the dim dynamically, e.g. remove the fake batch size (e.g.1)
# from [1, time, real-batch, ...]
origin_shape_node = make_onnx_node(self.g, "Cast", [shape_node.output[0]],
{"to": onnx_pb.TensorProto.FLOAT})
nodes_to_add.append(origin_shape_node)
sliced_shape_node = make_onnx_node(self.g, "Slice", [origin_shape_node.output[0]],
{"axes": [0], "starts": [1], "ends": [sys.maxsize]})
nodes_to_add.append(sliced_shape_node)
new_shape_node = make_onnx_node(self.g, "Cast", [sliced_shape_node.output[0]],
{"to": onnx_pb.TensorProto.INT64})
nodes_to_add.append(new_shape_node)
new_shape = inferred_shape[1:]
else:
# handle input:
if inferred_shape is not None and inferred_shape.count(-1) <= 1:
new_shape_node = self.g.make_const(utils.make_name(target_name + "_target_shape"),
np.array([1] + inferred_shape, dtype=np.int64))
else:
# add a fake batch size : 1
fake_batch_size_node = self.g.make_const(utils.make_name(target_name + "_target_shape"),
np.array([1,], dtype=np.int64))
new_shape_node = make_onnx_node(self.g, "Concat",
[fake_batch_size_node.output[0], shape_node.output[0]],
{"axis": 0})
nodes_to_add.append(new_shape_node)
new_shape = [1] + inferred_shape
reshape_node = make_onnx_node(self.g, "Reshape", [input_id, new_shape_node.output[0]],
skip_conversion=True, op_name_scope=target_name)
nodes_to_add.append(reshape_node)
self.g.set_shape(reshape_node.output[0], new_shape)
self.g.set_dtype(reshape_node.output[0], self.g.get_dtype(input_id))
log.debug("create Reshape for scan output %s, with output shape %s",
reshape_node.output[0], new_shape)
return nodes_to_add
# in theory, time var can be a scalar, but in current implementation of runtime, it could not be handled
# correctly, so we unsqueeze it to a list containing a single element.
    def _adapt_time_var_as_workaround(self, var):
        """Workaround: the scalar time var is unsqueezed to shape [1] on its init
        and next-iteration sides, and squeezed back on the cell-input side,
        because the current Scan runtime cannot handle a scalar state.

        Returns (the mutated var, new nodes to append to the graph)."""
        log.debug("_adapt_time_var_as_workaround")
        nodes_to_append = []
        # change time shape to {1} since current Scan does not support scalars
        time_init_node = self._create_unsqueeze_node("time_var_init", var.enter_input_id)
        nodes_to_append.append(time_init_node)
        var.enter_input_id = time_init_node.output[0]
        time_output_node = self._create_unsqueeze_node("time_var_output", var.next_iteration_input_id)
        nodes_to_append.append(time_output_node)
        var.next_iteration_input_id = time_output_node.output[0]
        time_input_node = self._create_squeeze_node("time_var_input", var.switch_true_identity_output_id)
        nodes_to_append.append(time_input_node)
        self.g.replace_all_inputs(self.g.get_nodes(), var.switch_true_identity_output_id, time_input_node.output[0])
        self.g.set_shape(var.switch_true_identity_output_id, [1] + self.g.get_shape(var.switch_true_identity_output_id))
        return var, nodes_to_append
def _create_unsqueeze_node(self, target_name, input_id):
unsqueeze_node = make_onnx_node(self.g, "Unsqueeze", [input_id], attr={"axes": [0]},
skip_conversion=True, op_name_scope=target_name)
input_shape = self.g.get_shape(input_id)
if input_shape is None:
raise ValueError(input_id + " is none")
input_shape = [1] + input_shape
self.g.set_shape(unsqueeze_node.output[0], input_shape)
self.g.set_dtype(unsqueeze_node.output[0], self.g.get_dtype(input_id))
return unsqueeze_node
def _create_squeeze_node(self, target_name, input_id):
squeeze_node = make_onnx_node(self.g, "Squeeze", [input_id], attr={"axes": [0]},
skip_conversion=True, op_name_scope=target_name)
input_shape = self.g.get_shape(input_id)
if input_shape is None:
raise ValueError(input_id + " is none")
input_shape = list(input_shape)[1:]
self.g.set_shape(squeeze_node.output[0], input_shape)
self.g.set_dtype(squeeze_node.output[0], self.g.get_dtype(input_id))
return squeeze_node
# end of time var workaround
class CustomRnnLateRewriter(object):
    def __init__(self, g):
        """Keep a reference to the graph whose Scan nodes will get body graphs."""
        self.g = g
    def rewrite(self):
        """For every Scan node with registered body-graph metadata, build its
        ONNX body graph, attach it as the "body" attribute, and remove the
        body's nodes (and initializers) from the main graph.

        Returns the remaining node list of the main graph."""
        log.debug("enter custom rnn late rewriter")
        nodes = self.g.get_nodes()
        nodes_to_remove = []
        for scan_node in nodes:
            if scan_node.type != "Scan":
                continue
            log.debug("late write for scan node %s", scan_node.name)
            num_scan_inputs = scan_node.get_attr("num_scan_inputs").i
            if not BodyGraphDict.has_body_graph_info(scan_node.name):
                continue
            body_graph_meta = BodyGraphDict.pop_body_graph_info(scan_node.name)
            onnx_nodes, _ = LoopRewriterBase.find_subgraph(body_graph_meta, self.g)
            nodes_to_remove.extend(onnx_nodes)
            log.debug("start creating body graph for scan node %s ", scan_node.name)
            body_graph_initializers = {}
            const_nodes = [n for n in onnx_nodes if n.type in ("Const", "ConstV2")]
            for n in const_nodes:
                # when set nodes, Const should be removed; they need to be replaced by initializers.
                body_graph_initializers[n.output[0]] = self.g.initializers[n.output[0]]
                onnx_nodes.remove(n)
            onnx_nodes = set(onnx_nodes)
            ops = []
            for op in onnx_nodes:
                onnx_op = op.op
                ops.append(onnx_op)
            body_g = Graph(ops, output_shapes=self.g._output_shapes, dtypes=self.g._dtypes)
            body_g._initializers = body_graph_initializers
            log.debug("start preparing body graph inputs nodes")
            temp_nodes = body_g.get_nodes()
            i = 0
            input_count = len(body_graph_meta.input_ids)
            for input_name, init_input_id in zip(body_graph_meta.input_ids, body_graph_meta.initial_input_ids):
                shape = body_g.get_shape(input_name)
                dtype = body_g.get_dtype(input_name)
                if shape is None:
                    # fall back to the outer graph's shape of the initial input
                    shape = self.g.get_shape(init_input_id)
                    if i >= input_count - num_scan_inputs:
                        # scan inputs: drop the leading [1 (fake batch), time] dims
                        loop_input_shape = list(shape)[2:]  # delete [1, time,]
                    else:
                        loop_input_shape = list(shape)
                else:
                    loop_input_shape = list(shape)
                onnx_input_shape = utils.make_onnx_shape(loop_input_shape)
                val = helper.make_tensor_value_info(input_name, dtype, onnx_input_shape)
                body_g.add_model_input(input_name, val)
                i += 1
            log.debug("start preparing body graph outputs nodes")
            new_output_names = []
            for o in body_graph_meta.output_ids:
                # insert identity node, since sometimes we need to output the same output_id as
                # state_output and scan_out, but ONNX doesn't allow the same output_id to appear
                # more than once as an output node.
                identity_name = utils.make_name("Identity")
                identity_output = utils.port_name(identity_name)
                node = Node(helper.make_node("Identity", [o], [identity_output], name=identity_name), body_g)
                body_g.set_dtype(identity_output, body_g.get_dtype(o))
                body_g.copy_shape(o, identity_output)
                new_output_names.append(identity_output)
                temp_nodes.append(node)
            body_g.set_nodes(temp_nodes)
            body_g.topological_sort(body_g.get_nodes())
            log.debug("start make graph based on body graph nodes")
            body_g.output_names = new_output_names
            graph = body_g.make_graph("scan body graph")
            scan_node.set_attr("body", graph)
        # remove nodes in body graph from g
        for n in set(nodes_to_remove):
            if n in nodes:
                nodes.remove(n)
            elif self.g.is_initializer(n.output[0]):
                del self.g.initializers[n.output[0]]
            else:
                raise ValueError("error when removing nodes")
        return nodes
| 47.112054 | 120 | 0.635843 |
from __future__ import division
from __future__ import print_function
import logging
import sys
from onnx import helper, onnx_pb
import numpy as np
from tf2onnx.graph import Graph, Node
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx.rewriter.loop_rewriter_base import LoopRewriterBase, Context
from tf2onnx.rewriter.rnn_utils import is_tensor_array_gather_op, is_tensor_array_write_op, \
is_placeholder_op, make_onnx_node
from tf2onnx.rewriter.rnn_utils import BodyGraphDict, REWRITER_RESULT, SubGraphMetadata
from tf2onnx.tfonnx import utils
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("tf2onnx.rewriter.custom_rnn_rewriter")
INVLAID_INPUT_ID = "invalid:0"
class CustomRnnContext(Context):
def __init__(self):
super(CustomRnnContext, self).__init__()
self.other_loop_vars = {}
self.rnn_scope = None
self.output_tas = []
self.input_tas = []
self.time_var = None
self.iteration_var = None
class TensorArrayProp(object):
def __init__(self):
self.index_input_id = None
self.data_input_id = None
self.output_id = None
class ScanProperties(object):
def __init__(self, initial_state_and_scan_inputs, loop_state_inputs,
loop_state_outputs, loop_scan_inputs, loop_scan_outputs):
self.initial_state_and_scan_inputs = initial_state_and_scan_inputs
self.loop_state_inputs = loop_state_inputs
self.loop_state_outputs = loop_state_outputs
self.loop_scan_inputs = loop_scan_inputs
self.loop_scan_outputs = loop_scan_outputs
class CustomRnnRewriter(LoopRewriterBase):
def __init__(self, g):
super(CustomRnnRewriter, self).__init__(g)
self.rnn_input_pattern = \
OpTypePattern('TensorArrayReadV3', name='ta_read', inputs=[
OpTypePattern("Enter", name="ta_enter", inputs=[
OpTypePattern("TensorArrayV3")
]),
OpTypePattern('*'),
OpTypePattern("Enter", name="ta_scatter_enter", inputs=[
OpTypePattern("TensorArrayScatterV3", name="ta_input_scatter")
]),
])
def create_context(self):
return CustomRnnContext()
def run(self):
log.debug("enter custom rnn rewriter")
return self.run_internal()
def _get_rnn_scope_name(self, while_scope_name):
parts = while_scope_name.split('/')
rnn_scope = '/'.join(parts[0:-2]) + "/"
log.debug("found rnn scope %s", rnn_scope)
return rnn_scope
def _parse_rnn_loop(self, context):
time_name = context.rnn_scope + "time"
ta_array_name_prefix = context.rnn_scope + "dynamic_rnn/output_"
iteration_counter_name = context.while_context_scope + "iteration_counter"
found_time = False
is_rnn_out_ta = True
for enter_name, val in context.loop_variables.items():
enter_input_node = self.g.get_node_by_name(val.enter_input_id)
if val.is_tensor_array:
ta_name = enter_input_node.get_attr("tensor_array_name").s.decode("utf-8")
if not ta_name.startswith(ta_array_name_prefix):
is_rnn_out_ta = False
elif enter_input_node.name == time_name:
found_time = True
context.time_var = val
elif enter_input_node.name == iteration_counter_name:
context.iteration_var = val
else:
context.other_loop_vars[enter_name] = val
if not (found_time and is_rnn_out_ta):
log.debug("this should not be a dynamic_rnn loop, found_time: %s, is_rnn_out_ta: %s",
found_time, is_rnn_out_ta)
return False
return True
def need_rewrite(self, context):
context.rnn_scope = self._get_rnn_scope_name(context.while_context_scope)
if not self._parse_rnn_loop(context):
log.debug("skip the loop due to parse_rnn_loop failed")
return False
self._parse_time_var(context)
self._parse_output_ta(context)
self._parse_input_ta(context)
if not (context.input_tas or context.output_tas):
log.debug("this should not be a dynamic_rnn loop, no ta input or output are found")
return False
return True
def rewrite(self, context):
log.debug("enter rewrite function")
scan_node = None
try:
to_remove = self._cut_off_connection_for_cell(context)
all_nodes = self.g.get_nodes()
for n in set(to_remove):
if n in all_nodes:
all_nodes.remove(n)
self.g.set_nodes(all_nodes)
scan_props, nodes_to_append = self._compose_cell_inputs_and_outputs(context)
scan_node = self._create_scan_node(context, scan_props)
if not scan_node:
log.error("failed to create scan node during rewrite")
return REWRITER_RESULT.FAIL
nodes_to_append.append(scan_node)
_ = self._extract_and_register_cell_graph_info(context, scan_props, scan_node)
to_append = self._connect_scan_with_output(context, scan_node)
nodes_to_append.extend(to_append)
all_nodes = self.g.get_nodes()
all_nodes.extend(nodes_to_append)
self.g.set_nodes(all_nodes)
return REWRITER_RESULT.OK
except Exception as ex:
if scan_node and BodyGraphDict.has_body_graph_info(scan_node.name):
BodyGraphDict.pop_body_graph_info(scan_node.name)
log.error("remove scan node body graph from dict")
log.error("rewrite failed, due to exception: %s", ex)
return REWRITER_RESULT.FAIL
def _parse_time_var(self, context):
time_var = context.time_var
log.debug("time var %s - enter input id (%s) shape: %s, output (%s) shape: %s", time_var.enter_name,
time_var.enter_input_id, self.g.get_shape(time_var.enter_input_id),
time_var.switch_true_identity_output_id, self.g.get_shape(time_var.switch_true_identity_output_id))
def _parse_output_ta(self, context):
for enter_name, loop_var in context.loop_variables.items():
if not loop_var.is_tensor_array:
continue
output_ta = TensorArrayProp()
output_ta.data_input_id = loop_var.next_iteration_input_id
output_ta.index_input_id = loop_var.ta_index_id
if loop_var.exit_output_id:
exit_consumers = self.g.find_output_consumers(loop_var.exit_output_id)
ta_gather_node = [n for n in exit_consumers if is_tensor_array_gather_op(n)][0]
output_ta.output_id = ta_gather_node.output[0]
context.output_tas.append(output_ta)
log.debug("output ta %s - data input (%s) shape: %s, output (%s) shape: %s", enter_name,
output_ta.data_input_id, self.g.get_shape(output_ta.data_input_id),
output_ta.output_id, self.g.get_shape(output_ta.output_id))
def _parse_input_ta(self, context):
matcher = GraphMatcher(self.rnn_input_pattern, allow_reorder=True)
match_results = list(matcher.match_ops(self.g.get_nodes()))
match_results = [r for r in match_results if r.get_op("ta_input_scatter").name.startswith(context.rnn_scope)]
for match in match_results:
ta_input_scatter = match.get_op("ta_input_scatter")
# the 3rd input of scatter is the value
input_ta = TensorArrayProp()
# dynamic_rnn specific approach.
input_ta.data_input_id = ta_input_scatter.input[2]
ta_read_node = match.get_op("ta_read")
input_ta.index_input_id = ta_read_node.input[1]
input_ta.output_id = match.get_op("ta_read").output[0]
context.input_tas.append(input_ta)
log.debug("input ta %s - data input (%s) shape: %s, output (%s) shape: %s", ta_read_node.name,
input_ta.data_input_id, self.g.get_shape(input_ta.data_input_id),
input_ta.output_id, self.g.get_shape(input_ta.output_id))
def _cut_off_connection_for_cell(self, context):
    """Detach the loop body (cell) subgraph from the surrounding while-loop.

    For every loop variable: the Switch->Identity node marking the cell's
    entry is scheduled for removal, and the NextIteration input marking the
    cell's exit is rewired to an invalid placeholder id so the subgraph
    walk stops there. Input/output TensorArray boundaries are cut the same
    way. Returns the nodes to be removed by the caller.

    NOTE(review): ``INVLAID_INPUT_ID`` (sic) is a constant defined elsewhere
    in this module; the misspelling is part of its actual name.
    """
    nodes_to_remove = []
    all_vars = [context.time_var]
    all_vars += [val for _, val in context.other_loop_vars.items()]
    for val in all_vars:
        # remove the node to cut off a starting node of the cell (e.g. loop body).
        nodes_to_remove.append(self.g.get_node_by_name(val.switch_true_identity_output_id))
        # connect NextIteration to an invalid node, to cut off a ending node of the cell.
        next_iter_nodes = [n for n in self.g.get_nodes() if n.type == "NextIteration"]
        self.g.replace_all_inputs(next_iter_nodes, val.next_iteration_input_id, INVLAID_INPUT_ID)
    for input_ta in context.input_tas:
        # remove the node to cut off connection between scan_input and the cell.
        nodes_to_remove.append(self.g.get_node_by_name(input_ta.output_id))
    for output_ta in context.output_tas:
        # remove the node to cut off connection between scan_output and the cell.
        ta_write_nodes = [n for n in self.g.get_nodes() if is_tensor_array_write_op(n)]
        self.g.replace_all_inputs(ta_write_nodes, output_ta.data_input_id, INVLAID_INPUT_ID)
    return nodes_to_remove
def _compose_cell_inputs_and_outputs(self, context):
    """Assemble the state/scan input and output lists for the Scan node.

    Adapts the time variable and every other loop variable (and each input
    TensorArray) with reshape helpers so they match Scan's expected batched
    layout, collecting:
      - loop_state_inputs/outputs: per-iteration carried state ids,
      - loop_scan_inputs/outputs: per-step sequence ids,
      - initial_state_and_scan_inputs: the Scan node's actual inputs.
    Returns (ScanProperties, nodes created by the adaptation steps).
    """
    log.debug("_compose_cell_inputs_and_outputs")
    nodes_to_append = []
    loop_state_inputs = []
    loop_state_outputs = []
    initial_state_and_scan_inputs = []
    # change time shape to {1} since current Scan does not support
    time_var, to_append = self._adapt_time_var_as_workaround(context.time_var)
    nodes_to_append.extend(to_append)
    log.debug("prepare cell state inputs")
    vars_to_iterate = [time_var] + [val for _, val in context.other_loop_vars.items()]
    for var in vars_to_iterate:
        # Prepend a batch dim of 1 to each state's initial value.
        nodes = self._adapt_scan_sequence_input_or_output("input", var.enter_input_id, False)
        var.enter_input_id = nodes[-1].output[0]
        nodes_to_append.extend(nodes)
        loop_state_inputs.append(var.switch_true_identity_output_id)
        loop_state_outputs.append(var.next_iteration_input_id)
        initial_state_and_scan_inputs.append(var.enter_input_id)
    log.debug("prepare cell scan inputs")
    loop_scan_inputs = []
    for input_ta in context.input_tas:
        nodes = self._adapt_scan_sequence_input_or_output("input_ta", input_ta.data_input_id, False)
        input_ta.data_input_id = nodes[-1].output[0]
        nodes_to_append.extend(nodes)
        loop_scan_inputs.append(input_ta.output_id)
        initial_state_and_scan_inputs.append(input_ta.data_input_id)
    log.debug("prepare cell scan outputs")
    loop_scan_outputs = []
    for output_ta in context.output_tas:
        loop_scan_outputs.append(output_ta.data_input_id)
    scan_props = ScanProperties(initial_state_and_scan_inputs, loop_state_inputs, loop_state_outputs,
                                loop_scan_inputs, loop_scan_outputs)
    return scan_props, nodes_to_append
def _create_scan_node(self, context, scan_props):
    """Create the ONNX Scan node and register shapes/dtypes for its outputs.

    Output layout mirrors the input layout: output[0] is the time iterator
    (shape copied from input[1]), followed by the other state variables
    (shape copied from input[2]), followed by one stacked output per scan
    output shaped [batch, time, *per_step_shape].
    """
    log.debug("create scan node")
    # here we did not give the sequence_length, because
    # current batch size is 1, not original batch size
    # original seq_length will be used by the loop body of Scan op.
    scan_node = make_onnx_node(self.g, "Scan", [""] + scan_props.initial_state_and_scan_inputs,
                               attr={"num_scan_inputs": len(scan_props.loop_scan_inputs)},
                               output_count=len(scan_props.loop_state_outputs + scan_props.loop_scan_outputs),
                               skip_conversion=True)
    # the first state var is time-iterator.
    index = 0
    time_input_shape = self.g.get_shape(scan_node.input[1])
    time_input_dtype = self.g.get_dtype(scan_node.input[1])
    log.debug("_create_scan_node - set scan state_output shape for %s[%s]:%s",
              scan_node.name, index, time_input_shape)
    self.g.set_shape(scan_node.output[index], time_input_shape)
    self.g.set_dtype(scan_node.output[index], time_input_dtype)
    index += 1
    # for other state vars
    # NOTE(review): all remaining state outputs reuse the shape/dtype of
    # input[2]; this assumes every non-time state has the same shape.
    state_input_shape = self.g.get_shape(scan_node.input[2])
    state_input_dtype = self.g.get_dtype(scan_node.input[2])
    for i in range(len(scan_props.loop_state_outputs) - 1):
        log.debug("_create_scan_node - set scan state_output shape for %s[%s]:%s",
                  scan_node.name, index, state_input_shape)
        self.g.set_shape(scan_node.output[index], state_input_shape)
        self.g.set_dtype(scan_node.output[index], state_input_dtype)
        index += 1
    last_scan_input_shape = self.g.get_shape(scan_node.input[-1])
    batch = last_scan_input_shape[0]  # should be 1
    time = last_scan_input_shape[1]
    for i in range(len(scan_props.loop_scan_outputs)):
        scan_out_dtype = self.g.get_dtype(scan_props.loop_scan_outputs[i])
        scan_output_shape = [batch, time] + self.g.get_shape(scan_props.loop_scan_outputs[i])
        log.debug("scan output [%s] has shape %s, batch:%s, time: %s",
                  scan_props.loop_scan_outputs[i], scan_output_shape, batch, time)
        log.debug("_create_scan_node - set scan scan_output shape for %s[%s]:%s",
                  scan_node.name, index, scan_output_shape)
        self.g.set_shape(scan_node.output[index], scan_output_shape)
        self.g.set_dtype(scan_node.output[index], scan_out_dtype)
        index += 1
    return scan_node
def _extract_and_register_cell_graph_info(self, context, scan_props, scan_node):
    """Locate the loop-body subgraph and register it for the Scan node.

    Builds a SubGraphMetadata from the cell's input/output boundaries, walks
    the graph to collect the body nodes, bypasses Enter nodes (rewiring their
    outputs to their inputs), and decides per Enter input whether its feeding
    subgraph is a cell boundary (contains a placeholder) or part of the cell.
    The metadata is stored under the scan node's name in BodyGraphDict for
    the late rewriter; returns the collected body nodes.
    """
    log.debug("_extract_cell_graph_nodes")
    sub_graph_inputs = scan_props.loop_state_inputs + scan_props.loop_scan_inputs
    sub_graph_outputs = scan_props.loop_state_outputs + scan_props.loop_scan_outputs
    body_graph_meta = SubGraphMetadata(self.g, sub_graph_inputs, sub_graph_outputs,
                                       scan_props.initial_state_and_scan_inputs)
    # according to input and output, find the body graph
    nodes, enter_nodes = self.find_subgraph(body_graph_meta, self.g)
    other_enter_input_ids = []
    for enter_node in enter_nodes:
        # connect Enter's output to Enter's input
        self.g.replace_all_inputs(self.g.get_nodes(), enter_node.output[0], enter_node.input[0])
        nodes = self.g._extract_sub_graph_nodes(self.g.get_node_by_name(enter_node.input[0]))
        # if the enter target subgraph contains a placeholder, then we keep record of that as cell boundary.
        has_placeholder = None
        for n in nodes:
            if is_placeholder_op(n):
                has_placeholder = True
                break
        # if there is placeholder in the Enter's input graph, then we think we should consider the Enter's input
        # nodes as cell's input; otherwise, we think the input graph should be part of cell graph.
        if has_placeholder is True:
            log.debug("Enter input id [%s] is a subgraph containing placeholder, so make it cell boundary",
                      enter_node.input[0])
            other_enter_input_ids.append(enter_node.input[0])
    body_graph_meta.other_enter_input_ids = other_enter_input_ids
    log.debug("add body graph meta data into store")
    BodyGraphDict.add_body_graph_info(scan_node.name, body_graph_meta)
    return nodes
def _connect_scan_with_output(self, context, scan_node):
    """Wire the Scan node's outputs back into the surrounding graph.

    Scan outputs carry the workaround batch dim of 1, so each consumed
    output (state variables first, then scan outputs, matching Scan's
    output order starting at index 1 - index 0 is the time iterator) is
    passed through a reshape adapter before replacing the original loop
    result id everywhere it is used. Returns the adapter nodes created.
    """
    log.debug("connect scan output with the graph")
    index = 1
    nodes_to_append = []
    for _, val in context.other_loop_vars.items():
        var_output_id = val.exit_output_id
        if var_output_id:
            nodes = self._adapt_scan_sequence_input_or_output("state_output_reshape",
                                                              scan_node.output[index], True)
            nodes_to_append.extend(nodes)
            self.g.replace_all_inputs(self.g.get_nodes(), var_output_id, nodes[-1].output[0])
        index += 1
    for output_ta in context.output_tas:
        ta_final_output_id = output_ta.output_id
        if ta_final_output_id:
            nodes = self._adapt_scan_sequence_input_or_output("scan_output_reshape",
                                                              scan_node.output[index], True)
            nodes_to_append.extend(nodes)
            self.g.replace_all_inputs(self.g.get_nodes(), ta_final_output_id, nodes[-1].output[0])
        index += 1
    return nodes_to_append
def _adapt_scan_sequence_input_or_output(self, target_name, input_id, handle_output=False):
    """Reshape ``input_id`` to add (input) or strip (output) the fake batch dim.

    handle_output=False: prepend a leading 1 ([1] + shape) for feeding Scan.
    handle_output=True: drop the leading dim (shape[1:]) for Scan results.
    When the inferred shape is fully known (at most one -1 in the relevant
    part) a constant target shape is used; otherwise the shape is computed
    dynamically via Shape/Cast/Slice or Concat. Returns the created nodes;
    the final Reshape is last in the list.
    """
    nodes_to_add = []
    shape_node = make_onnx_node(self.g, "Shape", [input_id])
    nodes_to_add.append(shape_node)
    inferred_shape = self.g.get_shape(input_id)
    if handle_output is True:
        # just use a const for Reshape's shape input.
        if inferred_shape is not None and inferred_shape[1:].count(-1) <= 1:
            new_shape_node = self.g.make_const(utils.make_name(target_name + "_target_shape"),
                                               np.array(inferred_shape[1:], dtype=np.int64))
        else:
            # Dynamic path: Shape -> Cast(float) -> Slice[1:] -> Cast(int64).
            # The float round-trip exists because Slice here operates on a
            # tensor dtype Slice supports in this opset.
            origin_shape_node = make_onnx_node(self.g, "Cast", [shape_node.output[0]],
                                               {"to": onnx_pb.TensorProto.FLOAT})
            nodes_to_add.append(origin_shape_node)
            sliced_shape_node = make_onnx_node(self.g, "Slice", [origin_shape_node.output[0]],
                                               {"axes": [0], "starts": [1], "ends": [sys.maxsize]})
            nodes_to_add.append(sliced_shape_node)
            new_shape_node = make_onnx_node(self.g, "Cast", [sliced_shape_node.output[0]],
                                            {"to": onnx_pb.TensorProto.INT64})
            nodes_to_add.append(new_shape_node)
        new_shape = inferred_shape[1:]
    else:
        if inferred_shape is not None and inferred_shape.count(-1) <= 1:
            new_shape_node = self.g.make_const(utils.make_name(target_name + "_target_shape"),
                                               np.array([1] + inferred_shape, dtype=np.int64))
        else:
            # Dynamic path: Concat a constant [1] in front of the runtime shape.
            fake_batch_size_node = self.g.make_const(utils.make_name(target_name + "_target_shape"),
                                                     np.array([1,], dtype=np.int64))
            new_shape_node = make_onnx_node(self.g, "Concat",
                                            [fake_batch_size_node.output[0], shape_node.output[0]],
                                            {"axis": 0})
            nodes_to_add.append(new_shape_node)
        new_shape = [1] + inferred_shape
    reshape_node = make_onnx_node(self.g, "Reshape", [input_id, new_shape_node.output[0]],
                                  skip_conversion=True, op_name_scope=target_name)
    nodes_to_add.append(reshape_node)
    self.g.set_shape(reshape_node.output[0], new_shape)
    self.g.set_dtype(reshape_node.output[0], self.g.get_dtype(input_id))
    log.debug("create Reshape for scan output %s, with output shape %s",
              reshape_node.output[0], new_shape)
    return nodes_to_add
def _adapt_time_var_as_workaround(self, var):
    """Give the scalar time variable a length-1 leading dim for Scan.

    Unsqueezes the initial value and the per-iteration output, and squeezes
    the value consumed inside the loop body, so the cell still sees the
    original scalar while Scan carries a rank+1 state. Mutates ``var`` in
    place and returns it plus the created adapter nodes.
    """
    log.debug("_adapt_time_var_as_workaround")
    nodes_to_append = []
    # change time shape to {1} since current Scan does not support scalar
    time_init_node = self._create_unsqueeze_node("time_var_init", var.enter_input_id)
    nodes_to_append.append(time_init_node)
    var.enter_input_id = time_init_node.output[0]
    time_output_node = self._create_unsqueeze_node("time_var_output", var.next_iteration_input_id)
    nodes_to_append.append(time_output_node)
    var.next_iteration_input_id = time_output_node.output[0]
    time_input_node = self._create_squeeze_node("time_var_input", var.switch_true_identity_output_id)
    nodes_to_append.append(time_input_node)
    self.g.replace_all_inputs(self.g.get_nodes(), var.switch_true_identity_output_id, time_input_node.output[0])
    self.g.set_shape(var.switch_true_identity_output_id, [1] + self.g.get_shape(var.switch_true_identity_output_id))
    return var, nodes_to_append
def _create_unsqueeze_node(self, target_name, input_id):
    """Create an Unsqueeze(axes=[0]) node for ``input_id``.

    Registers the unsqueezed shape ([1] + input shape) and the input's
    dtype on the graph for the new node's output.

    :param target_name: name scope for the created node.
    :param input_id: graph output id to unsqueeze.
    :return: the created Unsqueeze node.
    :raises ValueError: if the input's shape is unknown (shape inference
        is required to register the output shape).
    """
    unsqueeze_node = make_onnx_node(self.g, "Unsqueeze", [input_id], attr={"axes": [0]},
                                    skip_conversion=True, op_name_scope=target_name)
    input_shape = self.g.get_shape(input_id)
    if input_shape is None:
        # Message clarifies it is the *shape* that is missing, not the id.
        raise ValueError("shape of " + input_id + " is none")
    # list() makes this safe for tuple shapes too, consistent with
    # _create_squeeze_node below ([1] + tuple would raise TypeError).
    input_shape = [1] + list(input_shape)
    self.g.set_shape(unsqueeze_node.output[0], input_shape)
    self.g.set_dtype(unsqueeze_node.output[0], self.g.get_dtype(input_id))
    return unsqueeze_node
def _create_squeeze_node(self, target_name, input_id):
    """Create a Squeeze(axes=[0]) node for ``input_id`` and register the
    squeezed shape (input shape without its leading dim) plus the input's
    dtype on the graph. Raises ValueError when the input shape is unknown.
    """
    squeeze_node = make_onnx_node(self.g, "Squeeze", [input_id], attr={"axes": [0]},
                                  skip_conversion=True, op_name_scope=target_name)
    shape = self.g.get_shape(input_id)
    if shape is None:
        raise ValueError(input_id + " is none")
    squeezed_shape = list(shape)[1:]
    self.g.set_shape(squeeze_node.output[0], squeezed_shape)
    self.g.set_dtype(squeeze_node.output[0], self.g.get_dtype(input_id))
    return squeeze_node
class CustomRnnLateRewriter(object):
    """Second-pass rewriter: builds each Scan node's body graph attribute.

    Runs after the main loop rewrite. For every Scan node with registered
    body-graph metadata, it extracts the body nodes from the main graph,
    turns them into a standalone Graph with proper inputs/outputs, attaches
    it as the Scan node's "body" attribute, and removes the extracted nodes
    from the main graph.
    """

    def __init__(self, g):
        # g: the main Graph being rewritten.
        self.g = g

    def rewrite(self):
        """Attach body graphs to all Scan nodes; return the pruned node list."""
        log.debug("enter custom rnn late rewriter")
        nodes = self.g.get_nodes()
        nodes_to_remove = []
        for scan_node in nodes:
            if scan_node.type != "Scan":
                continue
            log.debug("late write for scan node %s", scan_node.name)
            num_scan_inputs = scan_node.get_attr("num_scan_inputs").i
            if not BodyGraphDict.has_body_graph_info(scan_node.name):
                continue
            # pop: each scan node's metadata is consumed exactly once.
            body_graph_meta = BodyGraphDict.pop_body_graph_info(scan_node.name)
            onnx_nodes, _ = LoopRewriterBase.find_subgraph(body_graph_meta, self.g)
            nodes_to_remove.extend(onnx_nodes)
            log.debug("start creating body graph for scan node %s ", scan_node.name)
            # Consts become initializers of the body graph instead of nodes.
            body_graph_initializers = {}
            const_nodes = [n for n in onnx_nodes if n.type in ("Const", "ConstV2")]
            for n in const_nodes:
                body_graph_initializers[n.output[0]] = self.g.initializers[n.output[0]]
                onnx_nodes.remove(n)
            onnx_nodes = set(onnx_nodes)
            ops = []
            for op in onnx_nodes:
                onnx_op = op.op
                ops.append(onnx_op)
            # Body graph shares the main graph's shape/dtype maps.
            body_g = Graph(ops, output_shapes=self.g._output_shapes, dtypes=self.g._dtypes)
            body_g._initializers = body_graph_initializers
            log.debug("start preparing body graph inputs nodes")
            temp_nodes = body_g.get_nodes()
            i = 0
            input_count = len(body_graph_meta.input_ids)
            for input_name, init_input_id in zip(body_graph_meta.input_ids, body_graph_meta.initial_input_ids):
                shape = body_g.get_shape(input_name)
                dtype = body_g.get_dtype(input_name)
                if shape is None:
                    # Fall back to the initial input's shape; for scan inputs
                    # (the trailing num_scan_inputs) strip the [batch, time]
                    # dims since the body sees one step at a time.
                    shape = self.g.get_shape(init_input_id)
                    if i >= input_count - num_scan_inputs:
                        loop_input_shape = list(shape)[2:]
                    else:
                        loop_input_shape = list(shape)
                else:
                    loop_input_shape = list(shape)
                onnx_input_shape = utils.make_onnx_shape(loop_input_shape)
                val = helper.make_tensor_value_info(input_name, dtype, onnx_input_shape)
                body_g.add_model_input(input_name, val)
                i += 1
            log.debug("start preparing body graph outputs nodes")
            new_output_names = []
            for o in body_graph_meta.output_ids:
                # output node.
                identity_name = utils.make_name("Identity")
                identity_output = utils.port_name(identity_name)
                node = Node(helper.make_node("Identity", [o], [identity_output], name=identity_name), body_g)
                body_g.set_dtype(identity_output, body_g.get_dtype(o))
                body_g.copy_shape(o, identity_output)
                new_output_names.append(identity_output)
                temp_nodes.append(node)
            body_g.set_nodes(temp_nodes)
            body_g.topological_sort(body_g.get_nodes())
            log.debug("start make graph based on body graph nodes")
            body_g.output_names = new_output_names
            graph = body_g.make_graph("scan body graph")
            scan_node.set_attr("body", graph)
        # remove nodes in body graph from g
        for n in set(nodes_to_remove):
            if n in nodes:
                nodes.remove(n)
            elif self.g.is_initializer(n.output[0]):
                del self.g.initializers[n.output[0]]
            else:
                raise ValueError("error when removing nodes")
        return nodes
| true | true |
f7ff6c49d953004c2a377d68536f176a987103eb | 3,796 | py | Python | tensorflow_datasets/image/cats_vs_dogs.py | Suhasnama/datasets | 1259b2329825dfee02ab1925f41d00756d9e7bdc | [
"Apache-2.0"
] | 1 | 2020-04-14T08:08:48.000Z | 2020-04-14T08:08:48.000Z | tensorflow_datasets/image/cats_vs_dogs.py | Suhasnama/datasets | 1259b2329825dfee02ab1925f41d00756d9e7bdc | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/image/cats_vs_dogs.py | Suhasnama/datasets | 1259b2329825dfee02ab1925f41d00756d9e7bdc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cats vs Dogs dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl import logging
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
# BibTeX citation for the Asirra/Cats-vs-Dogs dataset (Elson et al., CCS 2007).
_CITATION = """\
@Inproceedings (Conference){asirra-a-captcha-that-exploits-interest-aligned-manual-image-categorization,
author = {Elson, Jeremy and Douceur, John (JD) and Howell, Jon and Saul, Jared},
title = {Asirra: A CAPTCHA that Exploits Interest-Aligned Manual Image Categorization},
booktitle = {Proceedings of 14th ACM Conference on Computer and Communications Security (CCS)},
year = {2007},
month = {October},
publisher = {Association for Computing Machinery, Inc.},
url = {https://www.microsoft.com/en-us/research/publication/asirra-a-captcha-that-exploits-interest-aligned-manual-image-categorization/},
edition = {Proceedings of 14th ACM Conference on Computer and Communications Security (CCS)},
}
"""

# Official Microsoft download for the Kaggle Cats-and-Dogs archive.
_URL = ("https://download.microsoft.com/download/3/E/1/3E1C3F21-"
        "ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip")

# Known number of images in the archive that fail the JFIF check and are
# skipped; _generate_examples validates it saw exactly this many.
_NUM_CORRUPT_IMAGES = 1738

_DESCRIPTION = (("A large set of images of cats and dogs."
                 "There are %d corrupted images that are dropped.")
                % _NUM_CORRUPT_IMAGES)

# Matches 'PetImages/<Cat|Dog>/<number>.jpg' (either path separator);
# group(1) captures the class name.
_NAME_RE = re.compile(r"^PetImages[\\/](Cat|Dog)[\\/]\d+\.jpg$")
class CatsVsDogs(tfds.core.GeneratorBasedBuilder):
  """Cats vs Dogs."""

  VERSION = tfds.core.Version(
      "4.0.0", "New split API (https://tensorflow.org/datasets/splits)")

  def _info(self):
    """Dataset metadata: image + filename + binary cat/dog label."""
    features = tfds.features.FeaturesDict({
        "image": tfds.features.Image(),
        "image/filename": tfds.features.Text(),  # eg 'PetImages/Dog/0.jpg'
        "label": tfds.features.ClassLabel(names=["cat", "dog"]),
    })
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=features,
        supervised_keys=("image", "label"),
        homepage=
        "https://www.microsoft.com/en-us/download/details.aspx?id=54765",
        citation=_CITATION
    )

  def _split_generators(self, dl_manager):
    """Download the archive; expose everything as a single TRAIN split."""
    archive_path = dl_manager.download(_URL)
    # There is no predefined train/val/test split for this dataset.
    train_split = tfds.core.SplitGenerator(
        name=tfds.Split.TRAIN,
        gen_kwargs={
            "archive": dl_manager.iter_archive(archive_path),
        })
    return [train_split]

  def _generate_examples(self, archive):
    """Generate Cats vs Dogs images and labels given a directory path."""
    skipped = 0
    for fname, fobj in archive:
      match = _NAME_RE.match(fname)
      if match is None:  # README file, ...
        continue
      label = match.group(1).lower()
      if tf.compat.as_bytes("JFIF") not in fobj.peek(10):
        # Not a valid JPEG; counted against the known corrupt total.
        skipped += 1
        continue
      yield fname, {
          "image": fobj,
          "image/filename": fname,
          "label": label,
      }
    if skipped != _NUM_CORRUPT_IMAGES:
      raise ValueError("Expected %d corrupt images, but found %d" % (
          _NUM_CORRUPT_IMAGES, skipped))
    logging.warning("%d images were corrupted and were skipped", skipped)
| 35.476636 | 138 | 0.681507 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl import logging
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """\
@Inproceedings (Conference){asirra-a-captcha-that-exploits-interest-aligned-manual-image-categorization,
author = {Elson, Jeremy and Douceur, John (JD) and Howell, Jon and Saul, Jared},
title = {Asirra: A CAPTCHA that Exploits Interest-Aligned Manual Image Categorization},
booktitle = {Proceedings of 14th ACM Conference on Computer and Communications Security (CCS)},
year = {2007},
month = {October},
publisher = {Association for Computing Machinery, Inc.},
url = {https://www.microsoft.com/en-us/research/publication/asirra-a-captcha-that-exploits-interest-aligned-manual-image-categorization/},
edition = {Proceedings of 14th ACM Conference on Computer and Communications Security (CCS)},
}
"""
_URL = ("https://download.microsoft.com/download/3/E/1/3E1C3F21-"
"ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip")
_NUM_CORRUPT_IMAGES = 1738
_DESCRIPTION = (("A large set of images of cats and dogs."
"There are %d corrupted images that are dropped.")
% _NUM_CORRUPT_IMAGES)
_NAME_RE = re.compile(r"^PetImages[\\/](Cat|Dog)[\\/]\d+\.jpg$")
class CatsVsDogs(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version(
"4.0.0", "New split API (https://tensorflow.org/datasets/splits)")
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"image": tfds.features.Image(),
"image/filename": tfds.features.Text(),
"label": tfds.features.ClassLabel(names=["cat", "dog"]),
}),
supervised_keys=("image", "label"),
homepage=
"https://www.microsoft.com/en-us/download/details.aspx?id=54765",
citation=_CITATION
)
def _split_generators(self, dl_manager):
path = dl_manager.download(_URL)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"archive": dl_manager.iter_archive(path),
}),
]
def _generate_examples(self, archive):
num_skipped = 0
for fname, fobj in archive:
res = _NAME_RE.match(fname)
if not res:
continue
label = res.group(1).lower()
if tf.compat.as_bytes("JFIF") not in fobj.peek(10):
num_skipped += 1
continue
record = {
"image": fobj,
"image/filename": fname,
"label": label,
}
yield fname, record
if num_skipped != _NUM_CORRUPT_IMAGES:
raise ValueError("Expected %d corrupt images, but found %d" % (
_NUM_CORRUPT_IMAGES, num_skipped))
logging.warning("%d images were corrupted and were skipped", num_skipped)
| true | true |
f7ff6ca28ba89ed9483e81796f69515f8928422c | 2,484 | py | Python | mopidy_tidal/lru_cache.py | tbjep/mopidy-tidal | edc9b181e4e0da32bfc9dbe4a5f073996b4e183b | [
"Apache-2.0"
] | 30 | 2016-02-08T11:58:15.000Z | 2020-02-09T16:16:39.000Z | mopidy_tidal/lru_cache.py | tbjep/mopidy-tidal | edc9b181e4e0da32bfc9dbe4a5f073996b4e183b | [
"Apache-2.0"
] | 37 | 2020-02-27T04:45:54.000Z | 2022-03-08T14:28:51.000Z | mopidy_tidal/lru_cache.py | tbjep/mopidy-tidal | edc9b181e4e0da32bfc9dbe4a5f073996b4e183b | [
"Apache-2.0"
] | 16 | 2020-05-02T22:41:19.000Z | 2022-01-19T16:24:09.000Z | from __future__ import unicode_literals
import logging
from collections import OrderedDict
logger = logging.getLogger(__name__)
class LruCache(OrderedDict):
    """A least-recently-used cache with a fixed maximum number of entries.

    Backed by an OrderedDict whose order tracks recency: the least recently
    used entry sits at the front and is evicted first when the cache grows
    past ``max_size``. Both ``hit`` and ``__setitem__`` refresh recency.
    """

    def __init__(self, max_size=1024):
        """:raises ValueError: if max_size is not a positive integer."""
        if max_size <= 0:
            raise ValueError('Invalid size')
        OrderedDict.__init__(self)
        self._max_size = max_size
        self._check_limit()

    def get_max_size(self):
        """Return the configured maximum number of entries."""
        return self._max_size

    def hit(self, key):
        """Return the cached value for ``key`` and mark it most recently
        used, or None on a cache miss."""
        if key in self:
            # move_to_end refreshes recency without the delete/re-insert
            # round-trip the original implementation performed.
            self.move_to_end(key)
            return self[key]
        return None

    def __setitem__(self, key, value):
        # Re-setting an existing key must also refresh recency, so the old
        # entry is dropped before appending at the most-recent end.
        if key in self:
            del self[key]
        OrderedDict.__setitem__(self, key, value)
        self._check_limit()

    def _check_limit(self):
        # Evict least-recently-used entries (front of the OrderedDict) until
        # within the size limit; popitem(last=False) is O(1) per eviction,
        # unlike the original list(self)[0] which copied all keys.
        while len(self) > self._max_size:
            self.popitem(last=False)
class SearchCache(LruCache):
    """LRU cache wrapped around a search function.

    Calls are keyed by a SearchKey built from the keyword arguments; on a
    miss the wrapped function is invoked and its result cached.
    """

    def __init__(self, func):
        super(SearchCache, self).__init__()
        self._func = func

    def __call__(self, *args, **kwargs):
        key = SearchKey(**kwargs)
        result = self.hit(key)
        if result is None:
            logger.info("Search cache miss")
            result = self._func(*args, **kwargs)
            self[key] = result
        else:
            logger.info("Search cache hit")
        return result
class SearchKey(object):
    """Hashable, order-insensitive cache key built from search kwargs.

    Equality and hashing depend on the ``exact`` flag and the sanitized
    query (sorted into a tuple so dict ordering does not matter).
    """

    def __init__(self, **kwargs):
        sanitized = self.fix_query(kwargs["query"])
        self._query = tuple(sorted(sanitized.items()))
        self._exact = kwargs["exact"]
        self._hash = None  # computed lazily on first hash()

    def __hash__(self):
        if self._hash is None:
            self._hash = hash(self._exact) ^ hash(repr(self._query))
        return self._hash

    def __eq__(self, other):
        return (isinstance(other, SearchKey)
                and self._exact == other._exact
                and self._query == other._query)

    @staticmethod
    def fix_query(query):
        """
        Removes some query parameters that otherwise will lead to a cache miss.
        Eg: 'track_no' since we can't query TIDAL for a specific album's track.
        Note: mutates the passed dict in place (the parameter is also dropped
        from the query that eventually reaches the backend).
        :param query: query dictionary
        :return: sanitized query dictionary
        """
        query.pop("track_no", None)
        return query
| 27 | 79 | 0.590177 | from __future__ import unicode_literals
import logging
from collections import OrderedDict
logger = logging.getLogger(__name__)
class LruCache(OrderedDict):
def __init__(self, max_size=1024):
if max_size <= 0:
raise ValueError('Invalid size')
OrderedDict.__init__(self)
self._max_size = max_size
self._check_limit()
def get_max_size(self):
return self._max_size
def hit(self, key):
if key in self:
val = self[key]
self[key] = val
return val
return None
def __setitem__(self, key, value):
if key in self:
del self[key]
OrderedDict.__setitem__(self, key, value)
self._check_limit()
def _check_limit(self):
while len(self) > self._max_size:
k = list(self)[0]
del self[k]
class SearchCache(LruCache):
def __init__(self, func):
super(SearchCache, self).__init__()
self._func = func
def __call__(self, *args, **kwargs):
key = SearchKey(**kwargs)
cached_result = self.hit(key)
logger.info("Search cache miss" if cached_result is None
else "Search cache hit")
if cached_result is None:
cached_result = self._func(*args, **kwargs)
self[key] = cached_result
return cached_result
class SearchKey(object):
def __init__(self, **kwargs):
fixed_query = self.fix_query(kwargs["query"])
self._query = tuple(sorted(fixed_query.items()))
self._exact = kwargs["exact"]
self._hash = None
def __hash__(self):
if self._hash is None:
self._hash = hash(self._exact)
self._hash ^= hash(repr(self._query))
return self._hash
def __eq__(self, other):
if not isinstance(other, SearchKey):
return False
return self._exact == other._exact and \
self._query == other._query
@staticmethod
def fix_query(query):
query.pop("track_no", None)
return query
| true | true |
f7ff6cade14a512b896f80b1e90d97c72e5c43ee | 2,900 | py | Python | CNN/dpp_regression.py | phillnguyen/schnablelab | 2e7803ed82489880d79ac0c7bce39857de5e4547 | [
"BSD-2-Clause"
] | null | null | null | CNN/dpp_regression.py | phillnguyen/schnablelab | 2e7803ed82489880d79ac0c7bce39857de5e4547 | [
"BSD-2-Clause"
] | null | null | null | CNN/dpp_regression.py | phillnguyen/schnablelab | 2e7803ed82489880d79ac0c7bce39857de5e4547 | [
"BSD-2-Clause"
] | null | null | null | """
train neural network to detect whether plant flowers or not
"""
import warnings
warnings.filterwarnings('ignore',category=FutureWarning)
from glob import glob
import numpy as np
import pickle
import deepplantphenomics as dpp
from pathlib import Path
import os
import sys
def train(train_dir, label_fn, model_dir, epoch, lr):
    """
    Train a deepplantphenomics regression CNN on labeled images.

    Builds a 4-conv-layer regression model (418x283x3 inputs, tanh
    activations, max pooling) with brightness/contrast and flip
    augmentation, then trains it for ``epoch`` epochs. Checkpoints and
    tensorboard logs are written under ``model_dir``.

    train_dir: the directory where your training images are located
    label_fn: the file name of labels under train_dir. Just specify the file name, don't include the path.
    model_dir: the name of your model. Model results will be saved to this dir.
    epoch: specify the epoch. The dpp documentation suggests 100 for plant stress and 500 for counting.
    lr: specify the learning rate. 0.0001 is used in the dpp leaf counting example.
    """
    model_dir_path = Path(model_dir)
    if not model_dir_path.exists():
        model_dir_path.mkdir()
    tensorboard_dir_path = model_dir_path/'tensorboard'
    img_dir = Path(train_dir)
    model = dpp.RegressionModel(debug=True, save_checkpoints=True, report_rate=150, tensorboard_dir=str(tensorboard_dir_path), save_dir=str(model_dir_path))
    #model.set_batch_size(72)
    model.set_batch_size(45)
    #model.set_number_of_threads(10)
    model.set_number_of_threads(100)
    model.set_image_dimensions(418, 283, 3)
    model.set_resize_images(True)
    model.set_num_regression_outputs(1)
    # No held-out data: all images are used for training.
    model.set_test_split(0.0)
    model.set_validation_split(0.0)
    model.set_learning_rate(float(lr))
    model.set_weight_initializer('xavier')
    model.set_maximum_training_epochs(int(epoch))
    # Augmentation options
    model.set_augmentation_brightness_and_contrast(True)
    model.set_augmentation_flip_horizontal(True)
    model.set_augmentation_flip_vertical(True)
    #model.set_augmentation_crop(True)
    # Load labels and images (first CSV column is the image id)
    model.load_multiple_labels_from_csv(img_dir/label_fn, id_column=0)
    model.load_images_with_ids_from_directory(img_dir)
    # Define a model architecture
    model.add_input_layer()
    model.add_convolutional_layer(filter_dimension=[5, 5, 3, 32], stride_length=1, activation_function='tanh')
    model.add_pooling_layer(kernel_size=3, stride_length=2)
    model.add_convolutional_layer(filter_dimension=[5, 5, 32, 64], stride_length=1, activation_function='tanh')
    model.add_pooling_layer(kernel_size=3, stride_length=2)
    model.add_convolutional_layer(filter_dimension=[3, 3, 64, 64], stride_length=1, activation_function='tanh')
    model.add_pooling_layer(kernel_size=3, stride_length=2)
    model.add_convolutional_layer(filter_dimension=[3, 3, 64, 64], stride_length=1, activation_function='tanh')
    model.add_pooling_layer(kernel_size=3, stride_length=2)
    model.add_output_layer()
    # Begin training the model
    model.begin_training()
# CLI entry point: expects exactly 5 positional arguments
# (train_dir, label_fn, model_dir, epoch, lr); otherwise print usage.
if len(sys.argv)==6:
    train(*sys.argv[1:])
else:
    print('train_dir', 'label_fn', 'model_dir', 'epoch', 'lr')
| 40.84507 | 156 | 0.761034 | import warnings
warnings.filterwarnings('ignore',category=FutureWarning)
from glob import glob
import numpy as np
import pickle
import deepplantphenomics as dpp
from pathlib import Path
import os
import sys
def train(train_dir, label_fn, model_dir, epoch, lr):
model_dir_path = Path(model_dir)
if not model_dir_path.exists():
model_dir_path.mkdir()
tensorboard_dir_path = model_dir_path/'tensorboard'
img_dir = Path(train_dir)
model = dpp.RegressionModel(debug=True, save_checkpoints=True, report_rate=150, tensorboard_dir=str(tensorboard_dir_path), save_dir=str(model_dir_path))
model.set_batch_size(45)
model.set_number_of_threads(100)
model.set_image_dimensions(418, 283, 3)
model.set_resize_images(True)
model.set_num_regression_outputs(1)
model.set_test_split(0.0)
model.set_validation_split(0.0)
model.set_learning_rate(float(lr))
model.set_weight_initializer('xavier')
model.set_maximum_training_epochs(int(epoch))
model.set_augmentation_brightness_and_contrast(True)
model.set_augmentation_flip_horizontal(True)
model.set_augmentation_flip_vertical(True)
model.load_multiple_labels_from_csv(img_dir/label_fn, id_column=0)
model.load_images_with_ids_from_directory(img_dir)
model.add_input_layer()
model.add_convolutional_layer(filter_dimension=[5, 5, 3, 32], stride_length=1, activation_function='tanh')
model.add_pooling_layer(kernel_size=3, stride_length=2)
model.add_convolutional_layer(filter_dimension=[5, 5, 32, 64], stride_length=1, activation_function='tanh')
model.add_pooling_layer(kernel_size=3, stride_length=2)
model.add_convolutional_layer(filter_dimension=[3, 3, 64, 64], stride_length=1, activation_function='tanh')
model.add_pooling_layer(kernel_size=3, stride_length=2)
model.add_convolutional_layer(filter_dimension=[3, 3, 64, 64], stride_length=1, activation_function='tanh')
model.add_pooling_layer(kernel_size=3, stride_length=2)
model.add_output_layer()
model.begin_training()
if len(sys.argv)==6:
train(*sys.argv[1:])
else:
print('train_dir', 'label_fn', 'model_dir', 'epoch', 'lr')
| true | true |
f7ff6cd6405ede361a727ed253af152ccdedc331 | 4,352 | py | Python | srs/main.py | ConnerZhao/FRES | a6d82065eedf90ffaad91b242488e4aef844033d | [
"MIT"
] | null | null | null | srs/main.py | ConnerZhao/FRES | a6d82065eedf90ffaad91b242488e4aef844033d | [
"MIT"
] | null | null | null | srs/main.py | ConnerZhao/FRES | a6d82065eedf90ffaad91b242488e4aef844033d | [
"MIT"
] | null | null | null | import tkinter
from tkinter import *
from tkinter import messagebox
import cv2
import numpy as np
from keras.preprocessing import image
import warnings
warnings.filterwarnings("ignore")
from keras.preprocessing.image import load_img, img_to_array
from keras.models import load_model
import matplotlib.pyplot as plt
import numpy as np
import random
# window
# Main application window.
# NOTE(review): title string looks like a leftover from a tutorial template -
# confirm it should not name this application instead.
tkWindow = Tk()
tkWindow.geometry('400x150')
tkWindow.title('Tkinter Login Form - pythonexamples.org')
# load model
# Pre-trained Keras emotion classifier plus OpenCV's bundled frontal-face
# Haar cascade; the default webcam (device 0) is opened once at startup.
model = load_model("best_model.h5")
face_haar_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
def scanButton():
    """Run live facial-emotion detection from the webcam until 'q' is pressed.

    For every captured frame, detected faces are classified by the pre-loaded
    Keras model; the predicted label is drawn on the frame and written to
    emotions.txt (the file is rewritten each frame, so it always holds the
    labels of the latest frame for read() to consume).
    """
    messagebox.showinfo(title="Look at the Camera", message="Look at the Camera\nOnce the facial expression is labeled, press Q to stop scanning!")
    emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')
    while True:
        ret, test_img = cap.read()  # captures frame and returns boolean value and captured image
        if not ret:
            continue
        # NOTE(review): despite the variable name this converts BGR->RGB
        # (3 channels), matching the model's 224x224x3 input - confirm intended.
        gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2RGB)
        faces_detected = face_haar_cascade.detectMultiScale(gray_img, 1.32, 5)
        # 'with' guarantees the handle is closed every frame; the original
        # reopened the file each iteration without closing the previous one.
        with open("emotions.txt", "w") as f:
            for (x, y, w, h) in faces_detected:
                cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=6)
                # bug fix: rows span the height (h) and columns the width (w);
                # the original swapped them (harmless only for square boxes).
                roi = gray_img[y:y + h, x:x + w]  # cropping region of interest i.e. face area from image
                roi = cv2.resize(roi, (224, 224))
                img_pixels = image.img_to_array(roi)
                img_pixels = np.expand_dims(img_pixels, axis=0)
                img_pixels /= 255  # scale to [0, 1]
                predictions = model.predict(img_pixels)
                # find max indexed array
                max_index = np.argmax(predictions[0])
                predicted_emotion = emotions[max_index]
                f.write(predicted_emotion + "\n")
                cv2.putText(test_img, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow('Facial emotion analysis ', resized_img)
        if cv2.waitKey(10) == ord('q'):  # wait until 'q' key is pressed
            break
    messagebox.showinfo(title='', message="Scanning Completed")
    cap.release()
    cv2.destroyAllWindows()  # bug fix: was 'cv2.destroyAllWindows' (no call, a no-op)
def _show_random_quote(path, index):
    """Pop up one quote line from *path* (1-based *index*, wrapped into
    range so short files still work).  Helper for read()."""
    with open(path, "rt") as quote_file:
        quotes = quote_file.readlines()
    if quotes:
        messagebox.showinfo(title='', message=quotes[(index - 1) % len(quotes)])

def read():
    """Read the last scanned emotion from emotions.txt and show a matching
    quote picked pseudo-randomly from <emotion>.txt."""
    # Random 1-based quote number (quote files are assumed to hold at least
    # one line; the index wraps if a file has fewer than 4 lines).
    x = random.randint(1, 4)
    try:
        with open("emotions.txt", "rt") as label_file:
            # BUGFIX: read exactly one label line.  The old read(10) could
            # swallow part of a second label when several faces were scanned,
            # producing a string that matched no emotion.  The handle is now
            # also closed (it previously leaked).
            emotion = label_file.readline().strip()
    except OSError:
        # No scan has been performed yet; fall through to the else message.
        emotion = ""
    print(emotion)
    if emotion == "happy":
        # Extra cheer for the happy case, kept from the original flow.
        messagebox.showinfo(title='', message="We're glad you are having a great day!\nKeep it up!")
    if emotion in ("angry", "disgust", "fear", "happy", "surprise", "sad"):
        # BUGFIX: the old code passed x as a byte *hint* to readlines(),
        # which always returned the first line(s) -- never a random quote.
        _show_random_quote(emotion + ".txt", x)
    else:
        messagebox.showinfo(title='', message='You have not scanned your facial expression yet!')
# Exit Button: closes the application via the builtin quit.
quitButton = tkinter.Button(tkWindow,
                   text="Quit",
                   fg="red",
                   command=quit)
# Main action buttons: start webcam scanning / show a quote for the result.
# NOTE(review): "Mesage" is a typo in the visible label; it is runtime text,
# so it is deliberately not changed in this documentation-only pass.
scan = Button(tkWindow, text="Scan", fg="Green", command = scanButton)
msgButton = Button(tkWindow, text="Mesage", command = read)
scan.pack()
msgButton.pack()
quitButton.pack()
tkWindow.mainloop() | 38.857143 | 152 | 0.61443 | import tkinter
from tkinter import *
from tkinter import messagebox
import cv2
import numpy as np
from keras.preprocessing import image
import warnings
warnings.filterwarnings("ignore")
from keras.preprocessing.image import load_img, img_to_array
from keras.models import load_model
import matplotlib.pyplot as plt
import numpy as np
import random
tkWindow = Tk()
tkWindow.geometry('400x150')
tkWindow.title('Tkinter Login Form - pythonexamples.org')
model = load_model("best_model.h5")
face_haar_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
def scanButton():
messagebox.showinfo( title = "Look at the Camera", message= "Look at the Camera\nOnce the facial expression is labeled, press Q to stop scanning!")
while True:
f = open("emotions.txt", "w")
ret, test_img = cap.read()
if not ret:
continue
gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2RGB)
faces_detected = face_haar_cascade.detectMultiScale(gray_img, 1.32, 5)
for (x, y, w, h) in faces_detected:
cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=6)
roi_gray = gray_img[y:y + w, x:x + h]
roi_gray = cv2.resize(roi_gray, (224, 224))
img_pixels = image.img_to_array(roi_gray)
img_pixels = np.expand_dims(img_pixels, axis=0)
img_pixels /= 255
predictions = model.predict(img_pixels)
max_index = np.argmax(predictions[0])
emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')
predicted_emotion = emotions[max_index]
f.write(emotions[max_index] + "\n")
cv2.putText(test_img, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
resized_img = cv2.resize(test_img, (1000, 700))
cv2.imshow('Facial emotion analysis ', resized_img)
if cv2.waitKey(10) == ord('q'):
f.close()
break
messagebox.showinfo( title = '', message= "Scanning Completed")
cap.release()
cv2.destroyAllWindows
def read():
x = random.randint(1,4)
y = open("emotions.txt", "rt")
z = y.read(10)
emotion = z.strip()
print(z)
if emotion == "angry":
quote = open("angry.txt", "rt")
messagebox.showinfo( title = '', message= quote.readlines(x))
quote.close()
elif emotion == "disgust":
quote = open("disgust.txt", "rt")
messagebox.showinfo( title = '', message= quote.readlines(x))
quote.close()
elif emotion == "fear":
quote = open("fear.txt", "rt")
messagebox.showinfo( title = '', message= quote.readlines(x))
quote.close()
elif emotion == "happy":
messagebox.showinfo( title = '', message= "We're glad you are having a great day!\nKeep it up!")
quote = open("happy.txt", "rt")
messagebox.showinfo( title = '', message= quote.readlines(x))
quote.close()
elif emotion == "surprise":
quote = open("surprise.txt", "rt")
messagebox.showinfo( title = '', message= quote.readlines(x))
quote.close()
elif emotion == "sad":
quote = open("sad.txt", "rt")
messagebox.showinfo( title = '', message= quote.readlines(x))
quote.close()
else:
messagebox.showinfo( title = '', message= 'You have not scanned your facial expression yet!')
# Exit Button
quitButton = tkinter.Button(tkWindow,
text="Quit",
fg="red",
command=quit)
# init Buttons
scan = Button(tkWindow, text="Scan", fg="Green", command = scanButton)
msgButton = Button(tkWindow, text="Mesage", command = read)
scan.pack()
msgButton.pack()
quitButton.pack()
tkWindow.mainloop() | true | true |
f7ff6db84a576f42b732b2559f58456c3d11a750 | 3,429 | py | Python | mne/utils/__init__.py | kalenkovich/mne-python | d5752051b37f74713233929382bcc632d404f837 | [
"BSD-3-Clause"
] | null | null | null | mne/utils/__init__.py | kalenkovich/mne-python | d5752051b37f74713233929382bcc632d404f837 | [
"BSD-3-Clause"
] | null | null | null | mne/utils/__init__.py | kalenkovich/mne-python | d5752051b37f74713233929382bcc632d404f837 | [
"BSD-3-Clause"
] | null | null | null | # # # WARNING # # #
# This list must also be updated in doc/_templates/autosummary/class.rst if it
# is changed here!
_doc_special_members = ('__contains__', '__getitem__', '__iter__', '__len__',
'__add__', '__sub__', '__mul__', '__div__',
'__neg__', '__hash__')
from ._bunch import Bunch, BunchConst, BunchConstNamed
from .check import (check_fname, check_version, check_random_state,
_check_fname, _check_subject, _check_pandas_installed,
_check_pandas_index_arguments, _check_mayavi_version,
_check_event_id, _check_ch_locs, _check_compensation_grade,
_check_if_nan, _is_numeric, _ensure_int, _check_preload,
_validate_type, _check_info_inv, _check_pylsl_installed,
_check_channels_spatial_filter, _check_one_ch_type,
_check_rank, _check_option, _check_depth, _check_combine)
from .config import (set_config, get_config, get_config_path, set_cache_dir,
set_memmap_min_size, get_subjects_dir, _get_stim_channel,
sys_info, _get_extra_data_path, _get_root_dir,
_get_call_line)
from .docs import (copy_function_doc_to_method_doc, copy_doc, linkcode_resolve,
open_docs, deprecated, fill_doc, copy_base_doc_to_subclass_doc)
from .fetching import _fetch_file, _url_to_local_path
from ._logging import (verbose, logger, set_log_level, set_log_file,
use_log_level, catch_logging, warn, filter_out_warnings,
ETSContext)
from .misc import (run_subprocess, _pl, _clean_names, _Counter, pformat,
_explain_exception, _get_argvalues, sizeof_fmt,
running_subprocess)
from .progressbar import ProgressBar
from ._testing import (run_tests_if_main, requires_sklearn,
requires_version, requires_nibabel, requires_mayavi,
requires_good_network, requires_mne, requires_pandas,
requires_h5py, traits_test, requires_pysurfer,
ArgvSetter, SilenceStdout, has_freesurfer, has_mne_c,
_TempDir, has_nibabel, _import_mlab, buggy_mkl_svd,
requires_numpydoc, requires_tvtk, requires_freesurfer,
requires_nitime, requires_fs_or_nibabel, requires_dipy,
requires_neuromag2ft, requires_pylsl, assert_object_equal,
assert_and_remove_boundary_annot, _raw_annot,
assert_dig_allclose, assert_meg_snr, assert_snr,
modified_env)
from .numerics import (hashfunc, md5sum, _compute_row_norms,
_reg_pinv, random_permutation, _reject_data_segments,
compute_corr, _get_inst_data, array_split_idx,
sum_squared, split_list, _gen_events, create_slices,
_time_mask, _freq_mask, grand_average, object_diff,
object_hash, object_size, _apply_scaling_cov,
_undo_scaling_cov, _apply_scaling_array,
_undo_scaling_array, _scaled_array, _replace_md5, _PCA,
_mask_to_onsets_offsets)
from .mixin import (SizeMixin, GetEpochsMixin, _prepare_read_metadata,
_prepare_write_metadata, _FakeNoPandas)
| 63.5 | 82 | 0.654418 | , '__getitem__', '__iter__', '__len__',
'__add__', '__sub__', '__mul__', '__div__',
'__neg__', '__hash__')
from ._bunch import Bunch, BunchConst, BunchConstNamed
from .check import (check_fname, check_version, check_random_state,
_check_fname, _check_subject, _check_pandas_installed,
_check_pandas_index_arguments, _check_mayavi_version,
_check_event_id, _check_ch_locs, _check_compensation_grade,
_check_if_nan, _is_numeric, _ensure_int, _check_preload,
_validate_type, _check_info_inv, _check_pylsl_installed,
_check_channels_spatial_filter, _check_one_ch_type,
_check_rank, _check_option, _check_depth, _check_combine)
from .config import (set_config, get_config, get_config_path, set_cache_dir,
set_memmap_min_size, get_subjects_dir, _get_stim_channel,
sys_info, _get_extra_data_path, _get_root_dir,
_get_call_line)
from .docs import (copy_function_doc_to_method_doc, copy_doc, linkcode_resolve,
open_docs, deprecated, fill_doc, copy_base_doc_to_subclass_doc)
from .fetching import _fetch_file, _url_to_local_path
from ._logging import (verbose, logger, set_log_level, set_log_file,
use_log_level, catch_logging, warn, filter_out_warnings,
ETSContext)
from .misc import (run_subprocess, _pl, _clean_names, _Counter, pformat,
_explain_exception, _get_argvalues, sizeof_fmt,
running_subprocess)
from .progressbar import ProgressBar
from ._testing import (run_tests_if_main, requires_sklearn,
requires_version, requires_nibabel, requires_mayavi,
requires_good_network, requires_mne, requires_pandas,
requires_h5py, traits_test, requires_pysurfer,
ArgvSetter, SilenceStdout, has_freesurfer, has_mne_c,
_TempDir, has_nibabel, _import_mlab, buggy_mkl_svd,
requires_numpydoc, requires_tvtk, requires_freesurfer,
requires_nitime, requires_fs_or_nibabel, requires_dipy,
requires_neuromag2ft, requires_pylsl, assert_object_equal,
assert_and_remove_boundary_annot, _raw_annot,
assert_dig_allclose, assert_meg_snr, assert_snr,
modified_env)
from .numerics import (hashfunc, md5sum, _compute_row_norms,
_reg_pinv, random_permutation, _reject_data_segments,
compute_corr, _get_inst_data, array_split_idx,
sum_squared, split_list, _gen_events, create_slices,
_time_mask, _freq_mask, grand_average, object_diff,
object_hash, object_size, _apply_scaling_cov,
_undo_scaling_cov, _apply_scaling_array,
_undo_scaling_array, _scaled_array, _replace_md5, _PCA,
_mask_to_onsets_offsets)
from .mixin import (SizeMixin, GetEpochsMixin, _prepare_read_metadata,
_prepare_write_metadata, _FakeNoPandas)
| true | true |
f7ff6dcf85ca9b9c6c31034aedc75c0b3c1ed25e | 975 | py | Python | tests/t-mime-type.py | kurtace72/lighttpd2 | 505bfb053f481b39ffd0f03336e4a8511f45883e | [
"Apache-2.0"
] | 395 | 2015-01-29T04:12:27.000Z | 2022-03-21T21:25:15.000Z | tests/t-mime-type.py | kurtace72/lighttpd2 | 505bfb053f481b39ffd0f03336e4a8511f45883e | [
"Apache-2.0"
] | 2 | 2016-11-30T19:09:02.000Z | 2016-11-30T19:34:26.000Z | tests/t-mime-type.py | kurtace72/lighttpd2 | 505bfb053f481b39ffd0f03336e4a8511f45883e | [
"Apache-2.0"
] | 122 | 2015-01-12T19:56:31.000Z | 2021-08-14T12:56:26.000Z | # -*- coding: utf-8 -*-
from base import *
from requests import *
class TestMimeType1(CurlRequest):
	"""A .txt file must be served with the explicitly configured charset."""
	URL = "/test.txt"
	EXPECT_RESPONSE_BODY = ""
	EXPECT_RESPONSE_CODE = 200
	EXPECT_RESPONSE_HEADERS = [ ("Content-Type", "text/plain; charset=utf-8") ]
class TestMimeType2(CurlRequest):
	"""A .xt file maps to plain text without a charset parameter."""
	URL = "/test.xt"
	EXPECT_RESPONSE_BODY = ""
	EXPECT_RESPONSE_CODE = 200
	EXPECT_RESPONSE_HEADERS = [ ("Content-Type", "text/plain") ]
class TestMimeType3(CurlRequest):
	"""A .rxt file maps to the custom text/strange type."""
	URL = "/test.rxt"
	EXPECT_RESPONSE_BODY = ""
	EXPECT_RESPONSE_CODE = 200
	EXPECT_RESPONSE_HEADERS = [ ("Content-Type", "text/strange") ]
class Test(GroupTest):
	"""Group the mime-type tests and prepare their vhost fixtures."""
	group = [TestMimeType1,TestMimeType2,TestMimeType3]
	def Prepare(self):
		"""Create empty fixture files and the mime_types mapping."""
		self.PrepareVHostFile("test.txt", "")
		self.PrepareVHostFile("test.xt", "")
		self.PrepareVHostFile("test.rxt", "")
		# The "xt" entry (no leading dot) is expected not to match -- note its
		# "should-not-trigger" value.  The config text below is runtime data
		# and is left byte-identical.
		self.config = """
mime_types (
	".txt" => "text/plain; charset=utf-8",
	".xt" => "text/plain",
	".rxt" => "text/strange",
	"xt" => "should-not-trigger"
);
"""
| 24.375 | 76 | 0.688205 |
from base import *
from requests import *
class TestMimeType1(CurlRequest):
URL = "/test.txt"
EXPECT_RESPONSE_BODY = ""
EXPECT_RESPONSE_CODE = 200
EXPECT_RESPONSE_HEADERS = [ ("Content-Type", "text/plain; charset=utf-8") ]
class TestMimeType2(CurlRequest):
URL = "/test.xt"
EXPECT_RESPONSE_BODY = ""
EXPECT_RESPONSE_CODE = 200
EXPECT_RESPONSE_HEADERS = [ ("Content-Type", "text/plain") ]
class TestMimeType3(CurlRequest):
URL = "/test.rxt"
EXPECT_RESPONSE_BODY = ""
EXPECT_RESPONSE_CODE = 200
EXPECT_RESPONSE_HEADERS = [ ("Content-Type", "text/strange") ]
class Test(GroupTest):
group = [TestMimeType1,TestMimeType2,TestMimeType3]
def Prepare(self):
self.PrepareVHostFile("test.txt", "")
self.PrepareVHostFile("test.xt", "")
self.PrepareVHostFile("test.rxt", "")
self.config = """
mime_types (
".txt" => "text/plain; charset=utf-8",
".xt" => "text/plain",
".rxt" => "text/strange",
"xt" => "should-not-trigger"
);
"""
| true | true |
f7ff6e93efff78a32e7e58397f3b1f7553f3bc84 | 864 | py | Python | lib/s3dump/errors/__init__.py | Placidina/s3dump | 9a34ef31e862f899c5cd77a7f6d8d2b3fdfab990 | [
"MIT"
] | null | null | null | lib/s3dump/errors/__init__.py | Placidina/s3dump | 9a34ef31e862f899c5cd77a7f6d8d2b3fdfab990 | [
"MIT"
] | null | null | null | lib/s3dump/errors/__init__.py | Placidina/s3dump | 9a34ef31e862f899c5cd77a7f6d8d2b3fdfab990 | [
"MIT"
] | null | null | null | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from s3dump.utils._text import to_native
class S3DumpError(Exception):
    """Base class for all s3dump errors.

    The signature appears modelled on Ansible's AnsibleError (verify):
    ``obj``, ``show_content`` and ``suppress_extended_error`` are currently
    unused but kept for compatibility with existing callers.
    """

    def __init__(self, message="", obj=None, show_content=True,
                 suppress_extended_error=False, orig_exc=None):
        super(S3DumpError, self).__init__(message)
        self.message = "%s" % to_native(message)
        # BUGFIX: always define the attribute.  Previously it was only set
        # when a truthy orig_exc was passed, so reading err.orig_exc on the
        # handling side could raise AttributeError.
        self.orig_exc = orig_exc

    def __str__(self):
        return self.message

    def __repr__(self):
        return self.message
class S3DumpAssertionError(S3DumpError, AssertionError):
    """Raised when an internal assertion does not hold."""
class S3DumpOptionsError(S3DumpError):
    """Raised when bad or incomplete options were passed."""
class S3DumpParserError(S3DumpError):
"""Something was detected early that is wrong about a playbook or data file."""
pass
| 22.736842 | 110 | 0.703704 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from s3dump.utils._text import to_native
class S3DumpError(Exception):
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None):
super(S3DumpError, self).__init__(message)
self.message = "%s" % to_native(message)
if orig_exc:
self.orig_exc = orig_exc
def __str__(self):
return self.message
def __repr__(self):
return self.message
class S3DumpAssertionError(S3DumpError, AssertionError):
pass
class S3DumpOptionsError(S3DumpError):
pass
class S3DumpParserError(S3DumpError):
pass
| true | true |
f7ff6ee0745459b5987f9d4a603923efcff50432 | 3,763 | py | Python | pretrain.py | adarshchbs/adda_sketch | 25f7adf3563d8e1edb8c431fb93876bbed4d4e76 | [
"MIT"
] | null | null | null | pretrain.py | adarshchbs/adda_sketch | 25f7adf3563d8e1edb8c431fb93876bbed4d4e76 | [
"MIT"
] | null | null | null | pretrain.py | adarshchbs/adda_sketch | 25f7adf3563d8e1edb8c431fb93876bbed4d4e76 | [
"MIT"
] | null | null | null | from torch import nn
from torch import optim
import torch
import params
from utils import make_variable, save_model
from preprocess import preprocess_image
def train_src(source_encoder, source_classifier, data_loader,
              gpu_flag=False, gpu_name='cuda:0'):
    """Train the source encoder + classifier with cross-entropy loss.

    Args:
        source_encoder: feature-extractor network.
        source_classifier: classification head applied to encoder features.
        data_loader: project loader exposing image_gen(split), size and
            batch_size -- assumed, confirm against its definition.
        gpu_flag: move tensors to the GPU named by gpu_name when True.
        gpu_name: CUDA device string, e.g. 'cuda:0'.

    Returns:
        (source_encoder, source_classifier) after training.
    """
    optimizer = optim.Adam(list(source_classifier.parameters())
                           + list(source_encoder.parameters()),
                           lr=params.c_learning_rate,
                           betas=(params.beta1, params.beta2))
    criterion = nn.CrossEntropyLoss()

    for epoch in range(params.num_epochs_classifier):
        # BUGFIX: switch back to train mode every epoch.  These calls were
        # commented out, and eval_src() below flips both nets to eval mode,
        # which froze dropout/batch-norm behaviour for the rest of training.
        source_encoder.train()
        source_classifier.train()

        for step, (images, labels) in enumerate(data_loader.image_gen('train')):
            images = preprocess_image(array=images,
                                      split_type='train',
                                      use_gpu=gpu_flag, gpu_name=gpu_name)
            labels = torch.tensor(labels, dtype=torch.long)
            if gpu_flag:
                labels = labels.cuda(gpu_name)

            optimizer.zero_grad()
            preds = source_classifier(source_encoder(images))
            loss = criterion(preds, labels)
            loss.backward()
            optimizer.step()

            # periodic progress logging
            if (step + 1) % params.log_step_pre == 0:
                print("Epoch [{}/{}] Step [{}/{}]: loss={}"
                      .format(epoch + 1,
                              params.num_epochs_classifier,
                              step + 1,
                              int(data_loader.size['train'] / data_loader.batch_size),
                              loss.data.item()))

        # periodic evaluation on the validation split
        if (epoch + 1) % params.eval_step_pre == 0:
            # BUGFIX: propagate the caller's GPU settings instead of the
            # previously hard-coded gpu_flag=True.
            eval_src(source_encoder, source_classifier, data_loader,
                     gpu_flag=gpu_flag, gpu_name=gpu_name)

        # periodic checkpointing
        if (epoch + 1) % params.save_step_pre == 0:
            save_model(source_encoder, "ADDA-source-encoder-{}.pt".format(epoch + 1))
            save_model(
                source_classifier, "ADDA-source-classifier-{}.pt".format(epoch + 1))

    # final checkpoints
    save_model(source_encoder, "ADDA-source-encoder-final.pt")
    save_model(source_classifier, "ADDA-source-classifier-final.pt")
    return source_encoder, source_classifier
def eval_src(source_encoder, source_classifier, data_loader,
             gpu_flag=False, gpu_name='cuda:0'):
    """Evaluate the source encoder + classifier on the validation split.

    Prints the average cross-entropy loss per batch and the overall accuracy.
    Both networks are left in eval mode; train_src() switches them back.
    """
    source_encoder.eval()
    source_classifier.eval()
    criterion = nn.CrossEntropyLoss()

    total_loss = 0.0
    num_batches = 0
    correct = 0
    total = 0

    # no_grad: evaluation needs no autograd graph (saves memory and time).
    with torch.no_grad():
        for (images, labels) in data_loader.image_gen(split_type='val'):
            images = preprocess_image(array=images,
                                      split_type='val',
                                      use_gpu=gpu_flag, gpu_name=gpu_name)
            labels = torch.tensor(labels, dtype=torch.long)
            if gpu_flag:
                labels = labels.cuda(gpu_name)

            preds = source_classifier(source_encoder(images))
            # criterion returns the *mean* loss over the batch
            total_loss += criterion(preds, labels).item()
            num_batches += 1

            _, predicted = torch.max(preds.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    # BUGFIX: the accumulated values are per-batch means, so average over the
    # number of batches.  The old code divided by the sample count, which
    # understated the loss by a factor of the batch size.  max(..., 1) also
    # guards against an empty validation split.
    loss = total_loss / max(num_batches, 1)
    accuracy = correct / max(total, 1)
    print("Avg Loss = {}, Avg Accuracy = {:2%}".format(loss, accuracy))
from torch import optim
import torch
import params
from utils import make_variable, save_model
from preprocess import preprocess_image
def train_src( source_encoder, source_classifier, data_loader, gpu_flag = False, gpu_name = 'cuda:0' ):
optimizer = optim.Adam( list(source_classifier.parameters())
+ list(source_encoder.parameters()) ,
lr = params.c_learning_rate,
betas = ( params.beta1, params.beta2 )
)
criterion = nn.CrossEntropyLoss()
for epoch in range( params.num_epochs_classifier ):
for step, ( images, labels ) in enumerate( data_loader.image_gen('train') ):
images = preprocess_image( array = images,
split_type = 'train',
use_gpu = gpu_flag, gpu_name=gpu_name )
labels = torch.tensor(labels,dtype=torch.long)
if(gpu_flag == True):
labels = labels.cuda(gpu_name)
optimizer.zero_grad()
preds = source_classifier( source_encoder( images ))
loss = criterion( preds, labels )
loss.backward()
optimizer.step()
if ((step + 1) % params.log_step_pre == 0):
print("Epoch [{}/{}] Step [{}/{}]: loss={}"
.format(epoch + 1,
params.num_epochs_classifier,
step + 1,
int(data_loader.size['train']/data_loader.batch_size),
loss.data.item()))
if ((epoch + 1) % params.eval_step_pre == 0):
eval_src(source_encoder, source_classifier, data_loader, gpu_flag=True)
if ((epoch + 1) % params.save_step_pre == 0):
save_model(source_encoder, "ADDA-source-encoder-{}.pt".format(epoch + 1))
save_model(
source_classifier, "ADDA-source-classifier-{}.pt".format(epoch + 1))
urce_encoder, "ADDA-source-encoder-final.pt")
save_model(source_classifier, "ADDA-source-classifier-final.pt")
return source_encoder, source_classifier
def eval_src( source_encoder, source_classifier, data_loader, gpu_flag = False, gpu_name = 'cuda:0' ):
loss = 0
accuracy = 0
source_encoder.eval()
source_classifier.eval()
criterion = nn.CrossEntropyLoss()
correct = 0
total = 0
for (images, labels) in data_loader.image_gen(split_type='val'):
images = preprocess_image( array = images,
split_type = 'val',
use_gpu = gpu_flag, gpu_name= gpu_name )
labels = torch.tensor(labels,dtype=torch.long)
if(gpu_flag == True):
labels = labels.cuda(gpu_name)
preds = source_classifier( source_encoder( images ))
loss += criterion( preds, labels ).item()
_, predicted = torch.max(preds.data,1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss /= data_loader.size['val']
accuracy = correct/total
print("Avg Loss = {}, Avg Accuracy = {:2%}".format(loss, accuracy))
| true | true |
f7ff6f0d1fe91e309dc36442a2d0a4699af5f342 | 1,977 | py | Python | src/masonite/drivers/authentication/AuthTokenDriver.py | erhuabushuo/masonite1 | 5fd90bbcd1d3ab6a34c9cefea463c5fd5ff9b3a5 | [
"MIT"
] | null | null | null | src/masonite/drivers/authentication/AuthTokenDriver.py | erhuabushuo/masonite1 | 5fd90bbcd1d3ab6a34c9cefea463c5fd5ff9b3a5 | [
"MIT"
] | null | null | null | src/masonite/drivers/authentication/AuthTokenDriver.py | erhuabushuo/masonite1 | 5fd90bbcd1d3ab6a34c9cefea463c5fd5ff9b3a5 | [
"MIT"
] | null | null | null | """AuthTokenDriver Module."""
from ...contracts import AuthContract
from ...drivers import BaseDriver
from ...app import App
class AuthTokenDriver(BaseDriver, AuthContract):
    """Token-based authentication driver.

    Resolves the current user from a remember token carried either in an
    ``Authorization`` header ("<scheme> <token>") or in a ``token`` cookie.
    """

    def __init__(self, app: App):
        """AuthTokenDriver initializer.

        Arguments:
            app {masonite.app.App} -- The application container.
        """
        self.app = app

    def user(self, auth_model):
        """Get the user matching the request's remember token.

        Arguments:
            auth_model {orator.orm.Model} -- An Orator ORM type object.

        Returns:
            Model|bool -- the matching record, or False when none is found.
        """
        request = self.app.make("Request")
        token = None
        authorization = request.header("Authorization")
        if authorization:
            # Expected shape: "<scheme> <token>", e.g. "Bearer abc123".
            # BUGFIX: guard against a malformed header with no space, which
            # previously raised IndexError on split(" ")[1].
            parts = authorization.split(" ")
            if len(parts) > 1:
                token = parts[1]
        elif request.get_cookie("token"):
            token = request.get_cookie("token")

        if token is not None and auth_model:
            return (
                auth_model.where("remember_token", token).first()
                or False
            )
        return False

    def save(self, remember_token, **_):
        """Persist the remember token, here as a cookie.

        Arguments:
            remember_token {string} -- A token containing the state.

        Returns:
            bool
        """
        return self.app.make("Request").cookie("token", remember_token)

    def delete(self):
        """Remove the persisted token cookie.

        Returns:
            bool
        """
        return self.app.make("Request").delete_cookie("token")

    def logout(self):
        """Delete the token and reset the request's cached user."""
        self.delete()
        self.app.make("Request").reset_user()
| 26.36 | 77 | 0.560445 |
from ...contracts import AuthContract
from ...drivers import BaseDriver
from ...app import App
class AuthTokenDriver(BaseDriver, AuthContract):
def __init__(self, app: App):
self.app = app
def user(self, auth_model):
request = self.app.make("Request")
authorization = request.header("Authorization")
if authorization:
token = authorization.split(" ")[1]
elif self.app.make("Request").get_cookie("token"):
token = self.app.make("Request").get_cookie("token")
else:
token = None
if token is not None and auth_model:
return (
auth_model.where(
"remember_token", token
).first()
or False
)
return False
def save(self, remember_token, **_):
return self.app.make("Request").cookie("token", remember_token)
def delete(self):
return self.app.make("Request").delete_cookie("token")
def logout(self):
self.delete()
self.app.make("Request").reset_user()
| true | true |
f7ff6f29c42dd8aba1b73df957f103208197a748 | 244 | py | Python | config.py | n1k0ver3E/NFT_Monitor | ea9bc824590825d72560795ad153602d5f35a32b | [
"MIT"
] | 5 | 2021-10-30T03:59:20.000Z | 2021-12-24T08:51:39.000Z | config.py | n1k0ver3E/NFT_Monitor | ea9bc824590825d72560795ad153602d5f35a32b | [
"MIT"
] | 1 | 2021-12-03T15:01:35.000Z | 2021-12-06T11:59:42.000Z | config.py | n1k0ver3E/NFT_Monitor | ea9bc824590825d72560795ad153602d5f35a32b | [
"MIT"
] | 3 | 2021-11-15T11:05:01.000Z | 2022-02-18T01:50:18.000Z | params = {
"TOTAL_SUPPLY": 10000,
"TEST_BOT_TOKEN" : "",
"PROD_BOT_TOKEN" : "",
"COLLECTION_CONTRACT_ADDRESS" : "0x0a8901b0e25deb55a87524f0cc164e9644020eba",
"CONTRACT_ADDRESS": "0x17539cca21c7933df5c980172d22659b8c345c5a"
} | 34.857143 | 81 | 0.729508 | params = {
"TOTAL_SUPPLY": 10000,
"TEST_BOT_TOKEN" : "",
"PROD_BOT_TOKEN" : "",
"COLLECTION_CONTRACT_ADDRESS" : "0x0a8901b0e25deb55a87524f0cc164e9644020eba",
"CONTRACT_ADDRESS": "0x17539cca21c7933df5c980172d22659b8c345c5a"
} | true | true |
f7ff70ee444017888450bf4f7b266c07709c39ff | 1,342 | py | Python | main/migrations/0001_initial.py | Emmastro/africanlibraries | 6755dd5a7d3453c7ba6e63d49071f9f5af280f71 | [
"Apache-2.0"
] | null | null | null | main/migrations/0001_initial.py | Emmastro/africanlibraries | 6755dd5a7d3453c7ba6e63d49071f9f5af280f71 | [
"Apache-2.0"
] | null | null | null | main/migrations/0001_initial.py | Emmastro/africanlibraries | 6755dd5a7d3453c7ba6e63d49071f9f5af280f71 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 4.0.2 on 2022-02-18 18:52
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Payment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(auto_now=True)),
('amount', models.SmallIntegerField(null=True)),
],
),
migrations.CreateModel(
name='SchoolAdmin',
fields=[
('reader_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.reader')),
('payement', models.ManyToManyField(to='main.Payment')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
bases=('account.reader',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| 31.952381 | 191 | 0.557377 |
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Payment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(auto_now=True)),
('amount', models.SmallIntegerField(null=True)),
],
),
migrations.CreateModel(
name='SchoolAdmin',
fields=[
('reader_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.reader')),
('payement', models.ManyToManyField(to='main.Payment')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
bases=('account.reader',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| true | true |
f7ff72d1afddf700afb1487426bc4bfd6bebd700 | 5,409 | py | Python | lib/datasets/factory.py | denglixi/faster-rcnn.pytorch | 12158fa2ec998ba3733a4696b7a4e08a35c157e3 | [
"MIT"
] | null | null | null | lib/datasets/factory.py | denglixi/faster-rcnn.pytorch | 12158fa2ec998ba3733a4696b7a4e08a35c157e3 | [
"MIT"
] | null | null | null | lib/datasets/factory.py | denglixi/faster-rcnn.pytorch | 12158fa2ec998ba3733a4696b7a4e08a35c157e3 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Factory method for easily getting imdbs by name."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
from datasets.imagenet import imagenet
from datasets.vg import vg
from datasets.food import food
from datasets.food_data import food_merge_imdb
from datasets.school_lunch import school_lunch
from datasets.food_meta_data import food_meta_imdb
from datasets.minipro import minipro
__sets = {}
# Register food_meta_<canteen>_<split> entries.
for canteen in ["Arts"]:
    for split in ['train', 'test']:
        name = 'food_meta_{}_{}'.format(canteen, split)
        categories = "{}_trainval".format(canteen)
        # BUGFIX: bind `categories` as a default argument like the others.
        # It was previously captured by closure (late binding), so every
        # registered lambda would see only the *last* iteration's value if
        # more than one canteen were listed.
        __sets[name] = (lambda split=split, canteen=canteen,
                        categories=categories:
                        food_meta_imdb(split, canteen, categories))
# Set up food_<canteen>_<split>_<trainingcategories>
# Base split names plus derived "mtN" / "inner" / few-shot variants.
splits = ['train', 'val', 'trainval', 'inner', 'test']

# Every base split suffixed with mt{N} for each threshold N (N-major order).
mt_splits = ['{}mt{}'.format(s, n)
             for n in [0, 10, 30, 50, 100]
             for s in splits]
splits += mt_splits

# Inner val/test splits at each threshold (split-major order).
innersplit = ['innermt{}{}'.format(m, sp)
              for sp in ['val', 'test']
              for m in [10, 30, 50]]
splits += innersplit

# take few sample in inner between dataset of canteen and dataset of excl
# canteen as training data, and regard the lefts as validation.
inner_few = ['innerfew{}mt{}{}'.format(fewN, mtN, d)
             for fewN in [0, 1, 3, 5, 10]
             for mtN in [10]
             for d in ['train', 'val', 'test']]
splits += inner_few
# Register food_<canteen>_<split>_<category>_train[_mtN] entries.
_canteen_names = ['exclYIH', "All", "exclArts", "exclUTown", "Science",
                  "exclScience", "exclTechChicken", "exclTechMixedVeg",
                  "YIH", "Arts", "TechChicken", "TechMixedVeg", "UTown",
                  "EconomicBeeHoon"]
for cantee in _canteen_names:
    for split in splits:
        for category in _canteen_names:
            # "<category>_train" plus its mt10/mt30/mt50/mt100 variants.
            cat_names = [category + '_train']
            cat_names += [category + '_train_mt{}'.format(n)
                          for n in [10, 30, 50, 100]]
            for cat_name in cat_names:
                name = 'food_{}_{}_{}'.format(cantee, split, cat_name)
                # Default arguments pin the loop values at definition time.
                __sets[name] = (lambda split=split, cantee=cantee,
                                cat_name=cat_name:
                                food_merge_imdb(split, cantee, cat_name))
# Set up school lunch
# One entry per split: schoollunch_<split>.  The default argument pins the
# loop variable at definition time (avoids the late-binding closure pitfall).
for split in ['train', 'val', 'trainval', 'test']:
    name = 'schoollunch_{}'.format(split)
    __sets[name] = (lambda split=split: school_lunch(split))

# Set up voc_<year>_<split>
for year in ['2007', '2012']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'voc_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))
# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2014_cap_<split>
# NOTE(review): despite the "cap" name this registers plain coco_2014_<split>
# keys again; 'train'/'val'/'trainval' overwrite the identical entries above.
for year in ['2014']:
    for split in ['train', 'val', 'capval', 'valminuscapval', 'trainval']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split> (test-only splits)
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up vg_<version>_<split> (Visual Genome); version strings encode the
# object/attribute/relation class counts of each released vocabulary.
# for version in ['1600-400-20']:
#     for split in ['minitrain', 'train', 'minival', 'val', 'test']:
#         name = 'vg_{}_{}'.format(version,split)
#         __sets[name] = (lambda split=split, version=version: vg(version, split))
for version in ['150-50-20', '150-50-50', '500-150-80', '750-250-150', '1750-700-450', '1600-400-20']:
    for split in ['minitrain', 'smalltrain', 'train', 'minival', 'smallval', 'val', 'test']:
        name = 'vg_{}_{}'.format(version, split)
        __sets[name] = (lambda split=split,
                        version=version: vg(version, split))

# set up image net.
# Paths are relative to the working directory and follow the ILSVRC devkit
# layout -- TODO confirm against the data-preparation scripts.
for split in ['train', 'val', 'val1', 'val2', 'test']:
    name = 'imagenet_{}'.format(split)
    devkit_path = 'data/imagenet/ILSVRC/devkit'
    data_path = 'data/imagenet/ILSVRC'
    __sets[name] = (lambda split=split, devkit_path=devkit_path,
                    data_path=data_path: imagenet(split, devkit_path, data_path))
# Set up minipro_<split>.
for split in ['train', 'val']:
    # BUGFIX: compute a key for this dataset.  The old code reused the
    # leftover `name` from the imagenet loop above ('imagenet_test'),
    # silently replacing that entry and leaving minipro unreachable by name.
    name = 'minipro_{}'.format(split)
    __sets[name] = (lambda split=split: minipro(split))
def get_imdb(name):
    """Instantiate and return the imdb (image database) registered under *name*."""
    factory = __sets.get(name)
    if factory is None:
        raise KeyError('Unknown dataset: {}'.format(name))
    return factory()
def list_imdbs():
    """Return the names of every registered imdb, in registration order."""
    return [key for key in __sets]
| 40.669173 | 207 | 0.623405 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
from datasets.imagenet import imagenet
from datasets.vg import vg
from datasets.food import food
from datasets.food_data import food_merge_imdb
from datasets.school_lunch import school_lunch
from datasets.food_meta_data import food_meta_imdb
from datasets.minipro import minipro
__sets = {}
for canteen in ["Arts"]:
for split in ['train', 'test']:
name = 'food_meta_{}_{}'.format(canteen, split)
categories = "{}_trainval".format(canteen)
__sets[name] = (lambda split=split, canteen=canteen:
food_meta_imdb(split, canteen, categories))
splits = ['train', 'val', 'trainval', 'inner', 'test']
mt_splits = []
for n in [0, 10, 30, 50, 100]:
for s in splits:
mt_splits += [s+"mt{}".format(n)]
splits += mt_splits
innersplit = []
for sp in ['val', 'test']:
for m in [10, 30, 50]:
innersplit.append('innermt{}{}'.format(m, sp))
splits += innersplit
inner_few = []
for fewN in [0, 1, 3, 5, 10]:
for mtN in [10]:
for d in ['train', 'val', 'test']:
inner_few += ["innerfew{}mt{}{}".format(fewN, mtN, d)]
splits += inner_few
for cantee in ['exclYIH', "All", "exclArts", "exclUTown", "Science", "exclScience", "exclTechChicken", "exclTechMixedVeg", "YIH", "Arts", "TechChicken", "TechMixedVeg", "UTown", "EconomicBeeHoon"]:
for split in splits:
for category in ['exclYIH', "All", "exclArts", "exclUTown", "Science", "exclScience", "exclTechChicken", "exclTechMixedVeg", "YIH", "Arts", "TechChicken", "TechMixedVeg", "UTown", "EconomicBeeHoon"]:
category_train = category + '_train'
name = 'food_{}_{}_{}'.format(cantee, split, category_train)
__sets[name] = (lambda split=split,
cantee=cantee, category_train=category_train: food_merge_imdb(split, cantee, category_train))
for n in [10, 30, 50, 100]:
category_mt10 = category + '_train_mt{}'.format(n)
name = 'food_{}_{}_{}'.format(cantee, split, category_mt10)
__sets[name] = (lambda split=split,
cantee=cantee, category_mt10=category_mt10: food_merge_imdb(split, cantee, category_mt10))
for split in ['train', 'val', 'trainval', 'test']:
name = 'schoollunch_{}'.format(split)
__sets[name] = (lambda split=split: school_lunch(split))
for year in ['2007', '2012']:
for split in ['train', 'val', 'trainval', 'test']:
name = 'voc_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: pascal_voc(split, year))
for year in ['2014']:
for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
for year in ['2014']:
for split in ['train', 'val', 'capval', 'valminuscapval', 'trainval']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
for year in ['2015']:
for split in ['test', 'test-dev']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
for version in ['150-50-20', '150-50-50', '500-150-80', '750-250-150', '1750-700-450', '1600-400-20']:
for split in ['minitrain', 'smalltrain', 'train', 'minival', 'smallval', 'val', 'test']:
name = 'vg_{}_{}'.format(version, split)
__sets[name] = (lambda split=split,
version=version: vg(version, split))
for split in ['train', 'val', 'val1', 'val2', 'test']:
name = 'imagenet_{}'.format(split)
devkit_path = 'data/imagenet/ILSVRC/devkit'
data_path = 'data/imagenet/ILSVRC'
__sets[name] = (lambda split=split, devkit_path=devkit_path,
data_path=data_path: imagenet(split, devkit_path, data_path))
# Set up minipro_<split>.
# BUG FIX: assign a distinct key per split; the original reused the stale
# `name` left over from the preceding imagenet loop, so every iteration
# clobbered that entry instead of registering minipro datasets.
for split in ['train', 'val']:
    name = 'minipro_{}'.format(split)
    __sets[name] = (lambda split=split: minipro(split))
def get_imdb(name):
    """Look up *name* in the registry and return an instantiated imdb."""
    factory = __sets.get(name)
    if factory is None:
        raise KeyError('Unknown dataset: {}'.format(name))
    return factory()
def list_imdbs():
    """Names of all registered imdbs."""
    return [key for key in __sets]
| true | true |
f7ff73ce0ff4e0c6ce61f52c4d633a5c530abd7b | 2,965 | py | Python | youtubeplayer/src/YouTubeAddPlayList.py | TwolDE2/enigma2-plugins | 06685a5ce6a65a8724d3b32c8f7906714650ca2c | [
"OLDAP-2.3"
] | 30 | 2015-05-08T22:10:00.000Z | 2022-03-13T22:09:31.000Z | youtubeplayer/src/YouTubeAddPlayList.py | TwolDE2/enigma2-plugins | 06685a5ce6a65a8724d3b32c8f7906714650ca2c | [
"OLDAP-2.3"
] | 124 | 2015-04-27T21:30:48.000Z | 2022-03-29T10:21:39.000Z | youtubeplayer/src/YouTubeAddPlayList.py | TwolDE2/enigma2-plugins | 06685a5ce6a65a8724d3b32c8f7906714650ca2c | [
"OLDAP-2.3"
] | 193 | 2015-01-10T09:21:26.000Z | 2022-03-21T08:19:33.000Z | from __future__ import absolute_import
############################################################################
# Copyright (C) 2008 by Volker Christian #
# Volker.Christian@fh-hagenberg.at #
# #
# This program is free software; you can redistribute it and#or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
from Screens.Screen import Screen
from Components.config import Config
from Components.config import ConfigText
from Components.config import ConfigYesNo
from Components.config import getConfigListEntry
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap
from Components.Button import Button
from .ConfigTextWithSuggestions import ConfigTextWithSuggestions
from . import _
# Module-level, shared config describing the playlist being created.
# YouTubeAddPlaylistDialog edits these fields in place and passes the same
# object back through close(), so callers read the result from here.
playlistContext = Config()
playlistContext.name = ConfigText(_("Name"), False)  # playlist title
playlistContext.description = ConfigText(_("Description"), False)  # playlist description
playlistContext.private = ConfigYesNo()  # privacy flag
class YouTubeAddPlaylistDialog(Screen, ConfigListScreen):
    """Dialog that asks for the name, description and privacy of a new playlist.

    The entered values live in the shared module-level ``playlistContext``;
    close() reports whether the user confirmed (green) or cancelled (red).
    """

    def __init__(self, session):
        Screen.__init__(self, session)
        self.session = session
        # Remote-control bindings: green/OK saves, red/exit cancels.
        self["actions"] = ActionMap(["YouTubeAddPlaylistActions"],
        {
            "save": self.keySave,
            "cancel": self.keyCancel
        }, -2)
        self["key_red"] = Button(_("Cancel"))
        self["key_green"] = Button(_("Save"))
        self["key_yellow"] = Button("")
        self["key_blue"] = Button("")
        # Editable rows, all backed by the shared playlistContext fields.
        entries = [
            getConfigListEntry(_("Playlist Name"), playlistContext.name),
            getConfigListEntry(_("Playlist Description"), playlistContext.description),
            getConfigListEntry(_("private"), playlistContext.private),
        ]
        ConfigListScreen.__init__(self, entries, session)

    def keySave(self):
        # Confirmed: hand the (mutated) shared context back to the caller.
        self.close(True, playlistContext)

    def keyCancel(self):
        self.close(False, playlistContext)
| 41.760563 | 92 | 0.594266 | from __future__ import absolute_import
| true | true |
f7ff75b5a3d6527654c7677b64c264d80246c1f3 | 167 | py | Python | Algorithms/10-Regular_Expression_Matching.py | PrismSpirit/Leetcode | 5444fe5274abe0e8c2f15c365d4e81a078511c0b | [
"MIT"
] | null | null | null | Algorithms/10-Regular_Expression_Matching.py | PrismSpirit/Leetcode | 5444fe5274abe0e8c2f15c365d4e81a078511c0b | [
"MIT"
] | null | null | null | Algorithms/10-Regular_Expression_Matching.py | PrismSpirit/Leetcode | 5444fe5274abe0e8c2f15c365d4e81a078511c0b | [
"MIT"
] | null | null | null | import re
class Solution:
    def isMatch(self, s: str, p: str) -> bool:
        """Return True iff pattern *p* matches the entire string *s*.

        Delegates to the stdlib regex engine; ``re.fullmatch`` anchors the
        pattern to the whole string, which is exactly the problem's contract.
        """
        # Idiom fix: fullmatch() returns a Match object or None, so test it
        # directly instead of the redundant if/else returning True/False.
        return re.fullmatch(p, s) is not None
class Solution:
def isMatch(self, s: str, p: str) -> bool:
if re.fullmatch(p, s):
return True
else:
return False | true | true |
f7ff762a24e5e6fb13bc8f7e21fa0a320773288c | 1,223 | py | Python | ml/models/random_forest.py | refactoring-ai/Machine-Learning | 908d35322a06a7b1709d83f731033a939a864c6b | [
"MIT"
] | 5 | 2020-09-02T19:46:37.000Z | 2021-04-21T15:41:11.000Z | ml/models/random_forest.py | refactoring-ai/Machine-Learning | 908d35322a06a7b1709d83f731033a939a864c6b | [
"MIT"
] | 16 | 2020-08-04T08:07:50.000Z | 2021-03-23T08:15:30.000Z | ml/models/random_forest.py | refactoring-ai/Machine-Learning | 908d35322a06a7b1709d83f731033a939a864c6b | [
"MIT"
] | 1 | 2021-04-17T18:34:47.000Z | 2021-04-17T18:34:47.000Z | from sklearn.ensemble import RandomForestClassifier
from configs import CORE_COUNT, SEED
from ml.models.base import SupervisedMLRefactoringModel
class RandomForestRefactoringModel(SupervisedMLRefactoringModel):
    """Supervised refactoring model backed by sklearn's RandomForestClassifier."""

    def feature_reduction(self) -> bool:
        # Tree ensembles cope with high-dimensional input, so no reduction step.
        return False

    def params_to_tune(self):
        """Hyper-parameter search grid explored during tuning."""
        search_space = {
            "max_depth": [3, 6, 12, 24, None],
            "max_features": ["auto", "log2", None],
            "min_samples_split": [2, 3, 4, 5, 10],
            "bootstrap": [True, False],
            "criterion": ["gini", "entropy"],
            "n_estimators": [10, 50, 100, 150, 200],
        }
        return search_space

    def model(self, best_params=None):
        """Build the classifier, applying *best_params* when provided.

        Without tuning results, only the random seed is pinned; otherwise the
        tuned hyper-parameters are forwarded to the constructor.
        """
        if best_params is None:
            return RandomForestClassifier(random_state=SEED)
        return RandomForestClassifier(
            random_state=SEED,
            n_jobs=CORE_COUNT,
            max_depth=best_params["max_depth"],
            max_features=best_params["max_features"],
            min_samples_split=best_params["min_samples_split"],
            bootstrap=best_params["bootstrap"],
            criterion=best_params["criterion"],
            n_estimators=best_params["n_estimators"],
        )
from configs import CORE_COUNT, SEED
from ml.models.base import SupervisedMLRefactoringModel
class RandomForestRefactoringModel(SupervisedMLRefactoringModel):
def feature_reduction(self) -> bool:
return False
def params_to_tune(self):
return {
"max_depth": [3, 6, 12, 24, None],
"max_features": ["auto", "log2", None],
"min_samples_split": [2, 3, 4, 5, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"],
"n_estimators": [10, 50, 100, 150, 200]
}
def model(self, best_params=None):
if best_params is not None:
return RandomForestClassifier(
random_state=SEED,
n_jobs=CORE_COUNT,
max_depth=best_params["max_depth"],
max_features=best_params["max_features"],
min_samples_split=best_params["min_samples_split"],
bootstrap=best_params["bootstrap"],
criterion=best_params["criterion"],
n_estimators=best_params["n_estimators"],
)
return RandomForestClassifier(random_state=SEED)
| true | true |
f7ff76b26de96e2fb9e62d2c50c5eda84862e2b2 | 11,132 | py | Python | tests/test_hvactemplatezonefancoil.py | marcelosalles/pyidf | c2f744211572b5e14e29522aac1421ba88addb0e | [
"Apache-2.0"
] | 19 | 2015-12-08T23:33:51.000Z | 2022-01-31T04:41:10.000Z | tests/test_hvactemplatezonefancoil.py | marcelosalles/pyidf | c2f744211572b5e14e29522aac1421ba88addb0e | [
"Apache-2.0"
] | 2 | 2019-10-04T10:57:00.000Z | 2021-10-01T06:46:17.000Z | tests/test_hvactemplatezonefancoil.py | marcelosalles/pyidf | c2f744211572b5e14e29522aac1421ba88addb0e | [
"Apache-2.0"
] | 7 | 2015-11-04T02:25:01.000Z | 2021-12-08T03:14:28.000Z | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.hvac_templates import HvactemplateZoneFanCoil
log = logging.getLogger(__name__)
class TestHvactemplateZoneFanCoil(unittest.TestCase):
    """Save/reload round-trip test for the HvactemplateZoneFanCoil IDF object."""
    def setUp(self):
        # Scratch file used as the save/reload target for the round trip.
        self.fd, self.path = tempfile.mkstemp()
    def tearDown(self):
        os.remove(self.path)
    def test_create_hvactemplatezonefancoil(self):
        """Set every field, write the IDF to disk, reload it, and verify
        that each field survives the round trip unchanged."""
        pyidf.validation_level = ValidationLevel.error
        obj = HvactemplateZoneFanCoil()
        # object-list
        var_zone_name = "object-list|Zone Name"
        obj.zone_name = var_zone_name
        # object-list
        var_template_thermostat_name = "object-list|Template Thermostat Name"
        obj.template_thermostat_name = var_template_thermostat_name
        # real
        var_supply_air_maximum_flow_rate = 3.3
        obj.supply_air_maximum_flow_rate = var_supply_air_maximum_flow_rate
        # real
        var_zone_heating_sizing_factor = 0.0
        obj.zone_heating_sizing_factor = var_zone_heating_sizing_factor
        # real
        var_zone_cooling_sizing_factor = 0.0
        obj.zone_cooling_sizing_factor = var_zone_cooling_sizing_factor
        # alpha
        var_outdoor_air_method = "Flow/Person"
        obj.outdoor_air_method = var_outdoor_air_method
        # real
        var_outdoor_air_flow_rate_per_person = 7.7
        obj.outdoor_air_flow_rate_per_person = var_outdoor_air_flow_rate_per_person
        # real
        var_outdoor_air_flow_rate_per_zone_floor_area = 8.8
        obj.outdoor_air_flow_rate_per_zone_floor_area = var_outdoor_air_flow_rate_per_zone_floor_area
        # real
        var_outdoor_air_flow_rate_per_zone = 9.9
        obj.outdoor_air_flow_rate_per_zone = var_outdoor_air_flow_rate_per_zone
        # object-list
        var_system_availability_schedule_name = "object-list|System Availability Schedule Name"
        obj.system_availability_schedule_name = var_system_availability_schedule_name
        # real
        var_supply_fan_total_efficiency = 0.50005
        obj.supply_fan_total_efficiency = var_supply_fan_total_efficiency
        # real
        var_supply_fan_delta_pressure = 0.0
        obj.supply_fan_delta_pressure = var_supply_fan_delta_pressure
        # real
        var_supply_fan_motor_efficiency = 0.50005
        obj.supply_fan_motor_efficiency = var_supply_fan_motor_efficiency
        # real
        var_supply_fan_motor_in_air_stream_fraction = 0.5
        obj.supply_fan_motor_in_air_stream_fraction = var_supply_fan_motor_in_air_stream_fraction
        # alpha
        var_cooling_coil_type = "ChilledWater"
        obj.cooling_coil_type = var_cooling_coil_type
        # object-list
        var_cooling_coil_availability_schedule_name = "object-list|Cooling Coil Availability Schedule Name"
        obj.cooling_coil_availability_schedule_name = var_cooling_coil_availability_schedule_name
        # real
        var_cooling_coil_design_setpoint = 17.17
        obj.cooling_coil_design_setpoint = var_cooling_coil_design_setpoint
        # alpha
        var_heating_coil_type = "HotWater"
        obj.heating_coil_type = var_heating_coil_type
        # object-list
        var_heating_coil_availability_schedule_name = "object-list|Heating Coil Availability Schedule Name"
        obj.heating_coil_availability_schedule_name = var_heating_coil_availability_schedule_name
        # real
        var_heating_coil_design_setpoint = 20.2
        obj.heating_coil_design_setpoint = var_heating_coil_design_setpoint
        # object-list
        var_dedicated_outdoor_air_system_name = "object-list|Dedicated Outdoor Air System Name"
        obj.dedicated_outdoor_air_system_name = var_dedicated_outdoor_air_system_name
        # alpha
        var_zone_cooling_design_supply_air_temperature_input_method = "SupplyAirTemperature"
        obj.zone_cooling_design_supply_air_temperature_input_method = var_zone_cooling_design_supply_air_temperature_input_method
        # real
        var_zone_cooling_design_supply_air_temperature_difference = 23.23
        obj.zone_cooling_design_supply_air_temperature_difference = var_zone_cooling_design_supply_air_temperature_difference
        # alpha
        var_zone_heating_design_supply_air_temperature_input_method = "SupplyAirTemperature"
        obj.zone_heating_design_supply_air_temperature_input_method = var_zone_heating_design_supply_air_temperature_input_method
        # real
        var_zone_heating_design_supply_air_temperature_difference = 25.25
        obj.zone_heating_design_supply_air_temperature_difference = var_zone_heating_design_supply_air_temperature_difference
        # object-list
        var_design_specification_outdoor_air_object_name = "object-list|Design Specification Outdoor Air Object Name"
        obj.design_specification_outdoor_air_object_name = var_design_specification_outdoor_air_object_name
        # object-list
        var_design_specification_zone_air_distribution_object_name = "object-list|Design Specification Zone Air Distribution Object Name"
        obj.design_specification_zone_air_distribution_object_name = var_design_specification_zone_air_distribution_object_name
        # alpha
        var_capacity_control_method = "ConstantFanVariableFlow"
        obj.capacity_control_method = var_capacity_control_method
        # real
        var_low_speed_supply_air_flow_ratio = 0.0001
        obj.low_speed_supply_air_flow_ratio = var_low_speed_supply_air_flow_ratio
        # real
        var_medium_speed_supply_air_flow_ratio = 0.0001
        obj.medium_speed_supply_air_flow_ratio = var_medium_speed_supply_air_flow_ratio
        # object-list
        var_outdoor_air_schedule_name = "object-list|Outdoor Air Schedule Name"
        obj.outdoor_air_schedule_name = var_outdoor_air_schedule_name
        # alpha
        var_baseboard_heating_type = "HotWater"
        obj.baseboard_heating_type = var_baseboard_heating_type
        # object-list
        var_baseboard_heating_availability_schedule_name = "object-list|Baseboard Heating Availability Schedule Name"
        obj.baseboard_heating_availability_schedule_name = var_baseboard_heating_availability_schedule_name
        # real
        var_baseboard_heating_capacity = 34.34
        obj.baseboard_heating_capacity = var_baseboard_heating_capacity
        # Round trip: write the object to disk, log the file, and re-parse it.
        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)
        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())
        idf2 = IDF(self.path)
        # Every reloaded field must match the value assigned above.
        self.assertEqual(idf2.hvactemplatezonefancoils[0].zone_name, var_zone_name)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].template_thermostat_name, var_template_thermostat_name)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_air_maximum_flow_rate, var_supply_air_maximum_flow_rate)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].zone_heating_sizing_factor, var_zone_heating_sizing_factor)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].zone_cooling_sizing_factor, var_zone_cooling_sizing_factor)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_method, var_outdoor_air_method)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_flow_rate_per_person, var_outdoor_air_flow_rate_per_person)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_flow_rate_per_zone_floor_area, var_outdoor_air_flow_rate_per_zone_floor_area)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_flow_rate_per_zone, var_outdoor_air_flow_rate_per_zone)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].system_availability_schedule_name, var_system_availability_schedule_name)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_fan_total_efficiency, var_supply_fan_total_efficiency)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_fan_delta_pressure, var_supply_fan_delta_pressure)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_fan_motor_efficiency, var_supply_fan_motor_efficiency)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_fan_motor_in_air_stream_fraction, var_supply_fan_motor_in_air_stream_fraction)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].cooling_coil_type, var_cooling_coil_type)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].cooling_coil_availability_schedule_name, var_cooling_coil_availability_schedule_name)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].cooling_coil_design_setpoint, var_cooling_coil_design_setpoint)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].heating_coil_type, var_heating_coil_type)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].heating_coil_availability_schedule_name, var_heating_coil_availability_schedule_name)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].heating_coil_design_setpoint, var_heating_coil_design_setpoint)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].dedicated_outdoor_air_system_name, var_dedicated_outdoor_air_system_name)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].zone_cooling_design_supply_air_temperature_input_method, var_zone_cooling_design_supply_air_temperature_input_method)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].zone_cooling_design_supply_air_temperature_difference, var_zone_cooling_design_supply_air_temperature_difference)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].zone_heating_design_supply_air_temperature_input_method, var_zone_heating_design_supply_air_temperature_input_method)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].zone_heating_design_supply_air_temperature_difference, var_zone_heating_design_supply_air_temperature_difference)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].design_specification_outdoor_air_object_name, var_design_specification_outdoor_air_object_name)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].design_specification_zone_air_distribution_object_name, var_design_specification_zone_air_distribution_object_name)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].capacity_control_method, var_capacity_control_method)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].low_speed_supply_air_flow_ratio, var_low_speed_supply_air_flow_ratio)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].medium_speed_supply_air_flow_ratio, var_medium_speed_supply_air_flow_ratio)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_schedule_name, var_outdoor_air_schedule_name)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].baseboard_heating_type, var_baseboard_heating_type)
        self.assertEqual(idf2.hvactemplatezonefancoils[0].baseboard_heating_availability_schedule_name, var_baseboard_heating_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].baseboard_heating_capacity, var_baseboard_heating_capacity) | 65.482353 | 177 | 0.801922 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.hvac_templates import HvactemplateZoneFanCoil
log = logging.getLogger(__name__)
class TestHvactemplateZoneFanCoil(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_hvactemplatezonefancoil(self):
pyidf.validation_level = ValidationLevel.error
obj = HvactemplateZoneFanCoil()
var_zone_name = "object-list|Zone Name"
obj.zone_name = var_zone_name
var_template_thermostat_name = "object-list|Template Thermostat Name"
obj.template_thermostat_name = var_template_thermostat_name
var_supply_air_maximum_flow_rate = 3.3
obj.supply_air_maximum_flow_rate = var_supply_air_maximum_flow_rate
var_zone_heating_sizing_factor = 0.0
obj.zone_heating_sizing_factor = var_zone_heating_sizing_factor
var_zone_cooling_sizing_factor = 0.0
obj.zone_cooling_sizing_factor = var_zone_cooling_sizing_factor
var_outdoor_air_method = "Flow/Person"
obj.outdoor_air_method = var_outdoor_air_method
var_outdoor_air_flow_rate_per_person = 7.7
obj.outdoor_air_flow_rate_per_person = var_outdoor_air_flow_rate_per_person
var_outdoor_air_flow_rate_per_zone_floor_area = 8.8
obj.outdoor_air_flow_rate_per_zone_floor_area = var_outdoor_air_flow_rate_per_zone_floor_area
var_outdoor_air_flow_rate_per_zone = 9.9
obj.outdoor_air_flow_rate_per_zone = var_outdoor_air_flow_rate_per_zone
var_system_availability_schedule_name = "object-list|System Availability Schedule Name"
obj.system_availability_schedule_name = var_system_availability_schedule_name
var_supply_fan_total_efficiency = 0.50005
obj.supply_fan_total_efficiency = var_supply_fan_total_efficiency
var_supply_fan_delta_pressure = 0.0
obj.supply_fan_delta_pressure = var_supply_fan_delta_pressure
var_supply_fan_motor_efficiency = 0.50005
obj.supply_fan_motor_efficiency = var_supply_fan_motor_efficiency
var_supply_fan_motor_in_air_stream_fraction = 0.5
obj.supply_fan_motor_in_air_stream_fraction = var_supply_fan_motor_in_air_stream_fraction
var_cooling_coil_type = "ChilledWater"
obj.cooling_coil_type = var_cooling_coil_type
var_cooling_coil_availability_schedule_name = "object-list|Cooling Coil Availability Schedule Name"
obj.cooling_coil_availability_schedule_name = var_cooling_coil_availability_schedule_name
var_cooling_coil_design_setpoint = 17.17
obj.cooling_coil_design_setpoint = var_cooling_coil_design_setpoint
var_heating_coil_type = "HotWater"
obj.heating_coil_type = var_heating_coil_type
var_heating_coil_availability_schedule_name = "object-list|Heating Coil Availability Schedule Name"
obj.heating_coil_availability_schedule_name = var_heating_coil_availability_schedule_name
var_heating_coil_design_setpoint = 20.2
obj.heating_coil_design_setpoint = var_heating_coil_design_setpoint
var_dedicated_outdoor_air_system_name = "object-list|Dedicated Outdoor Air System Name"
obj.dedicated_outdoor_air_system_name = var_dedicated_outdoor_air_system_name
var_zone_cooling_design_supply_air_temperature_input_method = "SupplyAirTemperature"
obj.zone_cooling_design_supply_air_temperature_input_method = var_zone_cooling_design_supply_air_temperature_input_method
var_zone_cooling_design_supply_air_temperature_difference = 23.23
obj.zone_cooling_design_supply_air_temperature_difference = var_zone_cooling_design_supply_air_temperature_difference
var_zone_heating_design_supply_air_temperature_input_method = "SupplyAirTemperature"
obj.zone_heating_design_supply_air_temperature_input_method = var_zone_heating_design_supply_air_temperature_input_method
var_zone_heating_design_supply_air_temperature_difference = 25.25
obj.zone_heating_design_supply_air_temperature_difference = var_zone_heating_design_supply_air_temperature_difference
var_design_specification_outdoor_air_object_name = "object-list|Design Specification Outdoor Air Object Name"
obj.design_specification_outdoor_air_object_name = var_design_specification_outdoor_air_object_name
var_design_specification_zone_air_distribution_object_name = "object-list|Design Specification Zone Air Distribution Object Name"
obj.design_specification_zone_air_distribution_object_name = var_design_specification_zone_air_distribution_object_name
var_capacity_control_method = "ConstantFanVariableFlow"
obj.capacity_control_method = var_capacity_control_method
var_low_speed_supply_air_flow_ratio = 0.0001
obj.low_speed_supply_air_flow_ratio = var_low_speed_supply_air_flow_ratio
var_medium_speed_supply_air_flow_ratio = 0.0001
obj.medium_speed_supply_air_flow_ratio = var_medium_speed_supply_air_flow_ratio
var_outdoor_air_schedule_name = "object-list|Outdoor Air Schedule Name"
obj.outdoor_air_schedule_name = var_outdoor_air_schedule_name
var_baseboard_heating_type = "HotWater"
obj.baseboard_heating_type = var_baseboard_heating_type
var_baseboard_heating_availability_schedule_name = "object-list|Baseboard Heating Availability Schedule Name"
obj.baseboard_heating_availability_schedule_name = var_baseboard_heating_availability_schedule_name
var_baseboard_heating_capacity = 34.34
obj.baseboard_heating_capacity = var_baseboard_heating_capacity
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.hvactemplatezonefancoils[0].zone_name, var_zone_name)
self.assertEqual(idf2.hvactemplatezonefancoils[0].template_thermostat_name, var_template_thermostat_name)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_air_maximum_flow_rate, var_supply_air_maximum_flow_rate)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].zone_heating_sizing_factor, var_zone_heating_sizing_factor)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].zone_cooling_sizing_factor, var_zone_cooling_sizing_factor)
self.assertEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_method, var_outdoor_air_method)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_flow_rate_per_person, var_outdoor_air_flow_rate_per_person)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_flow_rate_per_zone_floor_area, var_outdoor_air_flow_rate_per_zone_floor_area)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_flow_rate_per_zone, var_outdoor_air_flow_rate_per_zone)
self.assertEqual(idf2.hvactemplatezonefancoils[0].system_availability_schedule_name, var_system_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_fan_total_efficiency, var_supply_fan_total_efficiency)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_fan_delta_pressure, var_supply_fan_delta_pressure)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_fan_motor_efficiency, var_supply_fan_motor_efficiency)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_fan_motor_in_air_stream_fraction, var_supply_fan_motor_in_air_stream_fraction)
self.assertEqual(idf2.hvactemplatezonefancoils[0].cooling_coil_type, var_cooling_coil_type)
self.assertEqual(idf2.hvactemplatezonefancoils[0].cooling_coil_availability_schedule_name, var_cooling_coil_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].cooling_coil_design_setpoint, var_cooling_coil_design_setpoint)
self.assertEqual(idf2.hvactemplatezonefancoils[0].heating_coil_type, var_heating_coil_type)
self.assertEqual(idf2.hvactemplatezonefancoils[0].heating_coil_availability_schedule_name, var_heating_coil_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].heating_coil_design_setpoint, var_heating_coil_design_setpoint)
self.assertEqual(idf2.hvactemplatezonefancoils[0].dedicated_outdoor_air_system_name, var_dedicated_outdoor_air_system_name)
self.assertEqual(idf2.hvactemplatezonefancoils[0].zone_cooling_design_supply_air_temperature_input_method, var_zone_cooling_design_supply_air_temperature_input_method)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].zone_cooling_design_supply_air_temperature_difference, var_zone_cooling_design_supply_air_temperature_difference)
self.assertEqual(idf2.hvactemplatezonefancoils[0].zone_heating_design_supply_air_temperature_input_method, var_zone_heating_design_supply_air_temperature_input_method)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].zone_heating_design_supply_air_temperature_difference, var_zone_heating_design_supply_air_temperature_difference)
self.assertEqual(idf2.hvactemplatezonefancoils[0].design_specification_outdoor_air_object_name, var_design_specification_outdoor_air_object_name)
self.assertEqual(idf2.hvactemplatezonefancoils[0].design_specification_zone_air_distribution_object_name, var_design_specification_zone_air_distribution_object_name)
self.assertEqual(idf2.hvactemplatezonefancoils[0].capacity_control_method, var_capacity_control_method)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].low_speed_supply_air_flow_ratio, var_low_speed_supply_air_flow_ratio)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].medium_speed_supply_air_flow_ratio, var_medium_speed_supply_air_flow_ratio)
self.assertEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_schedule_name, var_outdoor_air_schedule_name)
self.assertEqual(idf2.hvactemplatezonefancoils[0].baseboard_heating_type, var_baseboard_heating_type)
self.assertEqual(idf2.hvactemplatezonefancoils[0].baseboard_heating_availability_schedule_name, var_baseboard_heating_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].baseboard_heating_capacity, var_baseboard_heating_capacity) | true | true |
f7ff76ebf8e6a32ea44e8ab33b0745f0ef10439a | 12,844 | py | Python | Homework4/ars_motion_controller_pid/source/ars_motion_controller.py | Moado/Robotics-ROS | c5aca2dffa6c5c9376e1cda8624ed611ffb11ca0 | [
"MIT"
] | null | null | null | Homework4/ars_motion_controller_pid/source/ars_motion_controller.py | Moado/Robotics-ROS | c5aca2dffa6c5c9376e1cda8624ed611ffb11ca0 | [
"MIT"
] | null | null | null | Homework4/ars_motion_controller_pid/source/ars_motion_controller.py | Moado/Robotics-ROS | c5aca2dffa6c5c9376e1cda8624ed611ffb11ca0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import numpy as np
from numpy import *
import os
# ROS
import rospy
import tf_conversions as tf
#
import ars_lib_helpers
#
import ars_pid
class ArsMotionController:
  """Cascaded PID motion controller for an aerial robot.

  Two nested loops:
    * Position loop (`posLoopMotionController`): pose reference vs. pose
      feedback -> world-frame velocity command (feedforward + PID feedback).
    * Velocity loop (`velLoopMotionController`): position-loop output
      (converted to the robot frame) vs. velocity feedback -> final
      robot-frame velocity command (feedforward + PID feedback), published
      via the `getRobotVelo*Cmd` accessors.

  The horizontal (x, y) channels use a single PID acting on the error
  magnitude, applied along the error direction; z and yaw use scalar PIDs.

  Fixes vs. previous revision:
    * `math.sqrt` was used without `import math` (NameError at runtime);
      replaced with `np.linalg.norm`.
    * The zero-error branch of the velocity loop assigned the wrong
      variable name, raising `NameError` when the horizontal velocity
      error was exactly zero.
  """

  # ---- References (class-level defaults; overwritten per instance) ----
  flag_set_robot_pose_ref = False
  robot_posi_ref = None
  robot_atti_quat_simp_ref = None

  flag_set_robot_velo_world_ref = False
  robot_velo_lin_world_ref = None
  robot_velo_ang_world_ref = None

  flag_set_robot_velo_cmd_ref = False
  # m/s
  robot_velo_lin_cmd_ref = None
  # rad/s
  robot_velo_ang_cmd_ref = None

  # ---- Feedback ----
  flag_set_robot_pose = False
  robot_posi = None
  robot_atti_quat_simp = None

  flag_set_robot_vel_world = False
  robot_velo_lin_world = None
  robot_velo_ang_world = None

  # ---- Commands (velocity-loop output) ----
  robot_velo_cmd_time_stamp = rospy.Time(0.0, 0.0)
  robot_velo_lin_cmd = None
  robot_velo_ang_cmd = None

  # ---- Position-loop internal state ----
  pos_loop_time_stamp_ros = rospy.Time(0.0, 0.0)
  flag_set_pos_loop_out = False
  pos_loop_out_lin_cmd = None
  pos_loop_out_ang_cmd = None

  # ---- PIDs: pose ----
  flag_ctr_pos_hor = True
  pos_hor_pid = ars_pid.PID()
  flag_ctr_pos_z = True
  pos_z_pid = ars_pid.PID()
  flag_ctr_att_yaw = True
  att_yaw_pid = ars_pid.PID()

  # ---- PIDs: velocity ----
  flag_ctr_vel_lin_hor = True
  vel_lin_hor_pid = ars_pid.PID()
  flag_ctr_vel_lin_z = True
  vel_lin_z_pid = ars_pid.PID()
  flag_ctr_vel_ang_z = True
  vel_ang_z_pid = ars_pid.PID()

  def __init__(self):
    """Initialize all state vectors and configure the six PID controllers."""

    # Commands
    self.robot_velo_cmd_time_stamp = rospy.Time(0.0, 0.0)
    self.robot_velo_lin_cmd = np.zeros((3,), dtype=float)
    self.robot_velo_ang_cmd = np.zeros((1,), dtype=float)

    # Feedback
    self.flag_set_robot_pose = False
    self.robot_posi = np.zeros((3,), dtype=float)
    self.robot_atti_quat_simp = ars_lib_helpers.Quaternion.zerosQuatSimp()

    self.flag_set_robot_vel_world = False
    self.robot_velo_lin_world = np.zeros((3,), dtype=float)
    self.robot_velo_ang_world = np.zeros((1,), dtype=float)

    # References
    self.flag_set_robot_pose_ref = False
    self.robot_posi_ref = np.zeros((3,), dtype=float)
    self.robot_atti_quat_simp_ref = ars_lib_helpers.Quaternion.zerosQuatSimp()

    self.flag_set_robot_velo_world_ref = False
    self.robot_velo_lin_world_ref = np.zeros((3,), dtype=float)
    self.robot_velo_ang_world_ref = np.zeros((1,), dtype=float)

    self.flag_set_robot_velo_cmd_ref = False
    self.robot_velo_lin_cmd_ref = np.zeros((3,), dtype=float)
    self.robot_velo_ang_cmd_ref = np.zeros((1,), dtype=float)

    # Position-loop internal state
    self.pos_loop_time_stamp_ros = rospy.Time(0.0, 0.0)
    self.flag_set_pos_loop_out = False
    self.pos_loop_out_lin_cmd = np.zeros((3,), dtype=float)
    self.pos_loop_out_ang_cmd = np.zeros((1,), dtype=float)

    # Position PIDs: P-only (gain 1.0), anti wind-up +-0.1, output saturation +-5.0
    self.flag_ctr_pos_hor = True
    self.pos_hor_pid = ars_pid.PID()
    self.pos_hor_pid.setGainsPID(gain_P=1.0)
    self.pos_hor_pid.setAntiWindUp(-0.1, 0.1)
    self.pos_hor_pid.setCtrCmdSaturation(-5.0, 5.0)

    self.flag_ctr_pos_z = True
    self.pos_z_pid = ars_pid.PID()
    self.pos_z_pid.setGainsPID(gain_P=1.0)
    self.pos_z_pid.setAntiWindUp(-0.1, 0.1)
    self.pos_z_pid.setCtrCmdSaturation(-5.0, 5.0)

    self.flag_ctr_att_yaw = True
    self.att_yaw_pid = ars_pid.PID()
    self.att_yaw_pid.setGainsPID(gain_P=1.0)
    self.att_yaw_pid.setAntiWindUp(-0.1, 0.1)
    self.att_yaw_pid.setCtrCmdSaturation(-5.0, 5.0)

    # Velocity PIDs: P-only (gain 1.0), anti wind-up +-0.1, output saturation +-1.0
    self.flag_ctr_vel_lin_hor = True
    self.vel_lin_hor_pid = ars_pid.PID()
    self.vel_lin_hor_pid.setGainsPID(gain_P=1.0)
    self.vel_lin_hor_pid.setAntiWindUp(-0.1, 0.1)
    self.vel_lin_hor_pid.setCtrCmdSaturation(-1.0, 1.0)

    self.flag_ctr_vel_lin_z = True
    self.vel_lin_z_pid = ars_pid.PID()
    self.vel_lin_z_pid.setGainsPID(gain_P=1.0)
    self.vel_lin_z_pid.setAntiWindUp(-0.1, 0.1)
    self.vel_lin_z_pid.setCtrCmdSaturation(-1.0, 1.0)

    self.flag_ctr_vel_ang_z = True
    self.vel_ang_z_pid = ars_pid.PID()
    self.vel_ang_z_pid.setGainsPID(gain_P=1.0)
    self.vel_ang_z_pid.setAntiWindUp(-0.1, 0.1)
    self.vel_ang_z_pid.setCtrCmdSaturation(-1.0, 1.0)

    return

  @staticmethod
  def _unit_error_2d(error_2d):
    """Decompose a 2D error vector into (unit_direction, magnitude).

    Returns a zero direction vector when the magnitude is zero, so callers
    can always compute `direction * pid_output` safely (previously this
    case raised a NameError in the velocity loop).
    """
    mod_error = float(np.linalg.norm(error_2d))
    if mod_error > 0.0:
      return error_2d / mod_error, mod_error
    return np.zeros((2,), dtype=float), 0.0

  def setRobotPosRef(self, robot_posi_ref, robot_atti_quat_simp_ref):
    """Set the pose reference (position + simplified attitude quaternion)."""
    self.flag_set_robot_pose_ref = True
    self.robot_posi_ref = robot_posi_ref
    self.robot_atti_quat_simp_ref = robot_atti_quat_simp_ref
    return

  def setRobotVelWorldRef(self, lin_vel_world_ref, ang_vel_world_ref):
    """Set the world-frame velocity feedforward reference for the position loop."""
    self.flag_set_robot_velo_world_ref = True
    self.robot_velo_lin_world_ref = lin_vel_world_ref
    self.robot_velo_ang_world_ref = ang_vel_world_ref
    return

  def setRobotVelCmdRef(self, lin_vel_cmd_ref, ang_vel_cmd_ref):
    """Set the robot-frame velocity feedforward reference for the velocity loop."""
    self.flag_set_robot_velo_cmd_ref = True
    self.robot_velo_lin_cmd_ref = lin_vel_cmd_ref
    self.robot_velo_ang_cmd_ref = ang_vel_cmd_ref
    return

  def setRobotPose(self, robot_posi, robot_atti_quat_simp):
    """Set the pose feedback (position + simplified attitude quaternion)."""
    self.flag_set_robot_pose = True
    self.robot_posi = robot_posi
    self.robot_atti_quat_simp = robot_atti_quat_simp
    return

  def setRobotVelWorld(self, lin_vel_world, ang_vel_world):
    """Set the world-frame velocity feedback."""
    self.flag_set_robot_vel_world = True
    self.robot_velo_lin_world = lin_vel_world
    self.robot_velo_ang_world = ang_vel_world
    return

  def getRobotVeloCmdTimeStamp(self):
    """Return the time stamp of the last computed velocity command."""
    return self.robot_velo_cmd_time_stamp

  def getRobotVeloLinCmd(self):
    """Return the robot-frame linear velocity command (3-vector)."""
    return self.robot_velo_lin_cmd

  def getRobotVeloAngCmd(self):
    """Return the robot-frame angular (yaw) velocity command (1-vector)."""
    return self.robot_velo_ang_cmd

  def velLoopMotionController(self, time_stamp_ros):
    """Inner (velocity) control loop.

    Uses the position-loop output, expressed in the robot frame, as the
    velocity reference; combines the feedforward command reference with
    PID feedback on the velocity error. Stores the result in
    `robot_velo_lin_cmd` / `robot_velo_ang_cmd`.
    """

    # Time stamp of the command being produced
    self.robot_velo_cmd_time_stamp = time_stamp_ros

    # Express reference (position-loop output) and feedback in the robot frame
    ref_lin_robot = ars_lib_helpers.Conversions.convertVelLinFromWorldToRobot(self.pos_loop_out_lin_cmd, self.robot_atti_quat_simp)
    ref_ang_robot = ars_lib_helpers.Conversions.convertVelAngFromWorldToRobot(self.pos_loop_out_ang_cmd, self.robot_atti_quat_simp)
    fb_lin_robot = ars_lib_helpers.Conversions.convertVelLinFromWorldToRobot(self.robot_velo_lin_world, self.robot_atti_quat_simp)
    fb_ang_robot = ars_lib_helpers.Conversions.convertVelAngFromWorldToRobot(self.robot_velo_ang_world, self.robot_atti_quat_simp)

    # Feedforward / feedback accumulators
    velo_lin_cmd_ff = np.zeros((3,), dtype=float)
    velo_lin_cmd_fb = np.zeros((3,), dtype=float)
    velo_ang_cmd_ff = np.zeros((1,), dtype=float)
    velo_ang_cmd_fb = np.zeros((1,), dtype=float)

    # Linear horizontal (x & y): PID on error magnitude along error direction
    velo_lin_cmd_ff[0:2] = self.robot_velo_lin_cmd_ref[0:2]
    if(self.flag_ctr_vel_lin_hor and self.flag_set_pos_loop_out and self.flag_set_robot_vel_world):
      error_hor = ref_lin_robot[0:2] - fb_lin_robot[0:2]
      direction, mod_error = self._unit_error_2d(error_hor)
      ctr_out = self.vel_lin_hor_pid.call(time_stamp_ros, mod_error)
      velo_lin_cmd_fb[0:2] = direction * ctr_out

    self.robot_velo_lin_cmd[0:2] = velo_lin_cmd_ff[0:2] + velo_lin_cmd_fb[0:2]

    # Linear vertical (z)
    velo_lin_cmd_ff[2] = self.robot_velo_lin_cmd_ref[2]
    if(self.flag_ctr_vel_lin_z and self.flag_set_pos_loop_out and self.flag_set_robot_vel_world):
      error_vert = ref_lin_robot[2] - fb_lin_robot[2]
      velo_lin_cmd_fb[2] = self.vel_lin_z_pid.call(time_stamp_ros, error_vert)

    self.robot_velo_lin_cmd[2] = velo_lin_cmd_ff[2] + velo_lin_cmd_fb[2]

    # Angular (z / yaw rate)
    velo_ang_cmd_ff[0] = self.robot_velo_ang_cmd_ref[0]
    if(self.flag_ctr_vel_ang_z and self.flag_set_pos_loop_out and self.flag_set_robot_vel_world):
      error_ang_z = ref_ang_robot - fb_ang_robot
      velo_ang_cmd_fb[0] = self.vel_ang_z_pid.call(time_stamp_ros, error_ang_z)

    self.robot_velo_ang_cmd[0] = velo_ang_cmd_ff[0] + velo_ang_cmd_fb[0]

    return

  def posLoopMotionController(self, time_stamp_ros):
    """Outer (position) control loop.

    Combines the world-frame velocity feedforward reference with PID
    feedback on the pose error. Stores the world-frame velocity command in
    `pos_loop_out_lin_cmd` / `pos_loop_out_ang_cmd` and raises
    `flag_set_pos_loop_out` so the velocity loop can consume it.
    """

    # Time stamp
    self.pos_loop_time_stamp_ros = time_stamp_ros

    # Feedforward / feedback accumulators
    out_lin_cmd_ff = np.zeros((3,), dtype=float)
    out_lin_cmd_fb = np.zeros((3,), dtype=float)
    out_ang_cmd_ff = np.zeros((1,), dtype=float)
    out_ang_cmd_fb = np.zeros((1,), dtype=float)

    # Linear horizontal (x & y): PID on error magnitude along error direction
    out_lin_cmd_ff[0:2] = self.robot_velo_lin_world_ref[0:2]
    if(self.flag_ctr_pos_hor and self.flag_set_robot_pose and self.flag_set_robot_pose_ref):
      error_posi = self.robot_posi_ref[0:2] - self.robot_posi[0:2]
      direction, mod_error = self._unit_error_2d(error_posi)
      ctr_out = self.pos_hor_pid.call(time_stamp_ros, mod_error)
      out_lin_cmd_fb[0:2] = direction * ctr_out

    self.pos_loop_out_lin_cmd[0:2] = out_lin_cmd_ff[0:2] + out_lin_cmd_fb[0:2]

    # Linear vertical (z)
    out_lin_cmd_ff[2] = self.robot_velo_lin_world_ref[2]
    if(self.flag_ctr_pos_z and self.flag_set_robot_pose and self.flag_set_robot_pose_ref):
      error_posi_z = self.robot_posi_ref[2] - self.robot_posi[2]
      out_lin_cmd_fb[2] = self.pos_z_pid.call(time_stamp_ros, error_posi_z)

    self.pos_loop_out_lin_cmd[2] = out_lin_cmd_ff[2] + out_lin_cmd_fb[2]

    # Angular (z / yaw): attitude error from the simplified quaternions
    out_ang_cmd_ff[0] = self.robot_velo_ang_world_ref[0]
    if(self.flag_ctr_att_yaw and self.flag_set_robot_pose and self.flag_set_robot_pose_ref):
      error_att_z = ars_lib_helpers.Quaternion.errorDiffFromQuatSimp(self.robot_atti_quat_simp_ref, self.robot_atti_quat_simp)
      out_ang_cmd_fb[0] = self.att_yaw_pid.call(time_stamp_ros, error_att_z)

    self.pos_loop_out_ang_cmd[0] = out_ang_cmd_ff[0] + out_ang_cmd_fb[0]

    # Signal the velocity loop that a fresh output is available
    self.flag_set_pos_loop_out = True

    return
import numpy as np
from numpy import *
import os
import rospy
import tf_conversions as tf
import ars_lib_helpers
import ars_pid
class ArsMotionController:
bot_pose_ref = False
robot_posi_ref = None
robot_atti_quat_simp_ref = None
flag_set_robot_velo_world_ref = False
robot_velo_lin_world_ref = None
robot_velo_ang_world_ref = None
flag_set_robot_velo_cmd_ref = False
robot_velo_lin_cmd_ref = None
robot_velo_ang_cmd_ref = None
flag_set_robot_pose = False
robot_posi = None
robot_atti_quat_simp = None
flag_set_robot_vel_world = False
robot_velo_lin_world = None
robot_velo_ang_world = None
robot_velo_cmd_time_stamp = rospy.Time(0.0, 0.0)
robot_velo_lin_cmd = None
robot_velo_ang_cmd = None
pos_loop_time_stamp_ros = rospy.Time(0.0, 0.0)
flag_set_pos_loop_out = False
pos_loop_out_lin_cmd = None
pos_loop_out_ang_cmd = None
flag_ctr_pos_hor = True
pos_hor_pid = ars_pid.PID()
flag_ctr_pos_z = True
pos_z_pid = ars_pid.PID()
flag_ctr_att_yaw = True
att_yaw_pid = ars_pid.PID()
flag_ctr_vel_lin_hor = True
vel_lin_hor_pid = ars_pid.PID()
flag_ctr_vel_lin_z = True
vel_lin_z_pid = ars_pid.PID()
flag_ctr_vel_ang_z = True
vel_ang_z_pid = ars_pid.PID()
lf.robot_velo_cmd_time_stamp = rospy.Time(0.0, 0.0)
self.robot_velo_lin_cmd = np.zeros((3,), dtype=float)
self.robot_velo_ang_cmd = np.zeros((1,), dtype=float)
self.flag_set_robot_pose = False
self.robot_posi = np.zeros((3,), dtype=float)
self.robot_atti_quat_simp = ars_lib_helpers.Quaternion.zerosQuatSimp()
self.flag_set_robot_vel_world = False
self.robot_velo_lin_world = np.zeros((3,), dtype=float)
self.robot_velo_ang_world = np.zeros((1,), dtype=float)
self.flag_set_robot_pose_ref = False
self.robot_posi_ref = np.zeros((3,), dtype=float)
self.robot_atti_quat_simp_ref = ars_lib_helpers.Quaternion.zerosQuatSimp()
self.flag_set_robot_velo_world_ref = False
self.robot_velo_lin_world_ref = np.zeros((3,), dtype=float)
self.robot_velo_ang_world_ref = np.zeros((1,), dtype=float)
self.flag_set_robot_velo_cmd_ref = False
self.robot_velo_lin_cmd_ref = np.zeros((3,), dtype=float)
self.robot_velo_ang_cmd_ref = np.zeros((1,), dtype=float)
self.pos_loop_time_stamp_ros = rospy.Time(0.0, 0.0)
self.flag_set_pos_loop_out = False
self.pos_loop_out_lin_cmd = np.zeros((3,), dtype=float)
self.pos_loop_out_ang_cmd = np.zeros((1,), dtype=float)
self.flag_ctr_pos_hor = True
self.pos_hor_pid = ars_pid.PID()
self.pos_hor_pid.setGainsPID(gain_P=1.0)
self.pos_hor_pid.setAntiWindUp(-0.1, 0.1)
self.pos_hor_pid.setCtrCmdSaturation(-5.0, 5.0)
self.flag_ctr_pos_z = True
self.pos_z_pid = ars_pid.PID()
self.pos_z_pid.setGainsPID(gain_P=1.0)
self.pos_z_pid.setAntiWindUp(-0.1, 0.1)
self.pos_z_pid.setCtrCmdSaturation(-5.0, 5.0)
self.flag_ctr_att_yaw = True
self.att_yaw_pid = ars_pid.PID()
self.att_yaw_pid.setGainsPID(gain_P=1.0)
self.att_yaw_pid.setAntiWindUp(-0.1, 0.1)
self.att_yaw_pid.setCtrCmdSaturation(-5.0, 5.0)
self.flag_ctr_vel_lin_hor = True
self.vel_lin_hor_pid = ars_pid.PID()
self.vel_lin_hor_pid.setGainsPID(gain_P=1.0)
self.vel_lin_hor_pid.setAntiWindUp(-0.1, 0.1)
self.vel_lin_hor_pid.setCtrCmdSaturation(-1.0, 1.0)
self.flag_ctr_vel_lin_z = True
self.vel_lin_z_pid = ars_pid.PID()
self.vel_lin_z_pid.setGainsPID(gain_P=1.0)
self.vel_lin_z_pid.setAntiWindUp(-0.1, 0.1)
self.vel_lin_z_pid.setCtrCmdSaturation(-1.0, 1.0)
self.flag_ctr_vel_ang_z = True
self.vel_ang_z_pid = ars_pid.PID()
self.vel_ang_z_pid.setGainsPID(gain_P=1.0)
self.vel_ang_z_pid.setAntiWindUp(-0.1, 0.1)
self.vel_ang_z_pid.setCtrCmdSaturation(-1.0, 1.0)
return
def setRobotPosRef(self, robot_posi_ref, robot_atti_quat_simp_ref):
self.flag_set_robot_pose_ref = True
self.robot_posi_ref = robot_posi_ref
self.robot_atti_quat_simp_ref = robot_atti_quat_simp_ref
return
def setRobotVelWorldRef(self, lin_vel_world_ref, ang_vel_world_ref):
self.flag_set_robot_velo_world_ref = True
self.robot_velo_lin_world_ref = lin_vel_world_ref
self.robot_velo_ang_world_ref = ang_vel_world_ref
return
def setRobotVelCmdRef(self, lin_vel_cmd_ref, ang_vel_cmd_ref):
self.flag_set_robot_velo_cmd_ref = True
self.robot_velo_lin_cmd_ref = lin_vel_cmd_ref
self.robot_velo_ang_cmd_ref = ang_vel_cmd_ref
return
def setRobotPose(self, robot_posi, robot_atti_quat_simp):
self.flag_set_robot_pose = True
self.robot_posi = robot_posi
self.robot_atti_quat_simp = robot_atti_quat_simp
return
def setRobotVelWorld(self, lin_vel_world, ang_vel_world):
self.flag_set_robot_vel_world = True
self.robot_velo_lin_world = lin_vel_world
self.robot_velo_ang_world = ang_vel_world
return
def getRobotVeloCmdTimeStamp(self):
return self.robot_velo_cmd_time_stamp
def getRobotVeloLinCmd(self):
return self.robot_velo_lin_cmd
def getRobotVeloAngCmd(self):
return self.robot_velo_ang_cmd
def velLoopMotionController(self, time_stamp_ros):
self.robot_velo_cmd_time_stamp = time_stamp_ros
pos_loop_out_lin_cmd_robot = ars_lib_helpers.Conversions.convertVelLinFromWorldToRobot(self.pos_loop_out_lin_cmd, self.robot_atti_quat_simp)
pos_loop_out_ang_cmd_robot = ars_lib_helpers.Conversions.convertVelAngFromWorldToRobot(self.pos_loop_out_ang_cmd, self.robot_atti_quat_simp)
robot_velo_lin_robot = ars_lib_helpers.Conversions.convertVelLinFromWorldToRobot(self.robot_velo_lin_world, self.robot_atti_quat_simp)
robot_velo_ang_robot = ars_lib_helpers.Conversions.convertVelAngFromWorldToRobot(self.robot_velo_ang_world, self.robot_atti_quat_simp)
robot_velo_lin_cmd_ff = np.zeros((3,), dtype=float)
robot_velo_lin_cmd_fb = np.zeros((3,), dtype=float)
robot_velo_ang_cmd_ff = np.zeros((1,), dtype=float)
robot_velo_ang_cmd_fb = np.zeros((1,), dtype=float)
robot_velo_lin_cmd_ff[0:2] = self.robot_velo_lin_cmd_ref[0:2]
if(self.flag_ctr_vel_lin_hor and self.flag_set_pos_loop_out and self.flag_set_robot_vel_world):
error_velo_lin_horizontal = pos_loop_out_lin_cmd_robot[0:2] - robot_velo_lin_robot[0:2]
mod_error_velo_lin_horizontal = math.sqrt(error_velo_lin_horizontal[0]**2 + error_velo_lin_horizontal[1]**2)
if mod_error_velo_lin_horizontal != 0:
normalized_error_velo_lin_horizontal = error_velo_lin_horizontal / mod_error_velo_lin_horizontal
else:
normalized_error_robot_posi = 0
ctr_vel_lin = self.vel_lin_hor_pid.call(time_stamp_ros, mod_error_velo_lin_horizontal)
robot_velo_lin_cmd_fb[0:2] = normalized_error_velo_lin_horizontal * ctr_vel_lin
self.robot_velo_lin_cmd[0:2] = robot_velo_lin_cmd_ff[0:2] + robot_velo_lin_cmd_fb[0:2]
robot_velo_lin_cmd_ff[2] = self.robot_velo_lin_cmd_ref[2]
if(self.flag_ctr_vel_lin_z and self.flag_set_pos_loop_out and self.flag_set_robot_vel_world):
error_velo_lin_vertical = pos_loop_out_lin_cmd_robot[2] - robot_velo_lin_robot[2]
ctr_vel_lin = self.vel_lin_z_pid.call(time_stamp_ros, error_velo_lin_vertical)
robot_velo_lin_cmd_fb[2] = ctr_vel_lin
self.robot_velo_lin_cmd[2] = robot_velo_lin_cmd_ff[2] + robot_velo_lin_cmd_fb[2]
robot_velo_ang_cmd_ff[0] = self.robot_velo_ang_cmd_ref[0]
if(self.flag_ctr_vel_ang_z and self.flag_set_pos_loop_out and self.flag_set_robot_vel_world):
error_vel_ang_z = pos_loop_out_ang_cmd_robot - robot_velo_ang_robot
robot_velo_ang_cmd_fb[0] = self.vel_ang_z_pid.call(time_stamp_ros, error_vel_ang_z)
self.robot_velo_ang_cmd[0] = robot_velo_ang_cmd_ff[0] + robot_velo_ang_cmd_fb[0]
return
def posLoopMotionController(self, time_stamp_ros):
self.pos_loop_time_stamp_ros = time_stamp_ros
pos_loop_out_lin_cmd_ff = np.zeros((3,), dtype=float)
pos_loop_out_lin_cmd_fb = np.zeros((3,), dtype=float)
pos_loop_out_ang_cmd_ff = np.zeros((1,), dtype=float)
pos_loop_out_ang_cmd_fb = np.zeros((1,), dtype=float)
pos_loop_out_lin_cmd_ff[0:2] = self.robot_velo_lin_world_ref[0:2]
if(self.flag_ctr_pos_hor and self.flag_set_robot_pose and self.flag_set_robot_pose_ref):
error_robot_posi = self.robot_posi_ref[0:2] - self.robot_posi[0:2]
mod_error_robot_posi = math.sqrt(error_robot_posi[0]**2 + error_robot_posi[1]**2)
if mod_error_robot_posi !=0:
normalized_error_robot_posi = error_robot_posi / mod_error_robot_posi
else:
normalized_error_robot_posi = 0
ctr_robot_posi = self.pos_hor_pid.call(time_stamp_ros, mod_error_robot_posi)
pos_loop_out_lin_cmd_fb[0:2] = normalized_error_robot_posi * ctr_robot_posi
self.pos_loop_out_lin_cmd[0:2] = pos_loop_out_lin_cmd_ff[0:2] + pos_loop_out_lin_cmd_fb[0:2]
pos_loop_out_lin_cmd_ff[2] = self.robot_velo_lin_world_ref[2]
if(self.flag_ctr_pos_z and self.flag_set_robot_pose and self.flag_set_robot_pose_ref):
error_pos_loop = self.robot_posi_ref[2] - self.robot_posi[2]
ctr_pos_loop = self.pos_z_pid.call(time_stamp_ros, error_pos_loop)
pos_loop_out_lin_cmd_fb[2] = ctr_pos_loop
self.pos_loop_out_lin_cmd[2] = pos_loop_out_lin_cmd_ff[2] + pos_loop_out_lin_cmd_fb[2]
pos_loop_out_ang_cmd_ff[0] = self.robot_velo_ang_world_ref[0]
if(self.flag_ctr_att_yaw and self.flag_set_robot_pose and self.flag_set_robot_pose_ref):
error_att_z = ars_lib_helpers.Quaternion.errorDiffFromQuatSimp(self.robot_atti_quat_simp_ref,self.robot_atti_quat_simp)
pos_loop_out_ang_cmd_fb[0] = self.att_yaw_pid.call(time_stamp_ros, error_att_z)
self.pos_loop_out_ang_cmd[0] = pos_loop_out_ang_cmd_ff[0] + pos_loop_out_ang_cmd_fb[0]
self.flag_set_pos_loop_out = True
return | true | true |
f7ff78e6293ad62ee1fdfc0d88cca35dc02c1cdc | 17,243 | py | Python | src/transformers/models/auto/modeling_tf_auto.py | HimashiRathnayake/adapter-transformers | d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4 | [
"Apache-2.0"
] | 50,404 | 2019-09-26T09:55:55.000Z | 2022-03-31T23:07:49.000Z | src/transformers/models/auto/modeling_tf_auto.py | HimashiRathnayake/adapter-transformers | d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4 | [
"Apache-2.0"
] | 13,179 | 2019-09-26T10:10:57.000Z | 2022-03-31T23:17:08.000Z | src/transformers/models/auto/modeling_tf_auto.py | HimashiRathnayake/adapter-transformers | d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4 | [
"Apache-2.0"
] | 13,337 | 2019-09-26T10:49:38.000Z | 2022-03-31T23:06:17.000Z | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
import warnings
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
# Each table below maps a model-type key (the same keys as in
# CONFIG_MAPPING_NAMES) to the name(s) of the TF model class implementing a
# given head; resolution to actual classes happens lazily via _LazyAutoMapping.
TF_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping (bare encoder/decoder, no task head)
        ("deberta-v2", "TFDebertaV2Model"),
        ("deberta", "TFDebertaModel"),
        ("rembert", "TFRemBertModel"),
        ("roformer", "TFRoFormerModel"),
        ("convbert", "TFConvBertModel"),
        ("led", "TFLEDModel"),
        ("lxmert", "TFLxmertModel"),
        ("mt5", "TFMT5Model"),
        ("t5", "TFT5Model"),
        ("distilbert", "TFDistilBertModel"),
        ("albert", "TFAlbertModel"),
        ("bart", "TFBartModel"),
        ("camembert", "TFCamembertModel"),
        ("xlm-roberta", "TFXLMRobertaModel"),
        ("longformer", "TFLongformerModel"),
        ("roberta", "TFRobertaModel"),
        ("layoutlm", "TFLayoutLMModel"),
        ("bert", "TFBertModel"),
        ("openai-gpt", "TFOpenAIGPTModel"),
        ("gpt2", "TFGPT2Model"),
        ("mobilebert", "TFMobileBertModel"),
        ("transfo-xl", "TFTransfoXLModel"),
        ("xlnet", "TFXLNetModel"),
        ("flaubert", "TFFlaubertModel"),
        ("xlm", "TFXLMModel"),
        ("ctrl", "TFCTRLModel"),
        ("electra", "TFElectraModel"),
        # funnel maps to a tuple: two valid base classes for the same model type
        ("funnel", ("TFFunnelModel", "TFFunnelBaseModel")),
        ("dpr", "TFDPRQuestionEncoder"),
        ("mpnet", "TFMPNetModel"),
        ("mbart", "TFMBartModel"),
        ("marian", "TFMarianModel"),
        ("pegasus", "TFPegasusModel"),
        ("blenderbot", "TFBlenderbotModel"),
        ("blenderbot-small", "TFBlenderbotSmallModel"),
        ("wav2vec2", "TFWav2Vec2Model"),
        ("hubert", "TFHubertModel"),
    ]
)

TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("lxmert", "TFLxmertForPreTraining"),
        ("t5", "TFT5ForConditionalGeneration"),
        ("distilbert", "TFDistilBertForMaskedLM"),
        ("albert", "TFAlbertForPreTraining"),
        ("bart", "TFBartForConditionalGeneration"),
        ("camembert", "TFCamembertForMaskedLM"),
        ("xlm-roberta", "TFXLMRobertaForMaskedLM"),
        ("roberta", "TFRobertaForMaskedLM"),
        ("layoutlm", "TFLayoutLMForMaskedLM"),
        ("bert", "TFBertForPreTraining"),
        ("openai-gpt", "TFOpenAIGPTLMHeadModel"),
        ("gpt2", "TFGPT2LMHeadModel"),
        ("mobilebert", "TFMobileBertForPreTraining"),
        ("transfo-xl", "TFTransfoXLLMHeadModel"),
        ("xlnet", "TFXLNetLMHeadModel"),
        ("flaubert", "TFFlaubertWithLMHeadModel"),
        ("xlm", "TFXLMWithLMHeadModel"),
        ("ctrl", "TFCTRLLMHeadModel"),
        ("electra", "TFElectraForPreTraining"),
        ("funnel", "TFFunnelForPreTraining"),
        ("mpnet", "TFMPNetForMaskedLM"),
    ]
)

TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
    [
        # Model with LM heads mapping (legacy union of causal/masked/seq2seq LM)
        ("rembert", "TFRemBertForMaskedLM"),
        ("roformer", "TFRoFormerForMaskedLM"),
        ("convbert", "TFConvBertForMaskedLM"),
        ("led", "TFLEDForConditionalGeneration"),
        ("t5", "TFT5ForConditionalGeneration"),
        ("distilbert", "TFDistilBertForMaskedLM"),
        ("albert", "TFAlbertForMaskedLM"),
        ("marian", "TFMarianMTModel"),
        ("bart", "TFBartForConditionalGeneration"),
        ("camembert", "TFCamembertForMaskedLM"),
        ("xlm-roberta", "TFXLMRobertaForMaskedLM"),
        ("longformer", "TFLongformerForMaskedLM"),
        ("roberta", "TFRobertaForMaskedLM"),
        ("layoutlm", "TFLayoutLMForMaskedLM"),
        ("bert", "TFBertForMaskedLM"),
        ("openai-gpt", "TFOpenAIGPTLMHeadModel"),
        ("gpt2", "TFGPT2LMHeadModel"),
        ("mobilebert", "TFMobileBertForMaskedLM"),
        ("transfo-xl", "TFTransfoXLLMHeadModel"),
        ("xlnet", "TFXLNetLMHeadModel"),
        ("flaubert", "TFFlaubertWithLMHeadModel"),
        ("xlm", "TFXLMWithLMHeadModel"),
        ("ctrl", "TFCTRLLMHeadModel"),
        ("electra", "TFElectraForMaskedLM"),
        ("funnel", "TFFunnelForMaskedLM"),
        ("mpnet", "TFMPNetForMaskedLM"),
    ]
)

TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("rembert", "TFRemBertForCausalLM"),
        ("roformer", "TFRoFormerForCausalLM"),
        ("roberta", "TFRobertaForCausalLM"),
        ("bert", "TFBertLMHeadModel"),
        ("openai-gpt", "TFOpenAIGPTLMHeadModel"),
        ("gpt2", "TFGPT2LMHeadModel"),
        ("transfo-xl", "TFTransfoXLLMHeadModel"),
        ("xlnet", "TFXLNetLMHeadModel"),
        ("xlm", "TFXLMWithLMHeadModel"),
        ("ctrl", "TFCTRLLMHeadModel"),
    ]
)

TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("deberta-v2", "TFDebertaV2ForMaskedLM"),
        ("deberta", "TFDebertaForMaskedLM"),
        ("rembert", "TFRemBertForMaskedLM"),
        ("roformer", "TFRoFormerForMaskedLM"),
        ("convbert", "TFConvBertForMaskedLM"),
        ("distilbert", "TFDistilBertForMaskedLM"),
        ("albert", "TFAlbertForMaskedLM"),
        ("camembert", "TFCamembertForMaskedLM"),
        ("xlm-roberta", "TFXLMRobertaForMaskedLM"),
        ("longformer", "TFLongformerForMaskedLM"),
        ("roberta", "TFRobertaForMaskedLM"),
        ("layoutlm", "TFLayoutLMForMaskedLM"),
        ("bert", "TFBertForMaskedLM"),
        ("mobilebert", "TFMobileBertForMaskedLM"),
        ("flaubert", "TFFlaubertWithLMHeadModel"),
        ("xlm", "TFXLMWithLMHeadModel"),
        ("electra", "TFElectraForMaskedLM"),
        ("funnel", "TFFunnelForMaskedLM"),
        ("mpnet", "TFMPNetForMaskedLM"),
    ]
)

TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("led", "TFLEDForConditionalGeneration"),
        ("mt5", "TFMT5ForConditionalGeneration"),
        ("t5", "TFT5ForConditionalGeneration"),
        ("marian", "TFMarianMTModel"),
        ("mbart", "TFMBartForConditionalGeneration"),
        ("pegasus", "TFPegasusForConditionalGeneration"),
        ("blenderbot", "TFBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "TFBlenderbotSmallForConditionalGeneration"),
        ("bart", "TFBartForConditionalGeneration"),
        ("encoder-decoder", "TFEncoderDecoderModel"),
    ]
)

TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("deberta-v2", "TFDebertaV2ForSequenceClassification"),
        ("deberta", "TFDebertaForSequenceClassification"),
        ("rembert", "TFRemBertForSequenceClassification"),
        ("roformer", "TFRoFormerForSequenceClassification"),
        ("convbert", "TFConvBertForSequenceClassification"),
        ("distilbert", "TFDistilBertForSequenceClassification"),
        ("albert", "TFAlbertForSequenceClassification"),
        ("camembert", "TFCamembertForSequenceClassification"),
        ("xlm-roberta", "TFXLMRobertaForSequenceClassification"),
        ("longformer", "TFLongformerForSequenceClassification"),
        ("roberta", "TFRobertaForSequenceClassification"),
        ("layoutlm", "TFLayoutLMForSequenceClassification"),
        ("bert", "TFBertForSequenceClassification"),
        ("xlnet", "TFXLNetForSequenceClassification"),
        ("mobilebert", "TFMobileBertForSequenceClassification"),
        ("flaubert", "TFFlaubertForSequenceClassification"),
        ("xlm", "TFXLMForSequenceClassification"),
        ("electra", "TFElectraForSequenceClassification"),
        ("funnel", "TFFunnelForSequenceClassification"),
        ("gpt2", "TFGPT2ForSequenceClassification"),
        ("mpnet", "TFMPNetForSequenceClassification"),
        ("openai-gpt", "TFOpenAIGPTForSequenceClassification"),
        ("transfo-xl", "TFTransfoXLForSequenceClassification"),
        ("ctrl", "TFCTRLForSequenceClassification"),
    ]
)

TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("deberta-v2", "TFDebertaV2ForQuestionAnswering"),
        ("deberta", "TFDebertaForQuestionAnswering"),
        ("rembert", "TFRemBertForQuestionAnswering"),
        ("roformer", "TFRoFormerForQuestionAnswering"),
        ("convbert", "TFConvBertForQuestionAnswering"),
        ("distilbert", "TFDistilBertForQuestionAnswering"),
        ("albert", "TFAlbertForQuestionAnswering"),
        ("camembert", "TFCamembertForQuestionAnswering"),
        ("xlm-roberta", "TFXLMRobertaForQuestionAnswering"),
        ("longformer", "TFLongformerForQuestionAnswering"),
        ("roberta", "TFRobertaForQuestionAnswering"),
        ("bert", "TFBertForQuestionAnswering"),
        ("xlnet", "TFXLNetForQuestionAnsweringSimple"),
        ("mobilebert", "TFMobileBertForQuestionAnswering"),
        ("flaubert", "TFFlaubertForQuestionAnsweringSimple"),
        ("xlm", "TFXLMForQuestionAnsweringSimple"),
        ("electra", "TFElectraForQuestionAnswering"),
        ("funnel", "TFFunnelForQuestionAnswering"),
        ("mpnet", "TFMPNetForQuestionAnswering"),
    ]
)

TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("deberta-v2", "TFDebertaV2ForTokenClassification"),
        ("deberta", "TFDebertaForTokenClassification"),
        ("rembert", "TFRemBertForTokenClassification"),
        ("roformer", "TFRoFormerForTokenClassification"),
        ("convbert", "TFConvBertForTokenClassification"),
        ("distilbert", "TFDistilBertForTokenClassification"),
        ("albert", "TFAlbertForTokenClassification"),
        ("camembert", "TFCamembertForTokenClassification"),
        ("flaubert", "TFFlaubertForTokenClassification"),
        ("xlm", "TFXLMForTokenClassification"),
        ("xlm-roberta", "TFXLMRobertaForTokenClassification"),
        ("longformer", "TFLongformerForTokenClassification"),
        ("roberta", "TFRobertaForTokenClassification"),
        ("layoutlm", "TFLayoutLMForTokenClassification"),
        ("bert", "TFBertForTokenClassification"),
        ("mobilebert", "TFMobileBertForTokenClassification"),
        ("xlnet", "TFXLNetForTokenClassification"),
        ("electra", "TFElectraForTokenClassification"),
        ("funnel", "TFFunnelForTokenClassification"),
        ("mpnet", "TFMPNetForTokenClassification"),
    ]
)

TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("rembert", "TFRemBertForMultipleChoice"),
        ("roformer", "TFRoFormerForMultipleChoice"),
        ("convbert", "TFConvBertForMultipleChoice"),
        ("camembert", "TFCamembertForMultipleChoice"),
        ("xlm", "TFXLMForMultipleChoice"),
        ("xlm-roberta", "TFXLMRobertaForMultipleChoice"),
        ("longformer", "TFLongformerForMultipleChoice"),
        ("roberta", "TFRobertaForMultipleChoice"),
        ("bert", "TFBertForMultipleChoice"),
        ("distilbert", "TFDistilBertForMultipleChoice"),
        ("mobilebert", "TFMobileBertForMultipleChoice"),
        ("xlnet", "TFXLNetForMultipleChoice"),
        ("flaubert", "TFFlaubertForMultipleChoice"),
        ("albert", "TFAlbertForMultipleChoice"),
        ("electra", "TFElectraForMultipleChoice"),
        ("funnel", "TFFunnelForMultipleChoice"),
        ("mpnet", "TFMPNetForMultipleChoice"),
    ]
)

TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Next Sentence Prediction mapping
        ("bert", "TFBertForNextSentencePrediction"),
        ("mobilebert", "TFMobileBertForNextSentencePrediction"),
    ]
)
# Lazily-resolved mappings: config class -> TF model class, built from the
# name tables; model modules are only imported when a mapping entry is accessed.
TF_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_MAPPING_NAMES)
TF_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
TF_MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES)
TF_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
TF_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
# Concrete public TF Auto classes. Each class only binds its task-specific
# model mapping; `auto_class_update` then attaches the generated docstrings and
# `from_config` / `from_pretrained` documentation for that head type.
class TFAutoModel(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_MAPPING
TFAutoModel = auto_class_update(TFAutoModel)
class TFAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_PRETRAINING_MAPPING
TFAutoModelForPreTraining = auto_class_update(TFAutoModelForPreTraining, head_doc="pretraining")
# Private on purpose, the public class will add the deprecation warnings.
class _TFAutoModelWithLMHead(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_WITH_LM_HEAD_MAPPING
_TFAutoModelWithLMHead = auto_class_update(_TFAutoModelWithLMHead, head_doc="language modeling")
class TFAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
TFAutoModelForCausalLM = auto_class_update(TFAutoModelForCausalLM, head_doc="causal language modeling")
class TFAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
TFAutoModelForMaskedLM = auto_class_update(TFAutoModelForMaskedLM, head_doc="masked language modeling")
class TFAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
TFAutoModelForSeq2SeqLM = auto_class_update(
    TFAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class TFAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
TFAutoModelForSequenceClassification = auto_class_update(
    TFAutoModelForSequenceClassification, head_doc="sequence classification"
)
class TFAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING
TFAutoModelForQuestionAnswering = auto_class_update(TFAutoModelForQuestionAnswering, head_doc="question answering")
class TFAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
TFAutoModelForTokenClassification = auto_class_update(
    TFAutoModelForTokenClassification, head_doc="token classification"
)
class TFAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
TFAutoModelForMultipleChoice = auto_class_update(TFAutoModelForMultipleChoice, head_doc="multiple choice")
class TFAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
TFAutoModelForNextSentencePrediction = auto_class_update(
    TFAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class TFAutoModelWithLMHead(_TFAutoModelWithLMHead):
    """Deprecated public alias of ``_TFAutoModelWithLMHead``.

    Kept for backward compatibility only: both constructors emit a
    ``FutureWarning`` pointing users at the task-specific replacements
    (``TFAutoModelForCausalLM``, ``TFAutoModelForMaskedLM`` or
    ``TFAutoModelForSeq2SeqLM``) and then delegate to the parent class.
    """

    # Single source of truth for the deprecation text; it was previously
    # duplicated verbatim in both classmethods below.
    _DEPRECATION_MESSAGE = (
        "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
        "`TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models and "
        "`TFAutoModelForSeq2SeqLM` for encoder-decoder models."
    )

    @classmethod
    def from_config(cls, config):
        """Instantiate from a config object, emitting the deprecation warning."""
        warnings.warn(cls._DEPRECATION_MESSAGE, FutureWarning)
        return super().from_config(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate from a pretrained checkpoint, emitting the deprecation warning."""
        warnings.warn(cls._DEPRECATION_MESSAGE, FutureWarning)
        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
| 39.914352 | 123 | 0.703764 |
import warnings
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
TF_MODEL_MAPPING_NAMES = OrderedDict(
[
("deberta-v2", "TFDebertaV2Model"),
("deberta", "TFDebertaModel"),
("rembert", "TFRemBertModel"),
("roformer", "TFRoFormerModel"),
("convbert", "TFConvBertModel"),
("led", "TFLEDModel"),
("lxmert", "TFLxmertModel"),
("mt5", "TFMT5Model"),
("t5", "TFT5Model"),
("distilbert", "TFDistilBertModel"),
("albert", "TFAlbertModel"),
("bart", "TFBartModel"),
("camembert", "TFCamembertModel"),
("xlm-roberta", "TFXLMRobertaModel"),
("longformer", "TFLongformerModel"),
("roberta", "TFRobertaModel"),
("layoutlm", "TFLayoutLMModel"),
("bert", "TFBertModel"),
("openai-gpt", "TFOpenAIGPTModel"),
("gpt2", "TFGPT2Model"),
("mobilebert", "TFMobileBertModel"),
("transfo-xl", "TFTransfoXLModel"),
("xlnet", "TFXLNetModel"),
("flaubert", "TFFlaubertModel"),
("xlm", "TFXLMModel"),
("ctrl", "TFCTRLModel"),
("electra", "TFElectraModel"),
("funnel", ("TFFunnelModel", "TFFunnelBaseModel")),
("dpr", "TFDPRQuestionEncoder"),
("mpnet", "TFMPNetModel"),
("mbart", "TFMBartModel"),
("marian", "TFMarianModel"),
("pegasus", "TFPegasusModel"),
("blenderbot", "TFBlenderbotModel"),
("blenderbot-small", "TFBlenderbotSmallModel"),
("wav2vec2", "TFWav2Vec2Model"),
("hubert", "TFHubertModel"),
]
)
TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
("lxmert", "TFLxmertForPreTraining"),
("t5", "TFT5ForConditionalGeneration"),
("distilbert", "TFDistilBertForMaskedLM"),
("albert", "TFAlbertForPreTraining"),
("bart", "TFBartForConditionalGeneration"),
("camembert", "TFCamembertForMaskedLM"),
("xlm-roberta", "TFXLMRobertaForMaskedLM"),
("roberta", "TFRobertaForMaskedLM"),
("layoutlm", "TFLayoutLMForMaskedLM"),
("bert", "TFBertForPreTraining"),
("openai-gpt", "TFOpenAIGPTLMHeadModel"),
("gpt2", "TFGPT2LMHeadModel"),
("mobilebert", "TFMobileBertForPreTraining"),
("transfo-xl", "TFTransfoXLLMHeadModel"),
("xlnet", "TFXLNetLMHeadModel"),
("flaubert", "TFFlaubertWithLMHeadModel"),
("xlm", "TFXLMWithLMHeadModel"),
("ctrl", "TFCTRLLMHeadModel"),
("electra", "TFElectraForPreTraining"),
("funnel", "TFFunnelForPreTraining"),
("mpnet", "TFMPNetForMaskedLM"),
]
)
TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
[
("rembert", "TFRemBertForMaskedLM"),
("roformer", "TFRoFormerForMaskedLM"),
("convbert", "TFConvBertForMaskedLM"),
("led", "TFLEDForConditionalGeneration"),
("t5", "TFT5ForConditionalGeneration"),
("distilbert", "TFDistilBertForMaskedLM"),
("albert", "TFAlbertForMaskedLM"),
("marian", "TFMarianMTModel"),
("bart", "TFBartForConditionalGeneration"),
("camembert", "TFCamembertForMaskedLM"),
("xlm-roberta", "TFXLMRobertaForMaskedLM"),
("longformer", "TFLongformerForMaskedLM"),
("roberta", "TFRobertaForMaskedLM"),
("layoutlm", "TFLayoutLMForMaskedLM"),
("bert", "TFBertForMaskedLM"),
("openai-gpt", "TFOpenAIGPTLMHeadModel"),
("gpt2", "TFGPT2LMHeadModel"),
("mobilebert", "TFMobileBertForMaskedLM"),
("transfo-xl", "TFTransfoXLLMHeadModel"),
("xlnet", "TFXLNetLMHeadModel"),
("flaubert", "TFFlaubertWithLMHeadModel"),
("xlm", "TFXLMWithLMHeadModel"),
("ctrl", "TFCTRLLMHeadModel"),
("electra", "TFElectraForMaskedLM"),
("funnel", "TFFunnelForMaskedLM"),
("mpnet", "TFMPNetForMaskedLM"),
]
)
TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
("rembert", "TFRemBertForCausalLM"),
("roformer", "TFRoFormerForCausalLM"),
("roberta", "TFRobertaForCausalLM"),
("bert", "TFBertLMHeadModel"),
("openai-gpt", "TFOpenAIGPTLMHeadModel"),
("gpt2", "TFGPT2LMHeadModel"),
("transfo-xl", "TFTransfoXLLMHeadModel"),
("xlnet", "TFXLNetLMHeadModel"),
("xlm", "TFXLMWithLMHeadModel"),
("ctrl", "TFCTRLLMHeadModel"),
]
)
TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
("deberta-v2", "TFDebertaV2ForMaskedLM"),
("deberta", "TFDebertaForMaskedLM"),
("rembert", "TFRemBertForMaskedLM"),
("roformer", "TFRoFormerForMaskedLM"),
("convbert", "TFConvBertForMaskedLM"),
("distilbert", "TFDistilBertForMaskedLM"),
("albert", "TFAlbertForMaskedLM"),
("camembert", "TFCamembertForMaskedLM"),
("xlm-roberta", "TFXLMRobertaForMaskedLM"),
("longformer", "TFLongformerForMaskedLM"),
("roberta", "TFRobertaForMaskedLM"),
("layoutlm", "TFLayoutLMForMaskedLM"),
("bert", "TFBertForMaskedLM"),
("mobilebert", "TFMobileBertForMaskedLM"),
("flaubert", "TFFlaubertWithLMHeadModel"),
("xlm", "TFXLMWithLMHeadModel"),
("electra", "TFElectraForMaskedLM"),
("funnel", "TFFunnelForMaskedLM"),
("mpnet", "TFMPNetForMaskedLM"),
]
)
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
("led", "TFLEDForConditionalGeneration"),
("mt5", "TFMT5ForConditionalGeneration"),
("t5", "TFT5ForConditionalGeneration"),
("marian", "TFMarianMTModel"),
("mbart", "TFMBartForConditionalGeneration"),
("pegasus", "TFPegasusForConditionalGeneration"),
("blenderbot", "TFBlenderbotForConditionalGeneration"),
("blenderbot-small", "TFBlenderbotSmallForConditionalGeneration"),
("bart", "TFBartForConditionalGeneration"),
("encoder-decoder", "TFEncoderDecoderModel"),
]
)
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("deberta-v2", "TFDebertaV2ForSequenceClassification"),
("deberta", "TFDebertaForSequenceClassification"),
("rembert", "TFRemBertForSequenceClassification"),
("roformer", "TFRoFormerForSequenceClassification"),
("convbert", "TFConvBertForSequenceClassification"),
("distilbert", "TFDistilBertForSequenceClassification"),
("albert", "TFAlbertForSequenceClassification"),
("camembert", "TFCamembertForSequenceClassification"),
("xlm-roberta", "TFXLMRobertaForSequenceClassification"),
("longformer", "TFLongformerForSequenceClassification"),
("roberta", "TFRobertaForSequenceClassification"),
("layoutlm", "TFLayoutLMForSequenceClassification"),
("bert", "TFBertForSequenceClassification"),
("xlnet", "TFXLNetForSequenceClassification"),
("mobilebert", "TFMobileBertForSequenceClassification"),
("flaubert", "TFFlaubertForSequenceClassification"),
("xlm", "TFXLMForSequenceClassification"),
("electra", "TFElectraForSequenceClassification"),
("funnel", "TFFunnelForSequenceClassification"),
("gpt2", "TFGPT2ForSequenceClassification"),
("mpnet", "TFMPNetForSequenceClassification"),
("openai-gpt", "TFOpenAIGPTForSequenceClassification"),
("transfo-xl", "TFTransfoXLForSequenceClassification"),
("ctrl", "TFCTRLForSequenceClassification"),
]
)
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
("deberta-v2", "TFDebertaV2ForQuestionAnswering"),
("deberta", "TFDebertaForQuestionAnswering"),
("rembert", "TFRemBertForQuestionAnswering"),
("roformer", "TFRoFormerForQuestionAnswering"),
("convbert", "TFConvBertForQuestionAnswering"),
("distilbert", "TFDistilBertForQuestionAnswering"),
("albert", "TFAlbertForQuestionAnswering"),
("camembert", "TFCamembertForQuestionAnswering"),
("xlm-roberta", "TFXLMRobertaForQuestionAnswering"),
("longformer", "TFLongformerForQuestionAnswering"),
("roberta", "TFRobertaForQuestionAnswering"),
("bert", "TFBertForQuestionAnswering"),
("xlnet", "TFXLNetForQuestionAnsweringSimple"),
("mobilebert", "TFMobileBertForQuestionAnswering"),
("flaubert", "TFFlaubertForQuestionAnsweringSimple"),
("xlm", "TFXLMForQuestionAnsweringSimple"),
("electra", "TFElectraForQuestionAnswering"),
("funnel", "TFFunnelForQuestionAnswering"),
("mpnet", "TFMPNetForQuestionAnswering"),
]
)
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("deberta-v2", "TFDebertaV2ForTokenClassification"),
("deberta", "TFDebertaForTokenClassification"),
("rembert", "TFRemBertForTokenClassification"),
("roformer", "TFRoFormerForTokenClassification"),
("convbert", "TFConvBertForTokenClassification"),
("distilbert", "TFDistilBertForTokenClassification"),
("albert", "TFAlbertForTokenClassification"),
("camembert", "TFCamembertForTokenClassification"),
("flaubert", "TFFlaubertForTokenClassification"),
("xlm", "TFXLMForTokenClassification"),
("xlm-roberta", "TFXLMRobertaForTokenClassification"),
("longformer", "TFLongformerForTokenClassification"),
("roberta", "TFRobertaForTokenClassification"),
("layoutlm", "TFLayoutLMForTokenClassification"),
("bert", "TFBertForTokenClassification"),
("mobilebert", "TFMobileBertForTokenClassification"),
("xlnet", "TFXLNetForTokenClassification"),
("electra", "TFElectraForTokenClassification"),
("funnel", "TFFunnelForTokenClassification"),
("mpnet", "TFMPNetForTokenClassification"),
]
)
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
("rembert", "TFRemBertForMultipleChoice"),
("roformer", "TFRoFormerForMultipleChoice"),
("convbert", "TFConvBertForMultipleChoice"),
("camembert", "TFCamembertForMultipleChoice"),
("xlm", "TFXLMForMultipleChoice"),
("xlm-roberta", "TFXLMRobertaForMultipleChoice"),
("longformer", "TFLongformerForMultipleChoice"),
("roberta", "TFRobertaForMultipleChoice"),
("bert", "TFBertForMultipleChoice"),
("distilbert", "TFDistilBertForMultipleChoice"),
("mobilebert", "TFMobileBertForMultipleChoice"),
("xlnet", "TFXLNetForMultipleChoice"),
("flaubert", "TFFlaubertForMultipleChoice"),
("albert", "TFAlbertForMultipleChoice"),
("electra", "TFElectraForMultipleChoice"),
("funnel", "TFFunnelForMultipleChoice"),
("mpnet", "TFMPNetForMultipleChoice"),
]
)
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "TFBertForNextSentencePrediction"),
("mobilebert", "TFMobileBertForNextSentencePrediction"),
]
)
TF_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_MAPPING_NAMES)
TF_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
TF_MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES)
TF_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
TF_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
# Concrete public TF Auto classes. Each class only binds its task-specific
# model mapping; `auto_class_update` then attaches the generated docstrings and
# `from_config` / `from_pretrained` documentation for that head type.
class TFAutoModel(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_MAPPING
TFAutoModel = auto_class_update(TFAutoModel)
class TFAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_PRETRAINING_MAPPING
TFAutoModelForPreTraining = auto_class_update(TFAutoModelForPreTraining, head_doc="pretraining")
# Private on purpose, the public class will add the deprecation warnings.
class _TFAutoModelWithLMHead(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_WITH_LM_HEAD_MAPPING
_TFAutoModelWithLMHead = auto_class_update(_TFAutoModelWithLMHead, head_doc="language modeling")
class TFAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
TFAutoModelForCausalLM = auto_class_update(TFAutoModelForCausalLM, head_doc="causal language modeling")
class TFAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
TFAutoModelForMaskedLM = auto_class_update(TFAutoModelForMaskedLM, head_doc="masked language modeling")
class TFAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
TFAutoModelForSeq2SeqLM = auto_class_update(
    TFAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class TFAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
TFAutoModelForSequenceClassification = auto_class_update(
    TFAutoModelForSequenceClassification, head_doc="sequence classification"
)
class TFAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING
TFAutoModelForQuestionAnswering = auto_class_update(TFAutoModelForQuestionAnswering, head_doc="question answering")
class TFAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
TFAutoModelForTokenClassification = auto_class_update(
    TFAutoModelForTokenClassification, head_doc="token classification"
)
class TFAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
TFAutoModelForMultipleChoice = auto_class_update(TFAutoModelForMultipleChoice, head_doc="multiple choice")
class TFAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
TFAutoModelForNextSentencePrediction = auto_class_update(
    TFAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class TFAutoModelWithLMHead(_TFAutoModelWithLMHead):
    """Deprecated public alias of ``_TFAutoModelWithLMHead``.

    Kept for backward compatibility only: both constructors emit a
    ``FutureWarning`` pointing users at the task-specific replacements
    (``TFAutoModelForCausalLM``, ``TFAutoModelForMaskedLM`` or
    ``TFAutoModelForSeq2SeqLM``) and then delegate to the parent class.
    """

    # Single source of truth for the deprecation text; it was previously
    # duplicated verbatim in both classmethods below.
    _DEPRECATION_MESSAGE = (
        "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
        "`TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models and "
        "`TFAutoModelForSeq2SeqLM` for encoder-decoder models."
    )

    @classmethod
    def from_config(cls, config):
        """Instantiate from a config object, emitting the deprecation warning."""
        warnings.warn(cls._DEPRECATION_MESSAGE, FutureWarning)
        return super().from_config(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate from a pretrained checkpoint, emitting the deprecation warning."""
        warnings.warn(cls._DEPRECATION_MESSAGE, FutureWarning)
        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
| true | true |
f7ff79584cdf675446a8a621aec105ad61a6a348 | 5,543 | py | Python | tonos_ts4/ts4.py | NilFoundation/ton-TestSuite4 | abc1921e9ec2a9c237daf271bb550ee458ac6565 | [
"Apache-2.0"
] | null | null | null | tonos_ts4/ts4.py | NilFoundation/ton-TestSuite4 | abc1921e9ec2a9c237daf271bb550ee458ac6565 | [
"Apache-2.0"
] | null | null | null | tonos_ts4/ts4.py | NilFoundation/ton-TestSuite4 | abc1921e9ec2a9c237daf271bb550ee458ac6565 | [
"Apache-2.0"
] | null | null | null | """
This file is part of TON OS.
TON OS is free software: you can redistribute it and/or modify
it under the terms of the Apache License 2.0 (http://www.apache.org/licenses/)
Copyright 2019-2021 (c) TON LABS
"""
import sys
import base64
import secrets
import json
import numbers
import re
import copy
import os.path
from glob import glob
from .util import *
from .address import *
from .abi import *
from .decoder import *
from .dump import *
from .global_functions import *
from .globals import core
from .BaseContract import BaseContract, decode_contract_answer
__version__ = version()
# TODO: Global decoding params. Add documentation
decoder = Decoder.defaults()
def check_exitcode(expected_ec, real_ec):
    """Asserts that the observed exit code matches the expected one.

    On a mismatch, a human-readable hint is printed (via `verbose_`) for a
    handful of well-known TVM exit codes before the assertion fires.
    """
    if expected_ec != real_ec:
        # TODO: add more codes here...
        hints = {
            51: 'Calling of contract\'s constructor that has already been called.',
            52: 'Replay protection exception.',
            60: 'Inbound message has wrong function id.',
            76: 'Public function was called before constructor.',
        }
        hint = hints.get(real_ec)
        if hint is not None:
            verbose_('{}{}'.format(globals.core.get_last_error_msg(), ': ' + hint))
    assert eq(expected_ec, real_ec, dismiss = not globals.G_STOP_AT_CRASH)
def process_actions(result: ExecutionResult, expect_ec = 0):
    """Processes the outcome of one executed transaction.

    Checks the exit code against `expect_ec`, raises if the transaction was
    aborted, records emitted events in `globals.EVENTS`, appends outgoing
    call/empty messages to `globals.QUEUE`, and captures at most one answer
    message.

    :param ExecutionResult result: Raw execution result to process
    :param num expect_ec: Expected exit code (0 by default)
    :return: Tuple of (gas used, answer message or None)
    """
    assert isinstance(result, ExecutionResult)
    ec = result.exit_code
    if globals.G_VERBOSE:
        if ec != 0:
            print(grey('    exit_code: ') + yellow(ec) + '\n')
        if expect_ec != ec:
            verbose_(globals.core.get_last_error_msg())
    check_exitcode(expect_ec, ec)
    if result.error is not None:
        raise Exception("Transaction aborted: {}".format(result.error))
    answer = None
    # Each action is a JSON-encoded outbound message produced by the transaction.
    for j in result.actions:
        msg = Msg(json.loads(j))
        # if globals.G_VERBOSE:
            # print('process msg:', msg)
        if msg.is_event():
            if globals.G_VERBOSE or globals.G_SHOW_EVENTS:
                # TODO: move this printing code to a separate function and file
                xtra = ''
                params = msg.params
                if msg.is_event('DebugEvent'):
                    xtra = ' ={}'.format(decode_int(params['x']))
                elif msg.is_event('LogEvent'):
                    params['comment'] = bytearray.fromhex(params['comment']).decode()
                print(bright_blue('< event') + grey(': '), end='')
                print(cyan('   '), grey('<-'), bright_cyan(format_addr(msg.src)))
                print(cyan(grey('    name:   ') + cyan('{}'.format(bright_cyan(msg.event)))))
                print(grey('    params: ') + cyan(Params.stringify(params)), cyan(xtra), '\n')
            globals.EVENTS.append(msg)
        else:
            # not event
            if msg.is_unknown():
                #print(msg)
                if globals.G_VERBOSE:
                    print(yellow('WARNING! Unknown message!')) #TODO to highlight the print
            elif msg.is_bounced():
                # Bounced messages are silently dropped.
                pass
            elif msg.is_answer():
                # We expect only one answer
                assert answer is None
                answer = msg
                continue
            else:
                assert msg.is_call() or msg.is_empty(), red('Unexpected type: {}'.format(msg.type))
            # Non-answer messages are queued for later dispatch.
            globals.QUEUE.append(msg)
    return (result.gas_used, answer)
def dispatch_messages(callback = None):
    """Processes the message queue until it is drained.

    :param callback: Optional filter invoked with the next pending message.
        A `False` return value discards that message instead of dispatching it.
    """
    while len(globals.QUEUE) > 0:
        skip = callback is not None and callback(peek_msg()) == False
        if skip:
            pop_msg()
        else:
            dispatch_one_message()
def dispatch_one_message(expect_ec = 0):
    """Pops the first unprocessed message from the queue and executes it.

    Pass a non-zero `expect_ec` when the transaction is expected to fail
    with that exit code.

    :param num expect_ec: Expected exit code
    :return: The amount of gas spent on the execution of the transaction
    :rtype: num
    """
    msg = pop_msg()
    globals.ALL_MESSAGES.append(msg)
    # Both conditions are evaluated unconditionally so a user-supplied filter
    # always sees the message, even when verbose dumping is already enabled.
    wants_dump = globals.G_VERBOSE or globals.G_DUMP_MESSAGES
    filter_hit = globals.G_MSG_FILTER is not None and globals.G_MSG_FILTER(msg.data)
    if wants_dump or filter_hit:
        dump_message(msg)
    if msg.dst.is_none():
        # TODO: a getter's reply. Add a test for that
        return
    raw_result = globals.core.dispatch_message(msg.id)
    gas, answer = process_actions(ExecutionResult(raw_result), expect_ec)
    assert answer is None
    return gas
#########################################################################################################
# TODO: add docs?
class BalanceWatcher:
    """Remembers a contract's balance and checks that it changes as expected."""
    def __init__(self, contract):
        self.contract_ = contract
        self.balance_ = contract.balance
        self.epsilon_ = 2
    def ensure_change(self, expected_diff):
        """Asserts the balance moved by `expected_diff` since the last check."""
        observed = self.contract_.balance
        ensure_balance(self.balance_ + expected_diff, observed, epsilon = self.epsilon_)
        self.balance_ = observed
#########################################################################################################
| 34.861635 | 105 | 0.593541 |
import sys
import base64
import secrets
import json
import numbers
import re
import copy
import os.path
from glob import glob
from .util import *
from .address import *
from .abi import *
from .decoder import *
from .dump import *
from .global_functions import *
from .globals import core
from .BaseContract import BaseContract, decode_contract_answer
__version__ = version()
decoder = Decoder.defaults()
def check_exitcode(expected_ec, real_ec):
    """Asserts that the observed exit code matches the expected one.

    On a mismatch, a human-readable hint is printed (via `verbose_`) for a
    handful of well-known TVM exit codes before the assertion fires.
    """
    if expected_ec != real_ec:
        xtra = None
        if real_ec == 51: xtra = 'Calling of contract\'s constructor that has already been called.'
        if real_ec == 52: xtra = 'Replay protection exception.'
        if real_ec == 60: xtra = 'Inbound message has wrong function id.'
        if real_ec == 76: xtra = 'Public function was called before constructor.'
        # TODO: add more codes here...
        if xtra is not None:
            xtra = ': ' + xtra
            verbose_('{}{}'.format(globals.core.get_last_error_msg(), xtra))
    assert eq(expected_ec, real_ec, dismiss = not globals.G_STOP_AT_CRASH)
def process_actions(result: ExecutionResult, expect_ec = 0):
    """Processes the outcome of one executed transaction.

    Checks the exit code against `expect_ec`, raises if the transaction was
    aborted, records emitted events in `globals.EVENTS`, appends outgoing
    call/empty messages to `globals.QUEUE`, and captures at most one answer
    message.

    :param ExecutionResult result: Raw execution result to process
    :param num expect_ec: Expected exit code (0 by default)
    :return: Tuple of (gas used, answer message or None)
    """
    assert isinstance(result, ExecutionResult)
    ec = result.exit_code
    if globals.G_VERBOSE:
        if ec != 0:
            print(grey('    exit_code: ') + yellow(ec) + '\n')
        if expect_ec != ec:
            verbose_(globals.core.get_last_error_msg())
    check_exitcode(expect_ec, ec)
    if result.error is not None:
        raise Exception("Transaction aborted: {}".format(result.error))
    answer = None
    # Each action is a JSON-encoded outbound message produced by the transaction.
    for j in result.actions:
        msg = Msg(json.loads(j))
        # if globals.G_VERBOSE:
            # print('process msg:', msg)
        if msg.is_event():
            if globals.G_VERBOSE or globals.G_SHOW_EVENTS:
                # TODO: move this printing code to a separate function and file
                xtra = ''
                params = msg.params
                if msg.is_event('DebugEvent'):
                    xtra = ' ={}'.format(decode_int(params['x']))
                elif msg.is_event('LogEvent'):
                    params['comment'] = bytearray.fromhex(params['comment']).decode()
                print(bright_blue('< event') + grey(': '), end='')
                print(cyan('   '), grey('<-'), bright_cyan(format_addr(msg.src)))
                print(cyan(grey('    name:   ') + cyan('{}'.format(bright_cyan(msg.event)))))
                print(grey('    params: ') + cyan(Params.stringify(params)), cyan(xtra), '\n')
            globals.EVENTS.append(msg)
        else:
            # not event
            if msg.is_unknown():
                #print(msg)
                if globals.G_VERBOSE:
                    print(yellow('WARNING! Unknown message!')) #TODO to highlight the print
            elif msg.is_bounced():
                # Bounced messages are silently dropped.
                pass
            elif msg.is_answer():
                # We expect only one answer
                assert answer is None
                answer = msg
                continue
            else:
                assert msg.is_call() or msg.is_empty(), red('Unexpected type: {}'.format(msg.type))
            # Non-answer messages are queued for later dispatch.
            globals.QUEUE.append(msg)
    return (result.gas_used, answer)
def dispatch_messages(callback = None):
    """Dispatches all messages in the queue one by one until the queue becomes empty.

    :param callback: Callback to be called for each processed message.
        If callback returns False then the given message is skipped.
    """
    while len(globals.QUEUE) > 0:
        if callback is not None and callback(peek_msg()) == False:
            pop_msg()
            continue
        dispatch_one_message()
def dispatch_one_message(expect_ec = 0):
    """Takes first unprocessed message from the queue and dispatches it.

    Use `expect_ec` parameter if you expect non-zero exit code.

    :param num expect_ec: Expected exit code
    :return: The amount of gas spent on the execution of the transaction
    :rtype: num
    """
    msg = pop_msg()
    globals.ALL_MESSAGES.append(msg)
    # if is_method_call(msg, 'onRoundComplete'):
    #    dump_message(msg)
    # Both flags are computed unconditionally so a user-supplied filter always
    # sees the message, even when verbose dumping is already enabled.
    dump1 = globals.G_VERBOSE or globals.G_DUMP_MESSAGES
    dump2 = globals.G_MSG_FILTER is not None and globals.G_MSG_FILTER(msg.data)
    if dump1 or dump2:
        dump_message(msg)
    if msg.dst.is_none():
        # TODO: a getter's reply. Add a test for that
        return
    result = globals.core.dispatch_message(msg.id)
    result = ExecutionResult(result)
    gas, answer = process_actions(result, expect_ec)
    assert answer is None
    return gas
| true | true |
f7ff7a1fbbaa2864b7bdc620350cbc87eee65920 | 43,193 | py | Python | scripts/gen-s-parser.py | stagas/binaryen | a440c7697875e5fcc046370b40295f6dffe44ee0 | [
"Apache-2.0"
] | null | null | null | scripts/gen-s-parser.py | stagas/binaryen | a440c7697875e5fcc046370b40295f6dffe44ee0 | [
"Apache-2.0"
] | null | null | null | scripts/gen-s-parser.py | stagas/binaryen | a440c7697875e5fcc046370b40295f6dffe44ee0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2018 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# Table driving the generated s-expression parser: each entry maps a wasm
# text-format instruction mnemonic to the C++ expression that parses it.
# instruction_parser() below compiles this table into a trie of nested
# switch statements over the mnemonic's characters.
instructions = [
    ("unreachable", "makeUnreachable()"),
    ("nop", "makeNop()"),
    ("block", "makeBlock(s)"),
    ("loop", "makeLoop(s)"),
    ("if", "makeIf(s)"),
    ("then", "makeThenOrElse(s)"),
    ("else", "makeThenOrElse(s)"),
    ("br", "makeBreak(s)"),
    ("br_if", "makeBreak(s)"),
    ("br_table", "makeBreakTable(s)"),
    ("return", "makeReturn(s)"),
    ("call", "makeCall(s, /*isReturn=*/false)"),
    ("call_indirect", "makeCallIndirect(s, /*isReturn=*/false)"),
    ("return_call", "makeCall(s, /*isReturn=*/true)"),
    ("return_call_indirect", "makeCallIndirect(s, /*isReturn=*/true)"),
    ("drop", "makeDrop(s)"),
    ("select", "makeSelect(s)"),
    ("local.get", "makeLocalGet(s)"),
    ("local.set", "makeLocalSet(s)"),
    ("local.tee", "makeLocalTee(s)"),
    ("global.get", "makeGlobalGet(s)"),
    ("global.set", "makeGlobalSet(s)"),
    ("memory.init", "makeMemoryInit(s)"),
    ("data.drop", "makeDataDrop(s)"),
    ("memory.copy", "makeMemoryCopy(s)"),
    ("memory.fill", "makeMemoryFill(s)"),
    ("i32.load", "makeLoad(s, Type::i32, /*isAtomic=*/false)"),
    ("i64.load", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
    ("f32.load", "makeLoad(s, Type::f32, /*isAtomic=*/false)"),
    ("f64.load", "makeLoad(s, Type::f64, /*isAtomic=*/false)"),
    ("i32.load8_s", "makeLoad(s, Type::i32, /*isAtomic=*/false)"),
    ("i32.load8_u", "makeLoad(s, Type::i32, /*isAtomic=*/false)"),
    ("i32.load16_s", "makeLoad(s, Type::i32, /*isAtomic=*/false)"),
    ("i32.load16_u", "makeLoad(s, Type::i32, /*isAtomic=*/false)"),
    ("i64.load8_s", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
    ("i64.load8_u", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
    ("i64.load16_s", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
    ("i64.load16_u", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
    ("i64.load32_s", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
    ("i64.load32_u", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
    ("i32.store", "makeStore(s, Type::i32, /*isAtomic=*/false)"),
    ("i64.store", "makeStore(s, Type::i64, /*isAtomic=*/false)"),
    ("f32.store", "makeStore(s, Type::f32, /*isAtomic=*/false)"),
    ("f64.store", "makeStore(s, Type::f64, /*isAtomic=*/false)"),
    ("i32.store8", "makeStore(s, Type::i32, /*isAtomic=*/false)"),
    ("i32.store16", "makeStore(s, Type::i32, /*isAtomic=*/false)"),
    ("i64.store8", "makeStore(s, Type::i64, /*isAtomic=*/false)"),
    ("i64.store16", "makeStore(s, Type::i64, /*isAtomic=*/false)"),
    ("i64.store32", "makeStore(s, Type::i64, /*isAtomic=*/false)"),
    ("memory.size", "makeMemorySize(s)"),
    ("memory.grow", "makeMemoryGrow(s)"),
    ("i32.const", "makeConst(s, Type::i32)"),
    ("i64.const", "makeConst(s, Type::i64)"),
    ("f32.const", "makeConst(s, Type::f32)"),
    ("f64.const", "makeConst(s, Type::f64)"),
    ("i32.eqz", "makeUnary(s, UnaryOp::EqZInt32)"),
    ("i32.eq", "makeBinary(s, BinaryOp::EqInt32)"),
    ("i32.ne", "makeBinary(s, BinaryOp::NeInt32)"),
    ("i32.lt_s", "makeBinary(s, BinaryOp::LtSInt32)"),
    ("i32.lt_u", "makeBinary(s, BinaryOp::LtUInt32)"),
    ("i32.gt_s", "makeBinary(s, BinaryOp::GtSInt32)"),
    ("i32.gt_u", "makeBinary(s, BinaryOp::GtUInt32)"),
    ("i32.le_s", "makeBinary(s, BinaryOp::LeSInt32)"),
    ("i32.le_u", "makeBinary(s, BinaryOp::LeUInt32)"),
    ("i32.ge_s", "makeBinary(s, BinaryOp::GeSInt32)"),
    ("i32.ge_u", "makeBinary(s, BinaryOp::GeUInt32)"),
    ("i64.eqz", "makeUnary(s, UnaryOp::EqZInt64)"),
    ("i64.eq", "makeBinary(s, BinaryOp::EqInt64)"),
    ("i64.ne", "makeBinary(s, BinaryOp::NeInt64)"),
    ("i64.lt_s", "makeBinary(s, BinaryOp::LtSInt64)"),
    ("i64.lt_u", "makeBinary(s, BinaryOp::LtUInt64)"),
    ("i64.gt_s", "makeBinary(s, BinaryOp::GtSInt64)"),
    ("i64.gt_u", "makeBinary(s, BinaryOp::GtUInt64)"),
    ("i64.le_s", "makeBinary(s, BinaryOp::LeSInt64)"),
    ("i64.le_u", "makeBinary(s, BinaryOp::LeUInt64)"),
    ("i64.ge_s", "makeBinary(s, BinaryOp::GeSInt64)"),
    ("i64.ge_u", "makeBinary(s, BinaryOp::GeUInt64)"),
    ("f32.eq", "makeBinary(s, BinaryOp::EqFloat32)"),
    ("f32.ne", "makeBinary(s, BinaryOp::NeFloat32)"),
    ("f32.lt", "makeBinary(s, BinaryOp::LtFloat32)"),
    ("f32.gt", "makeBinary(s, BinaryOp::GtFloat32)"),
    ("f32.le", "makeBinary(s, BinaryOp::LeFloat32)"),
    ("f32.ge", "makeBinary(s, BinaryOp::GeFloat32)"),
    ("f64.eq", "makeBinary(s, BinaryOp::EqFloat64)"),
    ("f64.ne", "makeBinary(s, BinaryOp::NeFloat64)"),
    ("f64.lt", "makeBinary(s, BinaryOp::LtFloat64)"),
    ("f64.gt", "makeBinary(s, BinaryOp::GtFloat64)"),
    ("f64.le", "makeBinary(s, BinaryOp::LeFloat64)"),
    ("f64.ge", "makeBinary(s, BinaryOp::GeFloat64)"),
    ("i32.clz", "makeUnary(s, UnaryOp::ClzInt32)"),
    ("i32.ctz", "makeUnary(s, UnaryOp::CtzInt32)"),
    ("i32.popcnt", "makeUnary(s, UnaryOp::PopcntInt32)"),
    ("i32.add", "makeBinary(s, BinaryOp::AddInt32)"),
    ("i32.sub", "makeBinary(s, BinaryOp::SubInt32)"),
    ("i32.mul", "makeBinary(s, BinaryOp::MulInt32)"),
    ("i32.div_s", "makeBinary(s, BinaryOp::DivSInt32)"),
    ("i32.div_u", "makeBinary(s, BinaryOp::DivUInt32)"),
    ("i32.rem_s", "makeBinary(s, BinaryOp::RemSInt32)"),
    ("i32.rem_u", "makeBinary(s, BinaryOp::RemUInt32)"),
    ("i32.and", "makeBinary(s, BinaryOp::AndInt32)"),
    ("i32.or", "makeBinary(s, BinaryOp::OrInt32)"),
    ("i32.xor", "makeBinary(s, BinaryOp::XorInt32)"),
    ("i32.shl", "makeBinary(s, BinaryOp::ShlInt32)"),
    ("i32.shr_s", "makeBinary(s, BinaryOp::ShrSInt32)"),
    ("i32.shr_u", "makeBinary(s, BinaryOp::ShrUInt32)"),
    ("i32.rotl", "makeBinary(s, BinaryOp::RotLInt32)"),
    ("i32.rotr", "makeBinary(s, BinaryOp::RotRInt32)"),
    ("i64.clz", "makeUnary(s, UnaryOp::ClzInt64)"),
    ("i64.ctz", "makeUnary(s, UnaryOp::CtzInt64)"),
    ("i64.popcnt", "makeUnary(s, UnaryOp::PopcntInt64)"),
    ("i64.add", "makeBinary(s, BinaryOp::AddInt64)"),
    ("i64.sub", "makeBinary(s, BinaryOp::SubInt64)"),
    ("i64.mul", "makeBinary(s, BinaryOp::MulInt64)"),
    ("i64.div_s", "makeBinary(s, BinaryOp::DivSInt64)"),
    ("i64.div_u", "makeBinary(s, BinaryOp::DivUInt64)"),
    ("i64.rem_s", "makeBinary(s, BinaryOp::RemSInt64)"),
    ("i64.rem_u", "makeBinary(s, BinaryOp::RemUInt64)"),
    ("i64.and", "makeBinary(s, BinaryOp::AndInt64)"),
    ("i64.or", "makeBinary(s, BinaryOp::OrInt64)"),
    ("i64.xor", "makeBinary(s, BinaryOp::XorInt64)"),
    ("i64.shl", "makeBinary(s, BinaryOp::ShlInt64)"),
    ("i64.shr_s", "makeBinary(s, BinaryOp::ShrSInt64)"),
    ("i64.shr_u", "makeBinary(s, BinaryOp::ShrUInt64)"),
    ("i64.rotl", "makeBinary(s, BinaryOp::RotLInt64)"),
    ("i64.rotr", "makeBinary(s, BinaryOp::RotRInt64)"),
    ("f32.abs", "makeUnary(s, UnaryOp::AbsFloat32)"),
    ("f32.neg", "makeUnary(s, UnaryOp::NegFloat32)"),
    ("f32.ceil", "makeUnary(s, UnaryOp::CeilFloat32)"),
    ("f32.floor", "makeUnary(s, UnaryOp::FloorFloat32)"),
    ("f32.trunc", "makeUnary(s, UnaryOp::TruncFloat32)"),
    ("f32.nearest", "makeUnary(s, UnaryOp::NearestFloat32)"),
    ("f32.sqrt", "makeUnary(s, UnaryOp::SqrtFloat32)"),
    ("f32.add", "makeBinary(s, BinaryOp::AddFloat32)"),
    ("f32.sub", "makeBinary(s, BinaryOp::SubFloat32)"),
    ("f32.mul", "makeBinary(s, BinaryOp::MulFloat32)"),
    ("f32.div", "makeBinary(s, BinaryOp::DivFloat32)"),
    ("f32.min", "makeBinary(s, BinaryOp::MinFloat32)"),
    ("f32.max", "makeBinary(s, BinaryOp::MaxFloat32)"),
    ("f32.copysign", "makeBinary(s, BinaryOp::CopySignFloat32)"),
    ("f64.abs", "makeUnary(s, UnaryOp::AbsFloat64)"),
    ("f64.neg", "makeUnary(s, UnaryOp::NegFloat64)"),
    ("f64.ceil", "makeUnary(s, UnaryOp::CeilFloat64)"),
    ("f64.floor", "makeUnary(s, UnaryOp::FloorFloat64)"),
    ("f64.trunc", "makeUnary(s, UnaryOp::TruncFloat64)"),
    ("f64.nearest", "makeUnary(s, UnaryOp::NearestFloat64)"),
    ("f64.sqrt", "makeUnary(s, UnaryOp::SqrtFloat64)"),
    ("f64.add", "makeBinary(s, BinaryOp::AddFloat64)"),
    ("f64.sub", "makeBinary(s, BinaryOp::SubFloat64)"),
    ("f64.mul", "makeBinary(s, BinaryOp::MulFloat64)"),
    ("f64.div", "makeBinary(s, BinaryOp::DivFloat64)"),
    ("f64.min", "makeBinary(s, BinaryOp::MinFloat64)"),
    ("f64.max", "makeBinary(s, BinaryOp::MaxFloat64)"),
    ("f64.copysign", "makeBinary(s, BinaryOp::CopySignFloat64)"),
    ("i32.wrap_i64", "makeUnary(s, UnaryOp::WrapInt64)"),
    ("i32.trunc_f32_s", "makeUnary(s, UnaryOp::TruncSFloat32ToInt32)"),
    ("i32.trunc_f32_u", "makeUnary(s, UnaryOp::TruncUFloat32ToInt32)"),
    ("i32.trunc_f64_s", "makeUnary(s, UnaryOp::TruncSFloat64ToInt32)"),
    ("i32.trunc_f64_u", "makeUnary(s, UnaryOp::TruncUFloat64ToInt32)"),
    ("i64.extend_i32_s", "makeUnary(s, UnaryOp::ExtendSInt32)"),
    ("i64.extend_i32_u", "makeUnary(s, UnaryOp::ExtendUInt32)"),
    ("i64.trunc_f32_s", "makeUnary(s, UnaryOp::TruncSFloat32ToInt64)"),
    ("i64.trunc_f32_u", "makeUnary(s, UnaryOp::TruncUFloat32ToInt64)"),
    ("i64.trunc_f64_s", "makeUnary(s, UnaryOp::TruncSFloat64ToInt64)"),
    ("i64.trunc_f64_u", "makeUnary(s, UnaryOp::TruncUFloat64ToInt64)"),
    ("f32.convert_i32_s", "makeUnary(s, UnaryOp::ConvertSInt32ToFloat32)"),
    ("f32.convert_i32_u", "makeUnary(s, UnaryOp::ConvertUInt32ToFloat32)"),
    ("f32.convert_i64_s", "makeUnary(s, UnaryOp::ConvertSInt64ToFloat32)"),
    ("f32.convert_i64_u", "makeUnary(s, UnaryOp::ConvertUInt64ToFloat32)"),
    ("f32.demote_f64", "makeUnary(s, UnaryOp::DemoteFloat64)"),
    ("f64.convert_i32_s", "makeUnary(s, UnaryOp::ConvertSInt32ToFloat64)"),
    ("f64.convert_i32_u", "makeUnary(s, UnaryOp::ConvertUInt32ToFloat64)"),
    ("f64.convert_i64_s", "makeUnary(s, UnaryOp::ConvertSInt64ToFloat64)"),
    ("f64.convert_i64_u", "makeUnary(s, UnaryOp::ConvertUInt64ToFloat64)"),
    ("f64.promote_f32", "makeUnary(s, UnaryOp::PromoteFloat32)"),
    ("i32.reinterpret_f32", "makeUnary(s, UnaryOp::ReinterpretFloat32)"),
    ("i64.reinterpret_f64", "makeUnary(s, UnaryOp::ReinterpretFloat64)"),
    ("f32.reinterpret_i32", "makeUnary(s, UnaryOp::ReinterpretInt32)"),
    ("f64.reinterpret_i64", "makeUnary(s, UnaryOp::ReinterpretInt64)"),
    ("i32.extend8_s", "makeUnary(s, UnaryOp::ExtendS8Int32)"),
    ("i32.extend16_s", "makeUnary(s, UnaryOp::ExtendS16Int32)"),
    ("i64.extend8_s", "makeUnary(s, UnaryOp::ExtendS8Int64)"),
    ("i64.extend16_s", "makeUnary(s, UnaryOp::ExtendS16Int64)"),
    ("i64.extend32_s", "makeUnary(s, UnaryOp::ExtendS32Int64)"),
    # atomic instructions
    ("memory.atomic.notify", "makeAtomicNotify(s)"),
    ("memory.atomic.wait32", "makeAtomicWait(s, Type::i32)"),
    ("memory.atomic.wait64", "makeAtomicWait(s, Type::i64)"),
    ("atomic.fence", "makeAtomicFence(s)"),
    ("i32.atomic.load8_u", "makeLoad(s, Type::i32, /*isAtomic=*/true)"),
    ("i32.atomic.load16_u", "makeLoad(s, Type::i32, /*isAtomic=*/true)"),
    ("i32.atomic.load", "makeLoad(s, Type::i32, /*isAtomic=*/true)"),
    ("i64.atomic.load8_u", "makeLoad(s, Type::i64, /*isAtomic=*/true)"),
    ("i64.atomic.load16_u", "makeLoad(s, Type::i64, /*isAtomic=*/true)"),
    ("i64.atomic.load32_u", "makeLoad(s, Type::i64, /*isAtomic=*/true)"),
    ("i64.atomic.load", "makeLoad(s, Type::i64, /*isAtomic=*/true)"),
    ("i32.atomic.store8", "makeStore(s, Type::i32, /*isAtomic=*/true)"),
    ("i32.atomic.store16", "makeStore(s, Type::i32, /*isAtomic=*/true)"),
    ("i32.atomic.store", "makeStore(s, Type::i32, /*isAtomic=*/true)"),
    ("i64.atomic.store8", "makeStore(s, Type::i64, /*isAtomic=*/true)"),
    ("i64.atomic.store16", "makeStore(s, Type::i64, /*isAtomic=*/true)"),
    ("i64.atomic.store32", "makeStore(s, Type::i64, /*isAtomic=*/true)"),
    ("i64.atomic.store", "makeStore(s, Type::i64, /*isAtomic=*/true)"),
    ("i32.atomic.rmw8.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw16.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw.add", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i64.atomic.rmw8.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw16.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw32.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw.add", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i32.atomic.rmw8.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw16.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw.sub", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i64.atomic.rmw8.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw16.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw32.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw.sub", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i32.atomic.rmw8.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw16.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw.and", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i64.atomic.rmw8.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw16.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw32.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw.and", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i32.atomic.rmw8.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw16.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw.or", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i64.atomic.rmw8.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw16.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw32.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw.or", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i32.atomic.rmw8.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw16.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw.xor", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i64.atomic.rmw8.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw16.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw32.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw.xor", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i32.atomic.rmw8.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw16.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw.xchg", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i64.atomic.rmw8.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw16.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw32.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw.xchg", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i32.atomic.rmw8.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw16.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i32.atomic.rmw.cmpxchg", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
    ("i64.atomic.rmw8.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw16.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw32.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    ("i64.atomic.rmw.cmpxchg", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
    # nontrapping float-to-int instructions
    ("i32.trunc_sat_f32_s", "makeUnary(s, UnaryOp::TruncSatSFloat32ToInt32)"),
    ("i32.trunc_sat_f32_u", "makeUnary(s, UnaryOp::TruncSatUFloat32ToInt32)"),
    ("i32.trunc_sat_f64_s", "makeUnary(s, UnaryOp::TruncSatSFloat64ToInt32)"),
    ("i32.trunc_sat_f64_u", "makeUnary(s, UnaryOp::TruncSatUFloat64ToInt32)"),
    ("i64.trunc_sat_f32_s", "makeUnary(s, UnaryOp::TruncSatSFloat32ToInt64)"),
    ("i64.trunc_sat_f32_u", "makeUnary(s, UnaryOp::TruncSatUFloat32ToInt64)"),
    ("i64.trunc_sat_f64_s", "makeUnary(s, UnaryOp::TruncSatSFloat64ToInt64)"),
    ("i64.trunc_sat_f64_u", "makeUnary(s, UnaryOp::TruncSatUFloat64ToInt64)"),
    # SIMD ops
    ("v128.load", "makeLoad(s, Type::v128, /*isAtomic=*/false)"),
    ("v128.store", "makeStore(s, Type::v128, /*isAtomic=*/false)"),
    ("v128.const", "makeConst(s, Type::v128)"),
    ("i8x16.shuffle", "makeSIMDShuffle(s)"),
    ("i8x16.splat", "makeUnary(s, UnaryOp::SplatVecI8x16)"),
    ("i8x16.extract_lane_s", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneSVecI8x16, 16)"),
    ("i8x16.extract_lane_u", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneUVecI8x16, 16)"),
    ("i8x16.replace_lane", "makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI8x16, 16)"),
    ("i16x8.splat", "makeUnary(s, UnaryOp::SplatVecI16x8)"),
    ("i16x8.extract_lane_s", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneSVecI16x8, 8)"),
    ("i16x8.extract_lane_u", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneUVecI16x8, 8)"),
    ("i16x8.replace_lane", "makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI16x8, 8)"),
    ("i32x4.splat", "makeUnary(s, UnaryOp::SplatVecI32x4)"),
    ("i32x4.extract_lane", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneVecI32x4, 4)"),
    ("i32x4.replace_lane", "makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI32x4, 4)"),
    ("i64x2.splat", "makeUnary(s, UnaryOp::SplatVecI64x2)"),
    ("i64x2.extract_lane", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneVecI64x2, 2)"),
    ("i64x2.replace_lane", "makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI64x2, 2)"),
    ("f32x4.splat", "makeUnary(s, UnaryOp::SplatVecF32x4)"),
    ("f32x4.extract_lane", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneVecF32x4, 4)"),
    ("f32x4.replace_lane", "makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecF32x4, 4)"),
    ("f64x2.splat", "makeUnary(s, UnaryOp::SplatVecF64x2)"),
    ("f64x2.extract_lane", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneVecF64x2, 2)"),
    ("f64x2.replace_lane", "makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecF64x2, 2)"),
    ("i8x16.eq", "makeBinary(s, BinaryOp::EqVecI8x16)"),
    ("i8x16.ne", "makeBinary(s, BinaryOp::NeVecI8x16)"),
    ("i8x16.lt_s", "makeBinary(s, BinaryOp::LtSVecI8x16)"),
    ("i8x16.lt_u", "makeBinary(s, BinaryOp::LtUVecI8x16)"),
    ("i8x16.gt_s", "makeBinary(s, BinaryOp::GtSVecI8x16)"),
    ("i8x16.gt_u", "makeBinary(s, BinaryOp::GtUVecI8x16)"),
    ("i8x16.le_s", "makeBinary(s, BinaryOp::LeSVecI8x16)"),
    ("i8x16.le_u", "makeBinary(s, BinaryOp::LeUVecI8x16)"),
    ("i8x16.ge_s", "makeBinary(s, BinaryOp::GeSVecI8x16)"),
    ("i8x16.ge_u", "makeBinary(s, BinaryOp::GeUVecI8x16)"),
    ("i16x8.eq", "makeBinary(s, BinaryOp::EqVecI16x8)"),
    ("i16x8.ne", "makeBinary(s, BinaryOp::NeVecI16x8)"),
    ("i16x8.lt_s", "makeBinary(s, BinaryOp::LtSVecI16x8)"),
    ("i16x8.lt_u", "makeBinary(s, BinaryOp::LtUVecI16x8)"),
    ("i16x8.gt_s", "makeBinary(s, BinaryOp::GtSVecI16x8)"),
    ("i16x8.gt_u", "makeBinary(s, BinaryOp::GtUVecI16x8)"),
    ("i16x8.le_s", "makeBinary(s, BinaryOp::LeSVecI16x8)"),
    ("i16x8.le_u", "makeBinary(s, BinaryOp::LeUVecI16x8)"),
    ("i16x8.ge_s", "makeBinary(s, BinaryOp::GeSVecI16x8)"),
    ("i16x8.ge_u", "makeBinary(s, BinaryOp::GeUVecI16x8)"),
    ("i32x4.eq", "makeBinary(s, BinaryOp::EqVecI32x4)"),
    ("i32x4.ne", "makeBinary(s, BinaryOp::NeVecI32x4)"),
    ("i32x4.lt_s", "makeBinary(s, BinaryOp::LtSVecI32x4)"),
    ("i32x4.lt_u", "makeBinary(s, BinaryOp::LtUVecI32x4)"),
    ("i32x4.gt_s", "makeBinary(s, BinaryOp::GtSVecI32x4)"),
    ("i32x4.gt_u", "makeBinary(s, BinaryOp::GtUVecI32x4)"),
    ("i32x4.le_s", "makeBinary(s, BinaryOp::LeSVecI32x4)"),
    ("i32x4.le_u", "makeBinary(s, BinaryOp::LeUVecI32x4)"),
    ("i32x4.ge_s", "makeBinary(s, BinaryOp::GeSVecI32x4)"),
    ("i32x4.ge_u", "makeBinary(s, BinaryOp::GeUVecI32x4)"),
    ("i64x2.eq", "makeBinary(s, BinaryOp::EqVecI64x2)"),
    ("i64x2.ne", "makeBinary(s, BinaryOp::NeVecI64x2)"),
    ("i64x2.lt_s", "makeBinary(s, BinaryOp::LtSVecI64x2)"),
    ("i64x2.gt_s", "makeBinary(s, BinaryOp::GtSVecI64x2)"),
    ("i64x2.le_s", "makeBinary(s, BinaryOp::LeSVecI64x2)"),
    ("i64x2.ge_s", "makeBinary(s, BinaryOp::GeSVecI64x2)"),
    ("f32x4.eq", "makeBinary(s, BinaryOp::EqVecF32x4)"),
    ("f32x4.ne", "makeBinary(s, BinaryOp::NeVecF32x4)"),
    ("f32x4.lt", "makeBinary(s, BinaryOp::LtVecF32x4)"),
    ("f32x4.gt", "makeBinary(s, BinaryOp::GtVecF32x4)"),
    ("f32x4.le", "makeBinary(s, BinaryOp::LeVecF32x4)"),
    ("f32x4.ge", "makeBinary(s, BinaryOp::GeVecF32x4)"),
    ("f64x2.eq", "makeBinary(s, BinaryOp::EqVecF64x2)"),
    ("f64x2.ne", "makeBinary(s, BinaryOp::NeVecF64x2)"),
    ("f64x2.lt", "makeBinary(s, BinaryOp::LtVecF64x2)"),
    ("f64x2.gt", "makeBinary(s, BinaryOp::GtVecF64x2)"),
    ("f64x2.le", "makeBinary(s, BinaryOp::LeVecF64x2)"),
    ("f64x2.ge", "makeBinary(s, BinaryOp::GeVecF64x2)"),
    ("v128.not", "makeUnary(s, UnaryOp::NotVec128)"),
    ("v128.and", "makeBinary(s, BinaryOp::AndVec128)"),
    ("v128.or", "makeBinary(s, BinaryOp::OrVec128)"),
    ("v128.xor", "makeBinary(s, BinaryOp::XorVec128)"),
    ("v128.andnot", "makeBinary(s, BinaryOp::AndNotVec128)"),
    ("v128.any_true", "makeUnary(s, UnaryOp::AnyTrueVec128)"),
    ("v128.bitselect", "makeSIMDTernary(s, SIMDTernaryOp::Bitselect)"),
    ("v128.load8_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load8LaneVec128)"),
    ("v128.load16_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load16LaneVec128)"),
    ("v128.load32_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load32LaneVec128)"),
    ("v128.load64_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load64LaneVec128)"),
    ("v128.store8_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store8LaneVec128)"),
    ("v128.store16_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store16LaneVec128)"),
    ("v128.store32_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store32LaneVec128)"),
    ("v128.store64_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store64LaneVec128)"),
    ("i8x16.popcnt", "makeUnary(s, UnaryOp::PopcntVecI8x16)"),
    ("i8x16.abs", "makeUnary(s, UnaryOp::AbsVecI8x16)"),
    ("i8x16.neg", "makeUnary(s, UnaryOp::NegVecI8x16)"),
    ("i8x16.all_true", "makeUnary(s, UnaryOp::AllTrueVecI8x16)"),
    ("i8x16.bitmask", "makeUnary(s, UnaryOp::BitmaskVecI8x16)"),
    ("i8x16.shl", "makeSIMDShift(s, SIMDShiftOp::ShlVecI8x16)"),
    ("i8x16.shr_s", "makeSIMDShift(s, SIMDShiftOp::ShrSVecI8x16)"),
    ("i8x16.shr_u", "makeSIMDShift(s, SIMDShiftOp::ShrUVecI8x16)"),
    ("i8x16.add", "makeBinary(s, BinaryOp::AddVecI8x16)"),
    ("i8x16.add_sat_s", "makeBinary(s, BinaryOp::AddSatSVecI8x16)"),
    ("i8x16.add_sat_u", "makeBinary(s, BinaryOp::AddSatUVecI8x16)"),
    ("i8x16.sub", "makeBinary(s, BinaryOp::SubVecI8x16)"),
    ("i8x16.sub_sat_s", "makeBinary(s, BinaryOp::SubSatSVecI8x16)"),
    ("i8x16.sub_sat_u", "makeBinary(s, BinaryOp::SubSatUVecI8x16)"),
    ("i8x16.min_s", "makeBinary(s, BinaryOp::MinSVecI8x16)"),
    ("i8x16.min_u", "makeBinary(s, BinaryOp::MinUVecI8x16)"),
    ("i8x16.max_s", "makeBinary(s, BinaryOp::MaxSVecI8x16)"),
    ("i8x16.max_u", "makeBinary(s, BinaryOp::MaxUVecI8x16)"),
    ("i8x16.avgr_u", "makeBinary(s, BinaryOp::AvgrUVecI8x16)"),
    ("i16x8.abs", "makeUnary(s, UnaryOp::AbsVecI16x8)"),
    ("i16x8.neg", "makeUnary(s, UnaryOp::NegVecI16x8)"),
    ("i16x8.all_true", "makeUnary(s, UnaryOp::AllTrueVecI16x8)"),
    ("i16x8.bitmask", "makeUnary(s, UnaryOp::BitmaskVecI16x8)"),
    ("i16x8.shl", "makeSIMDShift(s, SIMDShiftOp::ShlVecI16x8)"),
    ("i16x8.shr_s", "makeSIMDShift(s, SIMDShiftOp::ShrSVecI16x8)"),
    ("i16x8.shr_u", "makeSIMDShift(s, SIMDShiftOp::ShrUVecI16x8)"),
    ("i16x8.add", "makeBinary(s, BinaryOp::AddVecI16x8)"),
    ("i16x8.add_sat_s", "makeBinary(s, BinaryOp::AddSatSVecI16x8)"),
    ("i16x8.add_sat_u", "makeBinary(s, BinaryOp::AddSatUVecI16x8)"),
    ("i16x8.sub", "makeBinary(s, BinaryOp::SubVecI16x8)"),
    ("i16x8.sub_sat_s", "makeBinary(s, BinaryOp::SubSatSVecI16x8)"),
    ("i16x8.sub_sat_u", "makeBinary(s, BinaryOp::SubSatUVecI16x8)"),
    ("i16x8.mul", "makeBinary(s, BinaryOp::MulVecI16x8)"),
    ("i16x8.min_s", "makeBinary(s, BinaryOp::MinSVecI16x8)"),
    ("i16x8.min_u", "makeBinary(s, BinaryOp::MinUVecI16x8)"),
    ("i16x8.max_s", "makeBinary(s, BinaryOp::MaxSVecI16x8)"),
    ("i16x8.max_u", "makeBinary(s, BinaryOp::MaxUVecI16x8)"),
    ("i16x8.avgr_u", "makeBinary(s, BinaryOp::AvgrUVecI16x8)"),
    ("i16x8.q15mulr_sat_s", "makeBinary(s, BinaryOp::Q15MulrSatSVecI16x8)"),
    ("i16x8.extmul_low_i8x16_s", "makeBinary(s, BinaryOp::ExtMulLowSVecI16x8)"),
    ("i16x8.extmul_high_i8x16_s", "makeBinary(s, BinaryOp::ExtMulHighSVecI16x8)"),
    ("i16x8.extmul_low_i8x16_u", "makeBinary(s, BinaryOp::ExtMulLowUVecI16x8)"),
    ("i16x8.extmul_high_i8x16_u", "makeBinary(s, BinaryOp::ExtMulHighUVecI16x8)"),
    ("i32x4.abs", "makeUnary(s, UnaryOp::AbsVecI32x4)"),
    ("i32x4.neg", "makeUnary(s, UnaryOp::NegVecI32x4)"),
    ("i32x4.all_true", "makeUnary(s, UnaryOp::AllTrueVecI32x4)"),
    ("i32x4.bitmask", "makeUnary(s, UnaryOp::BitmaskVecI32x4)"),
    ("i32x4.shl", "makeSIMDShift(s, SIMDShiftOp::ShlVecI32x4)"),
    ("i32x4.shr_s", "makeSIMDShift(s, SIMDShiftOp::ShrSVecI32x4)"),
    ("i32x4.shr_u", "makeSIMDShift(s, SIMDShiftOp::ShrUVecI32x4)"),
    ("i32x4.add", "makeBinary(s, BinaryOp::AddVecI32x4)"),
    ("i32x4.sub", "makeBinary(s, BinaryOp::SubVecI32x4)"),
    ("i32x4.mul", "makeBinary(s, BinaryOp::MulVecI32x4)"),
    ("i32x4.min_s", "makeBinary(s, BinaryOp::MinSVecI32x4)"),
    ("i32x4.min_u", "makeBinary(s, BinaryOp::MinUVecI32x4)"),
    ("i32x4.max_s", "makeBinary(s, BinaryOp::MaxSVecI32x4)"),
    ("i32x4.max_u", "makeBinary(s, BinaryOp::MaxUVecI32x4)"),
    ("i32x4.dot_i16x8_s", "makeBinary(s, BinaryOp::DotSVecI16x8ToVecI32x4)"),
    ("i32x4.extmul_low_i16x8_s", "makeBinary(s, BinaryOp::ExtMulLowSVecI32x4)"),
    ("i32x4.extmul_high_i16x8_s", "makeBinary(s, BinaryOp::ExtMulHighSVecI32x4)"),
    ("i32x4.extmul_low_i16x8_u", "makeBinary(s, BinaryOp::ExtMulLowUVecI32x4)"),
    ("i32x4.extmul_high_i16x8_u", "makeBinary(s, BinaryOp::ExtMulHighUVecI32x4)"),
    ("i64x2.abs", "makeUnary(s, UnaryOp::AbsVecI64x2)"),
    ("i64x2.neg", "makeUnary(s, UnaryOp::NegVecI64x2)"),
    ("i64x2.all_true", "makeUnary(s, UnaryOp::AllTrueVecI64x2)"),
    ("i64x2.bitmask", "makeUnary(s, UnaryOp::BitmaskVecI64x2)"),
    ("i64x2.shl", "makeSIMDShift(s, SIMDShiftOp::ShlVecI64x2)"),
    ("i64x2.shr_s", "makeSIMDShift(s, SIMDShiftOp::ShrSVecI64x2)"),
    ("i64x2.shr_u", "makeSIMDShift(s, SIMDShiftOp::ShrUVecI64x2)"),
    ("i64x2.add", "makeBinary(s, BinaryOp::AddVecI64x2)"),
    ("i64x2.sub", "makeBinary(s, BinaryOp::SubVecI64x2)"),
    ("i64x2.mul", "makeBinary(s, BinaryOp::MulVecI64x2)"),
    ("i64x2.extmul_low_i32x4_s", "makeBinary(s, BinaryOp::ExtMulLowSVecI64x2)"),
    ("i64x2.extmul_high_i32x4_s", "makeBinary(s, BinaryOp::ExtMulHighSVecI64x2)"),
    ("i64x2.extmul_low_i32x4_u", "makeBinary(s, BinaryOp::ExtMulLowUVecI64x2)"),
    ("i64x2.extmul_high_i32x4_u", "makeBinary(s, BinaryOp::ExtMulHighUVecI64x2)"),
    ("f32x4.abs", "makeUnary(s, UnaryOp::AbsVecF32x4)"),
    ("f32x4.neg", "makeUnary(s, UnaryOp::NegVecF32x4)"),
    ("f32x4.sqrt", "makeUnary(s, UnaryOp::SqrtVecF32x4)"),
    ("f32x4.add", "makeBinary(s, BinaryOp::AddVecF32x4)"),
    ("f32x4.sub", "makeBinary(s, BinaryOp::SubVecF32x4)"),
    ("f32x4.mul", "makeBinary(s, BinaryOp::MulVecF32x4)"),
    ("f32x4.div", "makeBinary(s, BinaryOp::DivVecF32x4)"),
    ("f32x4.min", "makeBinary(s, BinaryOp::MinVecF32x4)"),
    ("f32x4.max", "makeBinary(s, BinaryOp::MaxVecF32x4)"),
    ("f32x4.pmin", "makeBinary(s, BinaryOp::PMinVecF32x4)"),
    ("f32x4.pmax", "makeBinary(s, BinaryOp::PMaxVecF32x4)"),
    ("f32x4.ceil", "makeUnary(s, UnaryOp::CeilVecF32x4)"),
    ("f32x4.floor", "makeUnary(s, UnaryOp::FloorVecF32x4)"),
    ("f32x4.trunc", "makeUnary(s, UnaryOp::TruncVecF32x4)"),
    ("f32x4.nearest", "makeUnary(s, UnaryOp::NearestVecF32x4)"),
    ("f64x2.abs", "makeUnary(s, UnaryOp::AbsVecF64x2)"),
    ("f64x2.neg", "makeUnary(s, UnaryOp::NegVecF64x2)"),
    ("f64x2.sqrt", "makeUnary(s, UnaryOp::SqrtVecF64x2)"),
    ("f64x2.add", "makeBinary(s, BinaryOp::AddVecF64x2)"),
    ("f64x2.sub", "makeBinary(s, BinaryOp::SubVecF64x2)"),
    ("f64x2.mul", "makeBinary(s, BinaryOp::MulVecF64x2)"),
    ("f64x2.div", "makeBinary(s, BinaryOp::DivVecF64x2)"),
    ("f64x2.min", "makeBinary(s, BinaryOp::MinVecF64x2)"),
    ("f64x2.max", "makeBinary(s, BinaryOp::MaxVecF64x2)"),
    ("f64x2.pmin", "makeBinary(s, BinaryOp::PMinVecF64x2)"),
    ("f64x2.pmax", "makeBinary(s, BinaryOp::PMaxVecF64x2)"),
    ("f64x2.ceil", "makeUnary(s, UnaryOp::CeilVecF64x2)"),
    ("f64x2.floor", "makeUnary(s, UnaryOp::FloorVecF64x2)"),
    ("f64x2.trunc", "makeUnary(s, UnaryOp::TruncVecF64x2)"),
    ("f64x2.nearest", "makeUnary(s, UnaryOp::NearestVecF64x2)"),
    ("i32x4.trunc_sat_f32x4_s", "makeUnary(s, UnaryOp::TruncSatSVecF32x4ToVecI32x4)"),
    ("i32x4.trunc_sat_f32x4_u", "makeUnary(s, UnaryOp::TruncSatUVecF32x4ToVecI32x4)"),
    ("f32x4.convert_i32x4_s", "makeUnary(s, UnaryOp::ConvertSVecI32x4ToVecF32x4)"),
    ("f32x4.convert_i32x4_u", "makeUnary(s, UnaryOp::ConvertUVecI32x4ToVecF32x4)"),
    ("v128.load8_splat", "makeSIMDLoad(s, SIMDLoadOp::Load8SplatVec128)"),
    ("v128.load16_splat", "makeSIMDLoad(s, SIMDLoadOp::Load16SplatVec128)"),
    ("v128.load32_splat", "makeSIMDLoad(s, SIMDLoadOp::Load32SplatVec128)"),
    ("v128.load64_splat", "makeSIMDLoad(s, SIMDLoadOp::Load64SplatVec128)"),
    ("v128.load8x8_s", "makeSIMDLoad(s, SIMDLoadOp::Load8x8SVec128)"),
    ("v128.load8x8_u", "makeSIMDLoad(s, SIMDLoadOp::Load8x8UVec128)"),
    ("v128.load16x4_s", "makeSIMDLoad(s, SIMDLoadOp::Load16x4SVec128)"),
    ("v128.load16x4_u", "makeSIMDLoad(s, SIMDLoadOp::Load16x4UVec128)"),
    ("v128.load32x2_s", "makeSIMDLoad(s, SIMDLoadOp::Load32x2SVec128)"),
    ("v128.load32x2_u", "makeSIMDLoad(s, SIMDLoadOp::Load32x2UVec128)"),
    ("v128.load32_zero", "makeSIMDLoad(s, SIMDLoadOp::Load32ZeroVec128)"),
    ("v128.load64_zero", "makeSIMDLoad(s, SIMDLoadOp::Load64ZeroVec128)"),
    ("i8x16.narrow_i16x8_s", "makeBinary(s, BinaryOp::NarrowSVecI16x8ToVecI8x16)"),
    ("i8x16.narrow_i16x8_u", "makeBinary(s, BinaryOp::NarrowUVecI16x8ToVecI8x16)"),
    ("i16x8.narrow_i32x4_s", "makeBinary(s, BinaryOp::NarrowSVecI32x4ToVecI16x8)"),
    ("i16x8.narrow_i32x4_u", "makeBinary(s, BinaryOp::NarrowUVecI32x4ToVecI16x8)"),
    ("i16x8.extend_low_i8x16_s", "makeUnary(s, UnaryOp::ExtendLowSVecI8x16ToVecI16x8)"),
    ("i16x8.extend_high_i8x16_s", "makeUnary(s, UnaryOp::ExtendHighSVecI8x16ToVecI16x8)"),
    ("i16x8.extend_low_i8x16_u", "makeUnary(s, UnaryOp::ExtendLowUVecI8x16ToVecI16x8)"),
    ("i16x8.extend_high_i8x16_u", "makeUnary(s, UnaryOp::ExtendHighUVecI8x16ToVecI16x8)"),
    ("i32x4.extend_low_i16x8_s", "makeUnary(s, UnaryOp::ExtendLowSVecI16x8ToVecI32x4)"),
    ("i32x4.extend_high_i16x8_s", "makeUnary(s, UnaryOp::ExtendHighSVecI16x8ToVecI32x4)"),
    ("i32x4.extend_low_i16x8_u", "makeUnary(s, UnaryOp::ExtendLowUVecI16x8ToVecI32x4)"),
    ("i32x4.extend_high_i16x8_u", "makeUnary(s, UnaryOp::ExtendHighUVecI16x8ToVecI32x4)"),
    ("i64x2.extend_low_i32x4_s", "makeUnary(s, UnaryOp::ExtendLowSVecI32x4ToVecI64x2)"),
    ("i64x2.extend_high_i32x4_s", "makeUnary(s, UnaryOp::ExtendHighSVecI32x4ToVecI64x2)"),
    ("i64x2.extend_low_i32x4_u", "makeUnary(s, UnaryOp::ExtendLowUVecI32x4ToVecI64x2)"),
    ("i64x2.extend_high_i32x4_u", "makeUnary(s, UnaryOp::ExtendHighUVecI32x4ToVecI64x2)"),
    ("i8x16.swizzle", "makeBinary(s, BinaryOp::SwizzleVec8x16)"),
    ("i16x8.extadd_pairwise_i8x16_s", "makeUnary(s, UnaryOp::ExtAddPairwiseSVecI8x16ToI16x8)"),
    ("i16x8.extadd_pairwise_i8x16_u", "makeUnary(s, UnaryOp::ExtAddPairwiseUVecI8x16ToI16x8)"),
    ("i32x4.extadd_pairwise_i16x8_s", "makeUnary(s, UnaryOp::ExtAddPairwiseSVecI16x8ToI32x4)"),
    ("i32x4.extadd_pairwise_i16x8_u", "makeUnary(s, UnaryOp::ExtAddPairwiseUVecI16x8ToI32x4)"),
    ("f64x2.convert_low_i32x4_s", "makeUnary(s, UnaryOp::ConvertLowSVecI32x4ToVecF64x2)"),
    ("f64x2.convert_low_i32x4_u", "makeUnary(s, UnaryOp::ConvertLowUVecI32x4ToVecF64x2)"),
    ("i32x4.trunc_sat_f64x2_s_zero", "makeUnary(s, UnaryOp::TruncSatZeroSVecF64x2ToVecI32x4)"),
    ("i32x4.trunc_sat_f64x2_u_zero", "makeUnary(s, UnaryOp::TruncSatZeroUVecF64x2ToVecI32x4)"),
    ("f32x4.demote_f64x2_zero", "makeUnary(s, UnaryOp::DemoteZeroVecF64x2ToVecF32x4)"),
    ("f64x2.promote_low_f32x4", "makeUnary(s, UnaryOp::PromoteLowVecF32x4ToVecF64x2)"),
    # reference types instructions
    # TODO Add table instructions
    ("ref.null", "makeRefNull(s)"),
    ("ref.is_null", "makeRefIs(s, RefIsNull)"),
    ("ref.func", "makeRefFunc(s)"),
    # exception handling instructions
    ("try", "makeTry(s)"),
    ("throw", "makeThrow(s)"),
    ("rethrow", "makeRethrow(s)"),
    # Multivalue pseudoinstructions
    ("tuple.make", "makeTupleMake(s)"),
    ("tuple.extract", "makeTupleExtract(s)"),
    ("pop", "makePop(s)"),
    # Typed function references instructions
    ("call_ref", "makeCallRef(s, /*isReturn=*/false)"),
    ("return_call_ref", "makeCallRef(s, /*isReturn=*/true)"),
    # GC
    ("ref.eq", "makeRefEq(s)"),
    ("i31.new", "makeI31New(s)"),
    ("i31.get_s", "makeI31Get(s, true)"),
    ("i31.get_u", "makeI31Get(s, false)"),
    ("ref.test", "makeRefTest(s)"),
    ("ref.cast", "makeRefCast(s)"),
    ("br_on_null", "makeBrOn(s, BrOnNull)"),
    ("br_on_non_null", "makeBrOn(s, BrOnNonNull)"),
    ("br_on_cast", "makeBrOn(s, BrOnCast)"),
    ("br_on_cast_fail", "makeBrOn(s, BrOnCastFail)"),
    ("br_on_func", "makeBrOn(s, BrOnFunc)"),
    ("br_on_non_func", "makeBrOn(s, BrOnNonFunc)"),
    ("br_on_data", "makeBrOn(s, BrOnData)"),
    ("br_on_non_data", "makeBrOn(s, BrOnNonData)"),
    ("br_on_i31", "makeBrOn(s, BrOnI31)"),
    ("br_on_non_i31", "makeBrOn(s, BrOnNonI31)"),
    ("rtt.canon", "makeRttCanon(s)"),
    ("rtt.sub", "makeRttSub(s)"),
    ("struct.new_with_rtt", "makeStructNew(s, false)"),
    ("struct.new_default_with_rtt", "makeStructNew(s, true)"),
    ("struct.get", "makeStructGet(s)"),
    ("struct.get_s", "makeStructGet(s, true)"),
    ("struct.get_u", "makeStructGet(s, false)"),
    ("struct.set", "makeStructSet(s)"),
    ("array.new_with_rtt", "makeArrayNew(s, false)"),
    ("array.new_default_with_rtt", "makeArrayNew(s, true)"),
    ("array.get", "makeArrayGet(s)"),
    ("array.get_s", "makeArrayGet(s, true)"),
    ("array.get_u", "makeArrayGet(s, false)"),
    ("array.set", "makeArraySet(s)"),
    ("array.len", "makeArrayLen(s)"),
    ("array.copy", "makeArrayCopy(s)"),
    ("ref.is_func", "makeRefIs(s, RefIsFunc)"),
    ("ref.is_data", "makeRefIs(s, RefIsData)"),
    ("ref.is_i31", "makeRefIs(s, RefIsI31)"),
    ("ref.as_non_null", "makeRefAs(s, RefAsNonNull)"),
    ("ref.as_func", "makeRefAs(s, RefAsFunc)"),
    ("ref.as_data", "makeRefAs(s, RefAsData)"),
    ("ref.as_i31", "makeRefAs(s, RefAsI31)"),
]
class CodePrinter:
    """Prints generated C++ source lines at the current indentation depth."""

    # Depth is shared class-wide so nested `with printer.indent():` blocks
    # compose no matter which instance they were entered through.
    indents = 0

    def __enter__(self):
        CodePrinter.indents += 1

    def __exit__(self, *args):
        CodePrinter.indents -= 1

    def indent(self):
        """Return a context manager that deepens indentation by one level."""
        # Entering/exiting this object adjusts the shared depth.
        return self

    def print_line(self, line):
        """Emit `line` prefixed with the current indentation."""
        pad = " " * CodePrinter.indents
        print(pad + line)
class Node:
    """A trie node mapping instruction-name fragments to parser expressions."""

    def __init__(self, expr=None, children=None, inst=None):
        # The C++ expression to return if the string ends at this node.
        self.expr = expr
        # Maps unique string fragments to child nodes.
        self.children = children if children else {}
        # The full instruction name leading to this node.
        self.inst = inst

    @staticmethod
    def _common_prefix(a, b):
        """Return the longest common prefix of strings `a` and `b`."""
        # Count matching leading characters, then slice once; this avoids
        # the quadratic repeated `a = a[1:]` slicing of the old version.
        length = 0
        for x, y in zip(a, b):
            if x != y:
                break
            length += 1
        return a[:length]

    def do_insert(self, full_inst, inst, expr):
        """Insert the remaining fragment `inst` of `full_inst` below this node.

        `expr` is the C++ expression to emit once `full_inst` is matched.
        Raises AssertionError if `full_inst` was already inserted.
        """
        if not inst:
            # The whole instruction has been consumed: this node is the leaf.
            assert self.expr is None, "Repeated instruction " + full_inst
            self.expr = expr
            self.inst = full_inst
            return
        # Find an existing key that shares a prefix with the fragment.
        prefix, key = "", None
        for k in self.children:
            prefix = Node._common_prefix(inst, k)
            if prefix:
                key = k
                break
        if key is None:
            # No shared prefix: insert a fresh leaf and stop.
            self.children[inst] = Node(expr, inst=full_inst)
            return
        key_remainder = key[len(prefix):]
        if key_remainder:
            # Split the key: move everything after the prefix to a new node.
            child = self.children.pop(key)
            self.children[prefix] = Node(children={key_remainder: child})
            # Update key so the recursive insert descends into the split node.
            key = prefix
        # Chop off the consumed prefix and recurse.
        self.children[key].do_insert(full_inst, inst[len(key):], expr)

    def insert(self, inst, expr):
        """Insert instruction `inst` mapping to C++ expression `expr`."""
        self.do_insert(inst, inst, expr)
def instruction_parser():
    """Build a trie out of all the instructions, then emit it as C++ code."""
    trie = Node()
    inst_length = 0
    for inst, expr in instructions:
        inst_length = max(inst_length, len(inst))
        trie.insert(inst, expr)
    printer = CodePrinter()
    # Copy the candidate instruction name into a fixed-size, NUL-padded buffer
    # so individual characters can be switched on below.
    printer.print_line("char op[{}] = {{'\\0'}};".format(inst_length + 1))
    printer.print_line("strncpy(op, s[0]->c_str(), {});".format(inst_length))
    def print_leaf(expr, inst):
        # Emit the final full-string comparison guarding the parser call; this
        # also catches mismatches in characters skipped by multi-char prefixes.
        printer.print_line("if (strcmp(op, \"{inst}\") == 0) {{ return {expr}; }}"
                           .format(inst=inst, expr=expr))
        printer.print_line("goto parse_error;")
    def emit(node, idx=0):
        # Emit a switch on the character at offset `idx`: one case per child,
        # recursing for children that have children of their own. Note that
        # only the first character of a multi-char prefix is tested here; the
        # strcmp in print_leaf verifies the rest.
        assert node.children
        printer.print_line("switch (op[{}]) {{".format(idx))
        with printer.indent():
            if node.expr:
                # An instruction may end exactly at this node.
                printer.print_line("case '\\0':")
                with printer.indent():
                    print_leaf(node.expr, node.inst)
            # Sort children for deterministic generated output.
            children = sorted(node.children.items(), key=lambda pair: pair[0])
            for prefix, child in children:
                if child.children:
                    printer.print_line("case '{}': {{".format(prefix[0]))
                    with printer.indent():
                        emit(child, idx + len(prefix))
                    printer.print_line("}")
                else:
                    # Leaf child: check the full string and dispatch.
                    assert child.expr
                    printer.print_line("case '{}':".format(prefix[0]))
                    with printer.indent():
                        print_leaf(child.expr, child.inst)
            printer.print_line("default: goto parse_error;")
        printer.print_line("}")
    emit(trie)
    printer.print_line("parse_error:")
    with printer.indent():
        printer.print_line("throw ParseException(std::string(op), s.line, s.col);")
def print_header():
    """Print the preamble of the generated C++ file."""
    banners = (
        "// DO NOT EDIT! This file generated by scripts/gen-s-parser.py\n",
        "// clang-format off\n",
    )
    for banner in banners:
        print(banner)
def print_footer():
    """Print the trailer that re-enables clang-format."""
    # A leading blank line, then the closing marker.
    print("", "// clang-format on", sep="\n")
def generate_with_guard(generator, guard):
    """Emit the output of `generator()` bracketed by C preprocessor lines.

    The generated region is only compiled when `guard` is defined, and the
    macro is undefined immediately so it cannot be consumed twice.
    """
    print("#ifdef " + guard)
    print("#undef " + guard)
    generator()
    print("#endif // " + guard)
def main():
    """Validate the interpreter version, then emit the guarded parser."""
    if sys.version_info.major != 3:
        import datetime
        year = datetime.datetime.now().year
        print("It's " + str(year) + "! Use Python 3!")
        sys.exit(1)
    print_header()
    generate_with_guard(instruction_parser, "INSTRUCTION_PARSER")
    print_footer()
# Run the generator only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
| 60.157382 | 97 | 0.600745 |
import sys
instructions = [
("unreachable", "makeUnreachable()"),
("nop", "makeNop()"),
("block", "makeBlock(s)"),
("loop", "makeLoop(s)"),
("if", "makeIf(s)"),
("then", "makeThenOrElse(s)"),
("else", "makeThenOrElse(s)"),
("br", "makeBreak(s)"),
("br_if", "makeBreak(s)"),
("br_table", "makeBreakTable(s)"),
("return", "makeReturn(s)"),
("call", "makeCall(s, /*isReturn=*/false)"),
("call_indirect", "makeCallIndirect(s, /*isReturn=*/false)"),
("return_call", "makeCall(s, /*isReturn=*/true)"),
("return_call_indirect", "makeCallIndirect(s, /*isReturn=*/true)"),
("drop", "makeDrop(s)"),
("select", "makeSelect(s)"),
("local.get", "makeLocalGet(s)"),
("local.set", "makeLocalSet(s)"),
("local.tee", "makeLocalTee(s)"),
("global.get", "makeGlobalGet(s)"),
("global.set", "makeGlobalSet(s)"),
("memory.init", "makeMemoryInit(s)"),
("data.drop", "makeDataDrop(s)"),
("memory.copy", "makeMemoryCopy(s)"),
("memory.fill", "makeMemoryFill(s)"),
("i32.load", "makeLoad(s, Type::i32, /*isAtomic=*/false)"),
("i64.load", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
("f32.load", "makeLoad(s, Type::f32, /*isAtomic=*/false)"),
("f64.load", "makeLoad(s, Type::f64, /*isAtomic=*/false)"),
("i32.load8_s", "makeLoad(s, Type::i32, /*isAtomic=*/false)"),
("i32.load8_u", "makeLoad(s, Type::i32, /*isAtomic=*/false)"),
("i32.load16_s", "makeLoad(s, Type::i32, /*isAtomic=*/false)"),
("i32.load16_u", "makeLoad(s, Type::i32, /*isAtomic=*/false)"),
("i64.load8_s", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
("i64.load8_u", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
("i64.load16_s", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
("i64.load16_u", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
("i64.load32_s", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
("i64.load32_u", "makeLoad(s, Type::i64, /*isAtomic=*/false)"),
("i32.store", "makeStore(s, Type::i32, /*isAtomic=*/false)"),
("i64.store", "makeStore(s, Type::i64, /*isAtomic=*/false)"),
("f32.store", "makeStore(s, Type::f32, /*isAtomic=*/false)"),
("f64.store", "makeStore(s, Type::f64, /*isAtomic=*/false)"),
("i32.store8", "makeStore(s, Type::i32, /*isAtomic=*/false)"),
("i32.store16", "makeStore(s, Type::i32, /*isAtomic=*/false)"),
("i64.store8", "makeStore(s, Type::i64, /*isAtomic=*/false)"),
("i64.store16", "makeStore(s, Type::i64, /*isAtomic=*/false)"),
("i64.store32", "makeStore(s, Type::i64, /*isAtomic=*/false)"),
("memory.size", "makeMemorySize(s)"),
("memory.grow", "makeMemoryGrow(s)"),
("i32.const", "makeConst(s, Type::i32)"),
("i64.const", "makeConst(s, Type::i64)"),
("f32.const", "makeConst(s, Type::f32)"),
("f64.const", "makeConst(s, Type::f64)"),
("i32.eqz", "makeUnary(s, UnaryOp::EqZInt32)"),
("i32.eq", "makeBinary(s, BinaryOp::EqInt32)"),
("i32.ne", "makeBinary(s, BinaryOp::NeInt32)"),
("i32.lt_s", "makeBinary(s, BinaryOp::LtSInt32)"),
("i32.lt_u", "makeBinary(s, BinaryOp::LtUInt32)"),
("i32.gt_s", "makeBinary(s, BinaryOp::GtSInt32)"),
("i32.gt_u", "makeBinary(s, BinaryOp::GtUInt32)"),
("i32.le_s", "makeBinary(s, BinaryOp::LeSInt32)"),
("i32.le_u", "makeBinary(s, BinaryOp::LeUInt32)"),
("i32.ge_s", "makeBinary(s, BinaryOp::GeSInt32)"),
("i32.ge_u", "makeBinary(s, BinaryOp::GeUInt32)"),
("i64.eqz", "makeUnary(s, UnaryOp::EqZInt64)"),
("i64.eq", "makeBinary(s, BinaryOp::EqInt64)"),
("i64.ne", "makeBinary(s, BinaryOp::NeInt64)"),
("i64.lt_s", "makeBinary(s, BinaryOp::LtSInt64)"),
("i64.lt_u", "makeBinary(s, BinaryOp::LtUInt64)"),
("i64.gt_s", "makeBinary(s, BinaryOp::GtSInt64)"),
("i64.gt_u", "makeBinary(s, BinaryOp::GtUInt64)"),
("i64.le_s", "makeBinary(s, BinaryOp::LeSInt64)"),
("i64.le_u", "makeBinary(s, BinaryOp::LeUInt64)"),
("i64.ge_s", "makeBinary(s, BinaryOp::GeSInt64)"),
("i64.ge_u", "makeBinary(s, BinaryOp::GeUInt64)"),
("f32.eq", "makeBinary(s, BinaryOp::EqFloat32)"),
("f32.ne", "makeBinary(s, BinaryOp::NeFloat32)"),
("f32.lt", "makeBinary(s, BinaryOp::LtFloat32)"),
("f32.gt", "makeBinary(s, BinaryOp::GtFloat32)"),
("f32.le", "makeBinary(s, BinaryOp::LeFloat32)"),
("f32.ge", "makeBinary(s, BinaryOp::GeFloat32)"),
("f64.eq", "makeBinary(s, BinaryOp::EqFloat64)"),
("f64.ne", "makeBinary(s, BinaryOp::NeFloat64)"),
("f64.lt", "makeBinary(s, BinaryOp::LtFloat64)"),
("f64.gt", "makeBinary(s, BinaryOp::GtFloat64)"),
("f64.le", "makeBinary(s, BinaryOp::LeFloat64)"),
("f64.ge", "makeBinary(s, BinaryOp::GeFloat64)"),
("i32.clz", "makeUnary(s, UnaryOp::ClzInt32)"),
("i32.ctz", "makeUnary(s, UnaryOp::CtzInt32)"),
("i32.popcnt", "makeUnary(s, UnaryOp::PopcntInt32)"),
("i32.add", "makeBinary(s, BinaryOp::AddInt32)"),
("i32.sub", "makeBinary(s, BinaryOp::SubInt32)"),
("i32.mul", "makeBinary(s, BinaryOp::MulInt32)"),
("i32.div_s", "makeBinary(s, BinaryOp::DivSInt32)"),
("i32.div_u", "makeBinary(s, BinaryOp::DivUInt32)"),
("i32.rem_s", "makeBinary(s, BinaryOp::RemSInt32)"),
("i32.rem_u", "makeBinary(s, BinaryOp::RemUInt32)"),
("i32.and", "makeBinary(s, BinaryOp::AndInt32)"),
("i32.or", "makeBinary(s, BinaryOp::OrInt32)"),
("i32.xor", "makeBinary(s, BinaryOp::XorInt32)"),
("i32.shl", "makeBinary(s, BinaryOp::ShlInt32)"),
("i32.shr_s", "makeBinary(s, BinaryOp::ShrSInt32)"),
("i32.shr_u", "makeBinary(s, BinaryOp::ShrUInt32)"),
("i32.rotl", "makeBinary(s, BinaryOp::RotLInt32)"),
("i32.rotr", "makeBinary(s, BinaryOp::RotRInt32)"),
("i64.clz", "makeUnary(s, UnaryOp::ClzInt64)"),
("i64.ctz", "makeUnary(s, UnaryOp::CtzInt64)"),
("i64.popcnt", "makeUnary(s, UnaryOp::PopcntInt64)"),
("i64.add", "makeBinary(s, BinaryOp::AddInt64)"),
("i64.sub", "makeBinary(s, BinaryOp::SubInt64)"),
("i64.mul", "makeBinary(s, BinaryOp::MulInt64)"),
("i64.div_s", "makeBinary(s, BinaryOp::DivSInt64)"),
("i64.div_u", "makeBinary(s, BinaryOp::DivUInt64)"),
("i64.rem_s", "makeBinary(s, BinaryOp::RemSInt64)"),
("i64.rem_u", "makeBinary(s, BinaryOp::RemUInt64)"),
("i64.and", "makeBinary(s, BinaryOp::AndInt64)"),
("i64.or", "makeBinary(s, BinaryOp::OrInt64)"),
("i64.xor", "makeBinary(s, BinaryOp::XorInt64)"),
("i64.shl", "makeBinary(s, BinaryOp::ShlInt64)"),
("i64.shr_s", "makeBinary(s, BinaryOp::ShrSInt64)"),
("i64.shr_u", "makeBinary(s, BinaryOp::ShrUInt64)"),
("i64.rotl", "makeBinary(s, BinaryOp::RotLInt64)"),
("i64.rotr", "makeBinary(s, BinaryOp::RotRInt64)"),
("f32.abs", "makeUnary(s, UnaryOp::AbsFloat32)"),
("f32.neg", "makeUnary(s, UnaryOp::NegFloat32)"),
("f32.ceil", "makeUnary(s, UnaryOp::CeilFloat32)"),
("f32.floor", "makeUnary(s, UnaryOp::FloorFloat32)"),
("f32.trunc", "makeUnary(s, UnaryOp::TruncFloat32)"),
("f32.nearest", "makeUnary(s, UnaryOp::NearestFloat32)"),
("f32.sqrt", "makeUnary(s, UnaryOp::SqrtFloat32)"),
("f32.add", "makeBinary(s, BinaryOp::AddFloat32)"),
("f32.sub", "makeBinary(s, BinaryOp::SubFloat32)"),
("f32.mul", "makeBinary(s, BinaryOp::MulFloat32)"),
("f32.div", "makeBinary(s, BinaryOp::DivFloat32)"),
("f32.min", "makeBinary(s, BinaryOp::MinFloat32)"),
("f32.max", "makeBinary(s, BinaryOp::MaxFloat32)"),
("f32.copysign", "makeBinary(s, BinaryOp::CopySignFloat32)"),
("f64.abs", "makeUnary(s, UnaryOp::AbsFloat64)"),
("f64.neg", "makeUnary(s, UnaryOp::NegFloat64)"),
("f64.ceil", "makeUnary(s, UnaryOp::CeilFloat64)"),
("f64.floor", "makeUnary(s, UnaryOp::FloorFloat64)"),
("f64.trunc", "makeUnary(s, UnaryOp::TruncFloat64)"),
("f64.nearest", "makeUnary(s, UnaryOp::NearestFloat64)"),
("f64.sqrt", "makeUnary(s, UnaryOp::SqrtFloat64)"),
("f64.add", "makeBinary(s, BinaryOp::AddFloat64)"),
("f64.sub", "makeBinary(s, BinaryOp::SubFloat64)"),
("f64.mul", "makeBinary(s, BinaryOp::MulFloat64)"),
("f64.div", "makeBinary(s, BinaryOp::DivFloat64)"),
("f64.min", "makeBinary(s, BinaryOp::MinFloat64)"),
("f64.max", "makeBinary(s, BinaryOp::MaxFloat64)"),
("f64.copysign", "makeBinary(s, BinaryOp::CopySignFloat64)"),
("i32.wrap_i64", "makeUnary(s, UnaryOp::WrapInt64)"),
("i32.trunc_f32_s", "makeUnary(s, UnaryOp::TruncSFloat32ToInt32)"),
("i32.trunc_f32_u", "makeUnary(s, UnaryOp::TruncUFloat32ToInt32)"),
("i32.trunc_f64_s", "makeUnary(s, UnaryOp::TruncSFloat64ToInt32)"),
("i32.trunc_f64_u", "makeUnary(s, UnaryOp::TruncUFloat64ToInt32)"),
("i64.extend_i32_s", "makeUnary(s, UnaryOp::ExtendSInt32)"),
("i64.extend_i32_u", "makeUnary(s, UnaryOp::ExtendUInt32)"),
("i64.trunc_f32_s", "makeUnary(s, UnaryOp::TruncSFloat32ToInt64)"),
("i64.trunc_f32_u", "makeUnary(s, UnaryOp::TruncUFloat32ToInt64)"),
("i64.trunc_f64_s", "makeUnary(s, UnaryOp::TruncSFloat64ToInt64)"),
("i64.trunc_f64_u", "makeUnary(s, UnaryOp::TruncUFloat64ToInt64)"),
("f32.convert_i32_s", "makeUnary(s, UnaryOp::ConvertSInt32ToFloat32)"),
("f32.convert_i32_u", "makeUnary(s, UnaryOp::ConvertUInt32ToFloat32)"),
("f32.convert_i64_s", "makeUnary(s, UnaryOp::ConvertSInt64ToFloat32)"),
("f32.convert_i64_u", "makeUnary(s, UnaryOp::ConvertUInt64ToFloat32)"),
("f32.demote_f64", "makeUnary(s, UnaryOp::DemoteFloat64)"),
("f64.convert_i32_s", "makeUnary(s, UnaryOp::ConvertSInt32ToFloat64)"),
("f64.convert_i32_u", "makeUnary(s, UnaryOp::ConvertUInt32ToFloat64)"),
("f64.convert_i64_s", "makeUnary(s, UnaryOp::ConvertSInt64ToFloat64)"),
("f64.convert_i64_u", "makeUnary(s, UnaryOp::ConvertUInt64ToFloat64)"),
("f64.promote_f32", "makeUnary(s, UnaryOp::PromoteFloat32)"),
("i32.reinterpret_f32", "makeUnary(s, UnaryOp::ReinterpretFloat32)"),
("i64.reinterpret_f64", "makeUnary(s, UnaryOp::ReinterpretFloat64)"),
("f32.reinterpret_i32", "makeUnary(s, UnaryOp::ReinterpretInt32)"),
("f64.reinterpret_i64", "makeUnary(s, UnaryOp::ReinterpretInt64)"),
("i32.extend8_s", "makeUnary(s, UnaryOp::ExtendS8Int32)"),
("i32.extend16_s", "makeUnary(s, UnaryOp::ExtendS16Int32)"),
("i64.extend8_s", "makeUnary(s, UnaryOp::ExtendS8Int64)"),
("i64.extend16_s", "makeUnary(s, UnaryOp::ExtendS16Int64)"),
("i64.extend32_s", "makeUnary(s, UnaryOp::ExtendS32Int64)"),
("memory.atomic.notify", "makeAtomicNotify(s)"),
("memory.atomic.wait32", "makeAtomicWait(s, Type::i32)"),
("memory.atomic.wait64", "makeAtomicWait(s, Type::i64)"),
("atomic.fence", "makeAtomicFence(s)"),
("i32.atomic.load8_u", "makeLoad(s, Type::i32, /*isAtomic=*/true)"),
("i32.atomic.load16_u", "makeLoad(s, Type::i32, /*isAtomic=*/true)"),
("i32.atomic.load", "makeLoad(s, Type::i32, /*isAtomic=*/true)"),
("i64.atomic.load8_u", "makeLoad(s, Type::i64, /*isAtomic=*/true)"),
("i64.atomic.load16_u", "makeLoad(s, Type::i64, /*isAtomic=*/true)"),
("i64.atomic.load32_u", "makeLoad(s, Type::i64, /*isAtomic=*/true)"),
("i64.atomic.load", "makeLoad(s, Type::i64, /*isAtomic=*/true)"),
("i32.atomic.store8", "makeStore(s, Type::i32, /*isAtomic=*/true)"),
("i32.atomic.store16", "makeStore(s, Type::i32, /*isAtomic=*/true)"),
("i32.atomic.store", "makeStore(s, Type::i32, /*isAtomic=*/true)"),
("i64.atomic.store8", "makeStore(s, Type::i64, /*isAtomic=*/true)"),
("i64.atomic.store16", "makeStore(s, Type::i64, /*isAtomic=*/true)"),
("i64.atomic.store32", "makeStore(s, Type::i64, /*isAtomic=*/true)"),
("i64.atomic.store", "makeStore(s, Type::i64, /*isAtomic=*/true)"),
("i32.atomic.rmw8.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw16.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw.add", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i64.atomic.rmw8.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw16.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw32.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw.add", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i32.atomic.rmw8.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw16.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw.sub", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i64.atomic.rmw8.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw16.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw32.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw.sub", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i32.atomic.rmw8.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw16.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw.and", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i64.atomic.rmw8.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw16.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw32.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw.and", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i32.atomic.rmw8.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw16.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw.or", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i64.atomic.rmw8.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw16.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw32.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw.or", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i32.atomic.rmw8.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw16.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw.xor", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i64.atomic.rmw8.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw16.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw32.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw.xor", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i32.atomic.rmw8.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw16.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw.xchg", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i64.atomic.rmw8.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw16.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw32.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw.xchg", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i32.atomic.rmw8.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw16.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i32.atomic.rmw.cmpxchg", "makeAtomicRMWOrCmpxchg(s, Type::i32)"),
("i64.atomic.rmw8.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw16.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw32.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i64.atomic.rmw.cmpxchg", "makeAtomicRMWOrCmpxchg(s, Type::i64)"),
("i32.trunc_sat_f32_s", "makeUnary(s, UnaryOp::TruncSatSFloat32ToInt32)"),
("i32.trunc_sat_f32_u", "makeUnary(s, UnaryOp::TruncSatUFloat32ToInt32)"),
("i32.trunc_sat_f64_s", "makeUnary(s, UnaryOp::TruncSatSFloat64ToInt32)"),
("i32.trunc_sat_f64_u", "makeUnary(s, UnaryOp::TruncSatUFloat64ToInt32)"),
("i64.trunc_sat_f32_s", "makeUnary(s, UnaryOp::TruncSatSFloat32ToInt64)"),
("i64.trunc_sat_f32_u", "makeUnary(s, UnaryOp::TruncSatUFloat32ToInt64)"),
("i64.trunc_sat_f64_s", "makeUnary(s, UnaryOp::TruncSatSFloat64ToInt64)"),
("i64.trunc_sat_f64_u", "makeUnary(s, UnaryOp::TruncSatUFloat64ToInt64)"),
("v128.load", "makeLoad(s, Type::v128, /*isAtomic=*/false)"),
("v128.store", "makeStore(s, Type::v128, /*isAtomic=*/false)"),
("v128.const", "makeConst(s, Type::v128)"),
("i8x16.shuffle", "makeSIMDShuffle(s)"),
("i8x16.splat", "makeUnary(s, UnaryOp::SplatVecI8x16)"),
("i8x16.extract_lane_s", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneSVecI8x16, 16)"),
("i8x16.extract_lane_u", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneUVecI8x16, 16)"),
("i8x16.replace_lane", "makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI8x16, 16)"),
("i16x8.splat", "makeUnary(s, UnaryOp::SplatVecI16x8)"),
("i16x8.extract_lane_s", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneSVecI16x8, 8)"),
("i16x8.extract_lane_u", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneUVecI16x8, 8)"),
("i16x8.replace_lane", "makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI16x8, 8)"),
("i32x4.splat", "makeUnary(s, UnaryOp::SplatVecI32x4)"),
("i32x4.extract_lane", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneVecI32x4, 4)"),
("i32x4.replace_lane", "makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI32x4, 4)"),
("i64x2.splat", "makeUnary(s, UnaryOp::SplatVecI64x2)"),
("i64x2.extract_lane", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneVecI64x2, 2)"),
("i64x2.replace_lane", "makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI64x2, 2)"),
("f32x4.splat", "makeUnary(s, UnaryOp::SplatVecF32x4)"),
("f32x4.extract_lane", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneVecF32x4, 4)"),
("f32x4.replace_lane", "makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecF32x4, 4)"),
("f64x2.splat", "makeUnary(s, UnaryOp::SplatVecF64x2)"),
("f64x2.extract_lane", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneVecF64x2, 2)"),
("f64x2.replace_lane", "makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecF64x2, 2)"),
("i8x16.eq", "makeBinary(s, BinaryOp::EqVecI8x16)"),
("i8x16.ne", "makeBinary(s, BinaryOp::NeVecI8x16)"),
("i8x16.lt_s", "makeBinary(s, BinaryOp::LtSVecI8x16)"),
("i8x16.lt_u", "makeBinary(s, BinaryOp::LtUVecI8x16)"),
("i8x16.gt_s", "makeBinary(s, BinaryOp::GtSVecI8x16)"),
("i8x16.gt_u", "makeBinary(s, BinaryOp::GtUVecI8x16)"),
("i8x16.le_s", "makeBinary(s, BinaryOp::LeSVecI8x16)"),
("i8x16.le_u", "makeBinary(s, BinaryOp::LeUVecI8x16)"),
("i8x16.ge_s", "makeBinary(s, BinaryOp::GeSVecI8x16)"),
("i8x16.ge_u", "makeBinary(s, BinaryOp::GeUVecI8x16)"),
("i16x8.eq", "makeBinary(s, BinaryOp::EqVecI16x8)"),
("i16x8.ne", "makeBinary(s, BinaryOp::NeVecI16x8)"),
("i16x8.lt_s", "makeBinary(s, BinaryOp::LtSVecI16x8)"),
("i16x8.lt_u", "makeBinary(s, BinaryOp::LtUVecI16x8)"),
("i16x8.gt_s", "makeBinary(s, BinaryOp::GtSVecI16x8)"),
("i16x8.gt_u", "makeBinary(s, BinaryOp::GtUVecI16x8)"),
("i16x8.le_s", "makeBinary(s, BinaryOp::LeSVecI16x8)"),
("i16x8.le_u", "makeBinary(s, BinaryOp::LeUVecI16x8)"),
("i16x8.ge_s", "makeBinary(s, BinaryOp::GeSVecI16x8)"),
("i16x8.ge_u", "makeBinary(s, BinaryOp::GeUVecI16x8)"),
("i32x4.eq", "makeBinary(s, BinaryOp::EqVecI32x4)"),
("i32x4.ne", "makeBinary(s, BinaryOp::NeVecI32x4)"),
("i32x4.lt_s", "makeBinary(s, BinaryOp::LtSVecI32x4)"),
("i32x4.lt_u", "makeBinary(s, BinaryOp::LtUVecI32x4)"),
("i32x4.gt_s", "makeBinary(s, BinaryOp::GtSVecI32x4)"),
("i32x4.gt_u", "makeBinary(s, BinaryOp::GtUVecI32x4)"),
("i32x4.le_s", "makeBinary(s, BinaryOp::LeSVecI32x4)"),
("i32x4.le_u", "makeBinary(s, BinaryOp::LeUVecI32x4)"),
("i32x4.ge_s", "makeBinary(s, BinaryOp::GeSVecI32x4)"),
("i32x4.ge_u", "makeBinary(s, BinaryOp::GeUVecI32x4)"),
("i64x2.eq", "makeBinary(s, BinaryOp::EqVecI64x2)"),
("i64x2.ne", "makeBinary(s, BinaryOp::NeVecI64x2)"),
("i64x2.lt_s", "makeBinary(s, BinaryOp::LtSVecI64x2)"),
("i64x2.gt_s", "makeBinary(s, BinaryOp::GtSVecI64x2)"),
("i64x2.le_s", "makeBinary(s, BinaryOp::LeSVecI64x2)"),
("i64x2.ge_s", "makeBinary(s, BinaryOp::GeSVecI64x2)"),
("f32x4.eq", "makeBinary(s, BinaryOp::EqVecF32x4)"),
("f32x4.ne", "makeBinary(s, BinaryOp::NeVecF32x4)"),
("f32x4.lt", "makeBinary(s, BinaryOp::LtVecF32x4)"),
("f32x4.gt", "makeBinary(s, BinaryOp::GtVecF32x4)"),
("f32x4.le", "makeBinary(s, BinaryOp::LeVecF32x4)"),
("f32x4.ge", "makeBinary(s, BinaryOp::GeVecF32x4)"),
("f64x2.eq", "makeBinary(s, BinaryOp::EqVecF64x2)"),
("f64x2.ne", "makeBinary(s, BinaryOp::NeVecF64x2)"),
("f64x2.lt", "makeBinary(s, BinaryOp::LtVecF64x2)"),
("f64x2.gt", "makeBinary(s, BinaryOp::GtVecF64x2)"),
("f64x2.le", "makeBinary(s, BinaryOp::LeVecF64x2)"),
("f64x2.ge", "makeBinary(s, BinaryOp::GeVecF64x2)"),
("v128.not", "makeUnary(s, UnaryOp::NotVec128)"),
("v128.and", "makeBinary(s, BinaryOp::AndVec128)"),
("v128.or", "makeBinary(s, BinaryOp::OrVec128)"),
("v128.xor", "makeBinary(s, BinaryOp::XorVec128)"),
("v128.andnot", "makeBinary(s, BinaryOp::AndNotVec128)"),
("v128.any_true", "makeUnary(s, UnaryOp::AnyTrueVec128)"),
("v128.bitselect", "makeSIMDTernary(s, SIMDTernaryOp::Bitselect)"),
("v128.load8_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load8LaneVec128)"),
("v128.load16_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load16LaneVec128)"),
("v128.load32_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load32LaneVec128)"),
("v128.load64_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load64LaneVec128)"),
("v128.store8_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store8LaneVec128)"),
("v128.store16_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store16LaneVec128)"),
("v128.store32_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store32LaneVec128)"),
("v128.store64_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store64LaneVec128)"),
("i8x16.popcnt", "makeUnary(s, UnaryOp::PopcntVecI8x16)"),
("i8x16.abs", "makeUnary(s, UnaryOp::AbsVecI8x16)"),
("i8x16.neg", "makeUnary(s, UnaryOp::NegVecI8x16)"),
("i8x16.all_true", "makeUnary(s, UnaryOp::AllTrueVecI8x16)"),
("i8x16.bitmask", "makeUnary(s, UnaryOp::BitmaskVecI8x16)"),
("i8x16.shl", "makeSIMDShift(s, SIMDShiftOp::ShlVecI8x16)"),
("i8x16.shr_s", "makeSIMDShift(s, SIMDShiftOp::ShrSVecI8x16)"),
("i8x16.shr_u", "makeSIMDShift(s, SIMDShiftOp::ShrUVecI8x16)"),
("i8x16.add", "makeBinary(s, BinaryOp::AddVecI8x16)"),
("i8x16.add_sat_s", "makeBinary(s, BinaryOp::AddSatSVecI8x16)"),
("i8x16.add_sat_u", "makeBinary(s, BinaryOp::AddSatUVecI8x16)"),
("i8x16.sub", "makeBinary(s, BinaryOp::SubVecI8x16)"),
("i8x16.sub_sat_s", "makeBinary(s, BinaryOp::SubSatSVecI8x16)"),
("i8x16.sub_sat_u", "makeBinary(s, BinaryOp::SubSatUVecI8x16)"),
("i8x16.min_s", "makeBinary(s, BinaryOp::MinSVecI8x16)"),
("i8x16.min_u", "makeBinary(s, BinaryOp::MinUVecI8x16)"),
("i8x16.max_s", "makeBinary(s, BinaryOp::MaxSVecI8x16)"),
("i8x16.max_u", "makeBinary(s, BinaryOp::MaxUVecI8x16)"),
("i8x16.avgr_u", "makeBinary(s, BinaryOp::AvgrUVecI8x16)"),
("i16x8.abs", "makeUnary(s, UnaryOp::AbsVecI16x8)"),
("i16x8.neg", "makeUnary(s, UnaryOp::NegVecI16x8)"),
("i16x8.all_true", "makeUnary(s, UnaryOp::AllTrueVecI16x8)"),
("i16x8.bitmask", "makeUnary(s, UnaryOp::BitmaskVecI16x8)"),
("i16x8.shl", "makeSIMDShift(s, SIMDShiftOp::ShlVecI16x8)"),
("i16x8.shr_s", "makeSIMDShift(s, SIMDShiftOp::ShrSVecI16x8)"),
("i16x8.shr_u", "makeSIMDShift(s, SIMDShiftOp::ShrUVecI16x8)"),
("i16x8.add", "makeBinary(s, BinaryOp::AddVecI16x8)"),
("i16x8.add_sat_s", "makeBinary(s, BinaryOp::AddSatSVecI16x8)"),
("i16x8.add_sat_u", "makeBinary(s, BinaryOp::AddSatUVecI16x8)"),
("i16x8.sub", "makeBinary(s, BinaryOp::SubVecI16x8)"),
("i16x8.sub_sat_s", "makeBinary(s, BinaryOp::SubSatSVecI16x8)"),
("i16x8.sub_sat_u", "makeBinary(s, BinaryOp::SubSatUVecI16x8)"),
("i16x8.mul", "makeBinary(s, BinaryOp::MulVecI16x8)"),
("i16x8.min_s", "makeBinary(s, BinaryOp::MinSVecI16x8)"),
("i16x8.min_u", "makeBinary(s, BinaryOp::MinUVecI16x8)"),
("i16x8.max_s", "makeBinary(s, BinaryOp::MaxSVecI16x8)"),
("i16x8.max_u", "makeBinary(s, BinaryOp::MaxUVecI16x8)"),
("i16x8.avgr_u", "makeBinary(s, BinaryOp::AvgrUVecI16x8)"),
("i16x8.q15mulr_sat_s", "makeBinary(s, BinaryOp::Q15MulrSatSVecI16x8)"),
("i16x8.extmul_low_i8x16_s", "makeBinary(s, BinaryOp::ExtMulLowSVecI16x8)"),
("i16x8.extmul_high_i8x16_s", "makeBinary(s, BinaryOp::ExtMulHighSVecI16x8)"),
("i16x8.extmul_low_i8x16_u", "makeBinary(s, BinaryOp::ExtMulLowUVecI16x8)"),
("i16x8.extmul_high_i8x16_u", "makeBinary(s, BinaryOp::ExtMulHighUVecI16x8)"),
("i32x4.abs", "makeUnary(s, UnaryOp::AbsVecI32x4)"),
("i32x4.neg", "makeUnary(s, UnaryOp::NegVecI32x4)"),
("i32x4.all_true", "makeUnary(s, UnaryOp::AllTrueVecI32x4)"),
("i32x4.bitmask", "makeUnary(s, UnaryOp::BitmaskVecI32x4)"),
("i32x4.shl", "makeSIMDShift(s, SIMDShiftOp::ShlVecI32x4)"),
("i32x4.shr_s", "makeSIMDShift(s, SIMDShiftOp::ShrSVecI32x4)"),
("i32x4.shr_u", "makeSIMDShift(s, SIMDShiftOp::ShrUVecI32x4)"),
("i32x4.add", "makeBinary(s, BinaryOp::AddVecI32x4)"),
("i32x4.sub", "makeBinary(s, BinaryOp::SubVecI32x4)"),
("i32x4.mul", "makeBinary(s, BinaryOp::MulVecI32x4)"),
("i32x4.min_s", "makeBinary(s, BinaryOp::MinSVecI32x4)"),
("i32x4.min_u", "makeBinary(s, BinaryOp::MinUVecI32x4)"),
("i32x4.max_s", "makeBinary(s, BinaryOp::MaxSVecI32x4)"),
("i32x4.max_u", "makeBinary(s, BinaryOp::MaxUVecI32x4)"),
("i32x4.dot_i16x8_s", "makeBinary(s, BinaryOp::DotSVecI16x8ToVecI32x4)"),
("i32x4.extmul_low_i16x8_s", "makeBinary(s, BinaryOp::ExtMulLowSVecI32x4)"),
("i32x4.extmul_high_i16x8_s", "makeBinary(s, BinaryOp::ExtMulHighSVecI32x4)"),
("i32x4.extmul_low_i16x8_u", "makeBinary(s, BinaryOp::ExtMulLowUVecI32x4)"),
("i32x4.extmul_high_i16x8_u", "makeBinary(s, BinaryOp::ExtMulHighUVecI32x4)"),
("i64x2.abs", "makeUnary(s, UnaryOp::AbsVecI64x2)"),
("i64x2.neg", "makeUnary(s, UnaryOp::NegVecI64x2)"),
("i64x2.all_true", "makeUnary(s, UnaryOp::AllTrueVecI64x2)"),
("i64x2.bitmask", "makeUnary(s, UnaryOp::BitmaskVecI64x2)"),
("i64x2.shl", "makeSIMDShift(s, SIMDShiftOp::ShlVecI64x2)"),
("i64x2.shr_s", "makeSIMDShift(s, SIMDShiftOp::ShrSVecI64x2)"),
("i64x2.shr_u", "makeSIMDShift(s, SIMDShiftOp::ShrUVecI64x2)"),
("i64x2.add", "makeBinary(s, BinaryOp::AddVecI64x2)"),
("i64x2.sub", "makeBinary(s, BinaryOp::SubVecI64x2)"),
("i64x2.mul", "makeBinary(s, BinaryOp::MulVecI64x2)"),
("i64x2.extmul_low_i32x4_s", "makeBinary(s, BinaryOp::ExtMulLowSVecI64x2)"),
("i64x2.extmul_high_i32x4_s", "makeBinary(s, BinaryOp::ExtMulHighSVecI64x2)"),
("i64x2.extmul_low_i32x4_u", "makeBinary(s, BinaryOp::ExtMulLowUVecI64x2)"),
("i64x2.extmul_high_i32x4_u", "makeBinary(s, BinaryOp::ExtMulHighUVecI64x2)"),
("f32x4.abs", "makeUnary(s, UnaryOp::AbsVecF32x4)"),
("f32x4.neg", "makeUnary(s, UnaryOp::NegVecF32x4)"),
("f32x4.sqrt", "makeUnary(s, UnaryOp::SqrtVecF32x4)"),
("f32x4.add", "makeBinary(s, BinaryOp::AddVecF32x4)"),
("f32x4.sub", "makeBinary(s, BinaryOp::SubVecF32x4)"),
("f32x4.mul", "makeBinary(s, BinaryOp::MulVecF32x4)"),
("f32x4.div", "makeBinary(s, BinaryOp::DivVecF32x4)"),
("f32x4.min", "makeBinary(s, BinaryOp::MinVecF32x4)"),
("f32x4.max", "makeBinary(s, BinaryOp::MaxVecF32x4)"),
("f32x4.pmin", "makeBinary(s, BinaryOp::PMinVecF32x4)"),
("f32x4.pmax", "makeBinary(s, BinaryOp::PMaxVecF32x4)"),
("f32x4.ceil", "makeUnary(s, UnaryOp::CeilVecF32x4)"),
("f32x4.floor", "makeUnary(s, UnaryOp::FloorVecF32x4)"),
("f32x4.trunc", "makeUnary(s, UnaryOp::TruncVecF32x4)"),
("f32x4.nearest", "makeUnary(s, UnaryOp::NearestVecF32x4)"),
("f64x2.abs", "makeUnary(s, UnaryOp::AbsVecF64x2)"),
("f64x2.neg", "makeUnary(s, UnaryOp::NegVecF64x2)"),
("f64x2.sqrt", "makeUnary(s, UnaryOp::SqrtVecF64x2)"),
("f64x2.add", "makeBinary(s, BinaryOp::AddVecF64x2)"),
("f64x2.sub", "makeBinary(s, BinaryOp::SubVecF64x2)"),
("f64x2.mul", "makeBinary(s, BinaryOp::MulVecF64x2)"),
("f64x2.div", "makeBinary(s, BinaryOp::DivVecF64x2)"),
("f64x2.min", "makeBinary(s, BinaryOp::MinVecF64x2)"),
("f64x2.max", "makeBinary(s, BinaryOp::MaxVecF64x2)"),
("f64x2.pmin", "makeBinary(s, BinaryOp::PMinVecF64x2)"),
("f64x2.pmax", "makeBinary(s, BinaryOp::PMaxVecF64x2)"),
("f64x2.ceil", "makeUnary(s, UnaryOp::CeilVecF64x2)"),
("f64x2.floor", "makeUnary(s, UnaryOp::FloorVecF64x2)"),
("f64x2.trunc", "makeUnary(s, UnaryOp::TruncVecF64x2)"),
("f64x2.nearest", "makeUnary(s, UnaryOp::NearestVecF64x2)"),
("i32x4.trunc_sat_f32x4_s", "makeUnary(s, UnaryOp::TruncSatSVecF32x4ToVecI32x4)"),
("i32x4.trunc_sat_f32x4_u", "makeUnary(s, UnaryOp::TruncSatUVecF32x4ToVecI32x4)"),
("f32x4.convert_i32x4_s", "makeUnary(s, UnaryOp::ConvertSVecI32x4ToVecF32x4)"),
("f32x4.convert_i32x4_u", "makeUnary(s, UnaryOp::ConvertUVecI32x4ToVecF32x4)"),
("v128.load8_splat", "makeSIMDLoad(s, SIMDLoadOp::Load8SplatVec128)"),
("v128.load16_splat", "makeSIMDLoad(s, SIMDLoadOp::Load16SplatVec128)"),
("v128.load32_splat", "makeSIMDLoad(s, SIMDLoadOp::Load32SplatVec128)"),
("v128.load64_splat", "makeSIMDLoad(s, SIMDLoadOp::Load64SplatVec128)"),
("v128.load8x8_s", "makeSIMDLoad(s, SIMDLoadOp::Load8x8SVec128)"),
("v128.load8x8_u", "makeSIMDLoad(s, SIMDLoadOp::Load8x8UVec128)"),
("v128.load16x4_s", "makeSIMDLoad(s, SIMDLoadOp::Load16x4SVec128)"),
("v128.load16x4_u", "makeSIMDLoad(s, SIMDLoadOp::Load16x4UVec128)"),
("v128.load32x2_s", "makeSIMDLoad(s, SIMDLoadOp::Load32x2SVec128)"),
("v128.load32x2_u", "makeSIMDLoad(s, SIMDLoadOp::Load32x2UVec128)"),
("v128.load32_zero", "makeSIMDLoad(s, SIMDLoadOp::Load32ZeroVec128)"),
("v128.load64_zero", "makeSIMDLoad(s, SIMDLoadOp::Load64ZeroVec128)"),
("i8x16.narrow_i16x8_s", "makeBinary(s, BinaryOp::NarrowSVecI16x8ToVecI8x16)"),
("i8x16.narrow_i16x8_u", "makeBinary(s, BinaryOp::NarrowUVecI16x8ToVecI8x16)"),
("i16x8.narrow_i32x4_s", "makeBinary(s, BinaryOp::NarrowSVecI32x4ToVecI16x8)"),
("i16x8.narrow_i32x4_u", "makeBinary(s, BinaryOp::NarrowUVecI32x4ToVecI16x8)"),
("i16x8.extend_low_i8x16_s", "makeUnary(s, UnaryOp::ExtendLowSVecI8x16ToVecI16x8)"),
("i16x8.extend_high_i8x16_s", "makeUnary(s, UnaryOp::ExtendHighSVecI8x16ToVecI16x8)"),
("i16x8.extend_low_i8x16_u", "makeUnary(s, UnaryOp::ExtendLowUVecI8x16ToVecI16x8)"),
("i16x8.extend_high_i8x16_u", "makeUnary(s, UnaryOp::ExtendHighUVecI8x16ToVecI16x8)"),
("i32x4.extend_low_i16x8_s", "makeUnary(s, UnaryOp::ExtendLowSVecI16x8ToVecI32x4)"),
("i32x4.extend_high_i16x8_s", "makeUnary(s, UnaryOp::ExtendHighSVecI16x8ToVecI32x4)"),
("i32x4.extend_low_i16x8_u", "makeUnary(s, UnaryOp::ExtendLowUVecI16x8ToVecI32x4)"),
("i32x4.extend_high_i16x8_u", "makeUnary(s, UnaryOp::ExtendHighUVecI16x8ToVecI32x4)"),
("i64x2.extend_low_i32x4_s", "makeUnary(s, UnaryOp::ExtendLowSVecI32x4ToVecI64x2)"),
("i64x2.extend_high_i32x4_s", "makeUnary(s, UnaryOp::ExtendHighSVecI32x4ToVecI64x2)"),
("i64x2.extend_low_i32x4_u", "makeUnary(s, UnaryOp::ExtendLowUVecI32x4ToVecI64x2)"),
("i64x2.extend_high_i32x4_u", "makeUnary(s, UnaryOp::ExtendHighUVecI32x4ToVecI64x2)"),
("i8x16.swizzle", "makeBinary(s, BinaryOp::SwizzleVec8x16)"),
("i16x8.extadd_pairwise_i8x16_s", "makeUnary(s, UnaryOp::ExtAddPairwiseSVecI8x16ToI16x8)"),
("i16x8.extadd_pairwise_i8x16_u", "makeUnary(s, UnaryOp::ExtAddPairwiseUVecI8x16ToI16x8)"),
("i32x4.extadd_pairwise_i16x8_s", "makeUnary(s, UnaryOp::ExtAddPairwiseSVecI16x8ToI32x4)"),
("i32x4.extadd_pairwise_i16x8_u", "makeUnary(s, UnaryOp::ExtAddPairwiseUVecI16x8ToI32x4)"),
("f64x2.convert_low_i32x4_s", "makeUnary(s, UnaryOp::ConvertLowSVecI32x4ToVecF64x2)"),
("f64x2.convert_low_i32x4_u", "makeUnary(s, UnaryOp::ConvertLowUVecI32x4ToVecF64x2)"),
("i32x4.trunc_sat_f64x2_s_zero", "makeUnary(s, UnaryOp::TruncSatZeroSVecF64x2ToVecI32x4)"),
("i32x4.trunc_sat_f64x2_u_zero", "makeUnary(s, UnaryOp::TruncSatZeroUVecF64x2ToVecI32x4)"),
("f32x4.demote_f64x2_zero", "makeUnary(s, UnaryOp::DemoteZeroVecF64x2ToVecF32x4)"),
("f64x2.promote_low_f32x4", "makeUnary(s, UnaryOp::PromoteLowVecF32x4ToVecF64x2)"),
("ref.null", "makeRefNull(s)"),
("ref.is_null", "makeRefIs(s, RefIsNull)"),
("ref.func", "makeRefFunc(s)"),
("try", "makeTry(s)"),
("throw", "makeThrow(s)"),
("rethrow", "makeRethrow(s)"),
("tuple.make", "makeTupleMake(s)"),
("tuple.extract", "makeTupleExtract(s)"),
("pop", "makePop(s)"),
("call_ref", "makeCallRef(s, /*isReturn=*/false)"),
("return_call_ref", "makeCallRef(s, /*isReturn=*/true)"),
("ref.eq", "makeRefEq(s)"),
("i31.new", "makeI31New(s)"),
("i31.get_s", "makeI31Get(s, true)"),
("i31.get_u", "makeI31Get(s, false)"),
("ref.test", "makeRefTest(s)"),
("ref.cast", "makeRefCast(s)"),
("br_on_null", "makeBrOn(s, BrOnNull)"),
("br_on_non_null", "makeBrOn(s, BrOnNonNull)"),
("br_on_cast", "makeBrOn(s, BrOnCast)"),
("br_on_cast_fail", "makeBrOn(s, BrOnCastFail)"),
("br_on_func", "makeBrOn(s, BrOnFunc)"),
("br_on_non_func", "makeBrOn(s, BrOnNonFunc)"),
("br_on_data", "makeBrOn(s, BrOnData)"),
("br_on_non_data", "makeBrOn(s, BrOnNonData)"),
("br_on_i31", "makeBrOn(s, BrOnI31)"),
("br_on_non_i31", "makeBrOn(s, BrOnNonI31)"),
("rtt.canon", "makeRttCanon(s)"),
("rtt.sub", "makeRttSub(s)"),
("struct.new_with_rtt", "makeStructNew(s, false)"),
("struct.new_default_with_rtt", "makeStructNew(s, true)"),
("struct.get", "makeStructGet(s)"),
("struct.get_s", "makeStructGet(s, true)"),
("struct.get_u", "makeStructGet(s, false)"),
("struct.set", "makeStructSet(s)"),
("array.new_with_rtt", "makeArrayNew(s, false)"),
("array.new_default_with_rtt", "makeArrayNew(s, true)"),
("array.get", "makeArrayGet(s)"),
("array.get_s", "makeArrayGet(s, true)"),
("array.get_u", "makeArrayGet(s, false)"),
("array.set", "makeArraySet(s)"),
("array.len", "makeArrayLen(s)"),
("array.copy", "makeArrayCopy(s)"),
("ref.is_func", "makeRefIs(s, RefIsFunc)"),
("ref.is_data", "makeRefIs(s, RefIsData)"),
("ref.is_i31", "makeRefIs(s, RefIsI31)"),
("ref.as_non_null", "makeRefAs(s, RefAsNonNull)"),
("ref.as_func", "makeRefAs(s, RefAsFunc)"),
("ref.as_data", "makeRefAs(s, RefAsData)"),
("ref.as_i31", "makeRefAs(s, RefAsI31)"),
]
class CodePrinter:
    """Prints lines of generated code, tracking a class-wide indent depth.

    Usage: ``with printer.indent(): printer.print_line(...)`` — entering the
    context deepens the shared indent, leaving it restores the previous depth.
    """
    # Shared across all instances so nested emitters stay aligned.
    indents = 0
    def indent(self):
        # The instance itself acts as the context manager.
        return self
    def __enter__(self):
        CodePrinter.indents += 1
    def __exit__(self, *exc_info):
        CodePrinter.indents -= 1
    def print_line(self, line):
        print(" " * CodePrinter.indents + line)
class Node:
    """A node of a prefix trie mapping instruction mnemonics to parser expressions."""
    def __init__(self, expr=None, children=None, inst=None):
        # expr: parser expression to emit when a mnemonic ends exactly here.
        # children: dict mapping edge label (string fragment) -> child Node.
        # inst: the complete mnemonic terminating at this node (for error messages
        # and the final exact-match check in the generated code).
        self.expr = expr
        self.children = children if children else {}
        self.inst = inst
    @staticmethod
    def _common_prefix(a, b):
        """Return the longest common prefix of strings `a` and `b`."""
        # Scan by index instead of repeatedly re-slicing both strings, which
        # made the original quadratic in the prefix length.
        limit = min(len(a), len(b))
        i = 0
        while i < limit and a[i] == b[i]:
            i += 1
        return a[:i]
    def do_insert(self, full_inst, inst, expr):
        """Insert the remaining suffix `inst` of mnemonic `full_inst` mapping to `expr`."""
        if not inst:
            # The whole mnemonic has been consumed: this node is its terminal.
            assert self.expr is None, "Repeated instruction " + full_inst
            self.expr = expr
            self.inst = full_inst
            return
        # Find an existing outgoing edge that shares a prefix with the suffix.
        prefix, key = "", None
        for k in self.children:
            prefix = Node._common_prefix(inst, k)
            if prefix:
                key = k
                break
        if key is None:
            # No shared prefix with any edge: add a fresh leaf edge.
            self.children[inst] = Node(expr, inst=full_inst)
            return
        key_remainder = key[len(prefix):]
        if key_remainder:
            # Split the existing edge at the shared prefix, pushing the old
            # subtree down under the remainder of its label.
            child = self.children.pop(key)
            self.children[prefix] = Node(children={key_remainder: child})
            key = prefix
        # Recurse past the consumed edge label.
        self.children[key].do_insert(full_inst, inst[len(key):], expr)
    def insert(self, inst, expr):
        """Insert full mnemonic `inst` mapping to parser expression `expr`."""
        self.do_insert(inst, inst, expr)
def instruction_parser():
    """Emit the C++ body of the s-expression instruction parser.

    Builds a prefix trie over every mnemonic in the module-level
    `instructions` table, then prints nested C++ `switch` statements that
    match the mnemonic one character at a time and return the matching
    make* expression, falling through to a `parse_error` label.
    """
    trie = Node()
    inst_length = 0
    for inst, expr in instructions:
        inst_length = max(inst_length, len(inst))
        trie.insert(inst, expr)
    printer = CodePrinter()
    # Copy the mnemonic into a fixed-size NUL-padded buffer so individual
    # characters can be switched on by index.
    printer.print_line("char op[{}] = {{'\\0'}};".format(inst_length + 1))
    printer.print_line("strncpy(op, s[0]->c_str(), {});".format(inst_length))
    def print_leaf(expr, inst):
        # A final full-string compare guards mnemonics that are a proper
        # prefix of a longer instruction.
        printer.print_line("if (strcmp(op, \"{inst}\") == 0) {{ return {expr}; }}"
                           .format(inst=inst, expr=expr))
        printer.print_line("goto parse_error;")
    def emit(node, idx=0):
        # Emit a switch over the character at position `idx` for this node.
        assert node.children
        printer.print_line("switch (op[{}]) {{".format(idx))
        with printer.indent():
            if node.expr:
                # A mnemonic can end here: the NUL terminator means exact match.
                printer.print_line("case '\\0':")
                with printer.indent():
                    print_leaf(node.expr, node.inst)
            # Sort children for deterministic output.
            children = sorted(node.children.items(), key=lambda pair: pair[0])
            for prefix, child in children:
                if child.children:
                    # Interior node: recurse, skipping the whole shared prefix.
                    printer.print_line("case '{}': {{".format(prefix[0]))
                    with printer.indent():
                        emit(child, idx + len(prefix))
                    printer.print_line("}")
                else:
                    assert child.expr
                    printer.print_line("case '{}':".format(prefix[0]))
                    with printer.indent():
                        print_leaf(child.expr, child.inst)
            printer.print_line("default: goto parse_error;")
        printer.print_line("}")
    emit(trie)
    printer.print_line("parse_error:")
    with printer.indent():
        printer.print_line("throw ParseException(std::string(op), s.line, s.col);")
def print_header():
    """Emit the file preamble: generated-file warning plus clang-format off marker."""
    for banner in ("// DO NOT EDIT! This file generated by scripts/gen-s-parser.py\n",
                   "// clang-format off\n"):
        print(banner)
def print_footer():
    """Emit the closing clang-format marker at the end of the generated file."""
    print("\n// clang-format on")
def generate_with_guard(generator, guard):
    """Run `generator` wrapped in C preprocessor guard lines.

    Prints `#ifdef GUARD` / `#undef GUARD` before the generated body and the
    matching `#endif` after it, so the including C++ file pulls in each
    generated section exactly once.
    """
    for template in ("#ifdef {}", "#undef {}"):
        print(template.format(guard))
    generator()
    print("#endif // {}".format(guard))
def main():
    """Entry point: refuse to run under Python 2, then print the generated parser to stdout."""
    if sys.version_info.major != 3:
        # Local import: only needed for the Python-2 error message.
        import datetime
        print("It's " + str(datetime.datetime.now().year) + "! Use Python 3!")
        sys.exit(1)
    print_header()
    generate_with_guard(instruction_parser, "INSTRUCTION_PARSER")
    print_footer()
# Emit the parser only when run as a script, not on import.
if __name__ == "__main__":
    main()
| true | true |
f7ff7a67c3e28d9981f94d4387fc28f505ae5a02 | 99 | py | Python | pycaprio/core/interfaces/types.py | reckart/pycaprio | 1d030ecd97cb324e404c16520fe6250c49c3bb06 | [
"MIT"
] | 9 | 2019-08-27T11:21:07.000Z | 2021-03-11T15:41:44.000Z | pycaprio/core/interfaces/types.py | reckart/pycaprio | 1d030ecd97cb324e404c16520fe6250c49c3bb06 | [
"MIT"
] | 25 | 2019-09-03T11:05:18.000Z | 2021-04-18T15:57:33.000Z | pycaprio/core/interfaces/types.py | reckart/pycaprio | 1d030ecd97cb324e404c16520fe6250c49c3bb06 | [
"MIT"
] | 6 | 2019-10-02T16:51:10.000Z | 2021-03-11T15:41:52.000Z | from typing import Tuple
# Credentials pair for HTTP basic auth — presumably (username, password); TODO confirm against client usage.
authentication_type = Tuple[str, str]
# Variable-length tuple of HTTP status codes — presumably the statuses treated as acceptable; TODO confirm.
status_list_type = Tuple[int, ...]
| 19.8 | 37 | 0.757576 | from typing import Tuple
authentication_type = Tuple[str, str]
status_list_type = Tuple[int, ...]
| true | true |
f7ff7b8107ee18469aae32781ef7c03d3f35faa9 | 777 | py | Python | room/views.py | josemmercado96/hotel-bookings | dc2b96f8e0ca49df30f490d5c25d8b02c873fcdd | [
"MIT"
] | null | null | null | room/views.py | josemmercado96/hotel-bookings | dc2b96f8e0ca49df30f490d5c25d8b02c873fcdd | [
"MIT"
] | null | null | null | room/views.py | josemmercado96/hotel-bookings | dc2b96f8e0ca49df30f490d5c25d8b02c873fcdd | [
"MIT"
] | null | null | null | from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from room.models import Room
from room.serializers import RoomSerializer
@csrf_exempt
def room_list(request):
    """List all rooms (GET) or create a new room (POST).

    GET  -> 200 with a JSON array of serialized rooms.
    POST -> 201 with the created room, or 400 with validation errors.
    Any other method -> 405.
    """
    if request.method == 'GET':
        rooms = Room.objects.all()
        serializer = RoomSerializer(rooms, many=True)
        # safe=False allows serializing a top-level list.
        return JsonResponse(serializer.data, safe=False)
    elif request.method == 'POST':
        data = JSONParser().parse(request)
        serializer = RoomSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(serializer.data, status=201)
        return JsonResponse(serializer.errors, status=400)
    # The original fell through and returned None here, which makes Django
    # raise a 500 for any other HTTP method; answer with an explicit 405.
    return JsonResponse({'detail': 'Method not allowed'}, status=405)
# Create your views here.
| 32.375 | 60 | 0.714286 | from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from room.models import Room
from room.serializers import RoomSerializer
@csrf_exempt
def room_list(request):
if request.method == 'GET':
rooms = Room.objects.all()
serializer = RoomSerializer(rooms, many=True)
return JsonResponse(serializer.data, safe=False)
elif request.method == 'POST':
data = JSONParser().parse(request)
serializer = RoomSerializer(data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data, status=201)
return JsonResponse(serializer.errors, status=400)
| true | true |
f7ff7c6935a754a9c8352a21a47e00ba651d0679 | 3,727 | py | Python | fca/algorithms/filtering/stability.py | ksiomelo/cubix | cd9e6dda6696b302a7c0d383259a9d60b15b0d55 | [
"Apache-2.0"
] | 3 | 2015-09-07T00:16:16.000Z | 2019-01-11T20:27:56.000Z | fca/algorithms/filtering/stability.py | ksiomelo/cubix | cd9e6dda6696b302a7c0d383259a9d60b15b0d55 | [
"Apache-2.0"
] | null | null | null | fca/algorithms/filtering/stability.py | ksiomelo/cubix | cd9e6dda6696b302a7c0d383259a9d60b15b0d55 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
stability.py
Created by Nikita Romashkin on 2010-01-19.
"""
from __future__ import division
from copy import deepcopy
from fca import ConceptSystem
def compute_istability(lattice):
    """Compute a stability index for every concept of `lattice`.

    Returns a dict mapping concept_id -> stability value, computed from the
    number of subsets of each concept's extent, processing concepts bottom-up
    and subtracting each finished concept's subset count from the concepts
    whose intent it strictly contains.

    Examples
    ========

    >>> from fca import Context, ConceptLattice
    >>> ct = [[True, False, False, True],
    ...       [True, False, True, False],
    ...       [False, True, True, False],
    ...       [False, True, True, True]]
    >>> objs = [1, 2, 3, 4]
    >>> attrs = ['a', 'b', 'c', 'd']
    >>> c = Context(ct, objs, attrs)
    >>> cl = ConceptLattice(c)
    >>> st = compute_istability(cl)

    """
    # HACK: work around a bug in the Mongo DB engine where set-valued fields
    # deserialize incorrectly; rebuild them, then re-point the top/bottom
    # concept references at the repaired objects.
    top_idx = lattice.index(lattice._top_concept)
    bottom_idx = lattice.index(lattice._bottom_concept)
    for cpt in lattice._concepts:
        cpt.fix_set_field_bug()
    lattice._top_concept = lattice._concepts[top_idx]
    lattice._bottom_concept = lattice._concepts[bottom_idx]
    # end hack
    # NOTE(review): original comment warns "bottom concept is wrong" — the
    # ConceptSystem's bottom_concept may be unreliable here; verify upstream.
    concepts = ConceptSystem(lattice)
    count = {}
    subsets = {}
    stability = {}
    for concept in concepts:
        # count: number of concepts with a strictly smaller extent;
        # subsets: total subsets of this concept's extent (2^|extent|).
        count[concept.concept_id] = len([c for c in concepts if c.extent < concept.extent])
        subsets[concept.concept_id] = 2 ** len(concept.extent)
    bottom_concepts = set([concepts.bottom_concept])
    while not len(concepts) == 0:
        # Take any currently-minimal unprocessed concept.
        bottom_concept = bottom_concepts.pop()
        stability[bottom_concept.concept_id] = subsets[bottom_concept.concept_id] / \
                                               (2 ** len(bottom_concept.extent))
        concepts.remove(bottom_concept)
        for c in concepts:
            if bottom_concept.intent > c.intent:
                # Discount subsets already attributed to the finished concept.
                subsets[c.concept_id] -= subsets[bottom_concept.concept_id]
                count[c.concept_id] -= 1
                if count[c.concept_id] == 0:
                    # All smaller concepts processed: c becomes minimal.
                    bottom_concepts.add(c)
    return stability
def compute_estability(lattice):
    """Compute a stability index for every concept of `lattice`, intent-based.

    Mirrors compute_istability but counts subsets of each concept's intent
    instead of its extent, processing from the top concept downwards.
    Returns a dict mapping concept_id -> stability value.

    Examples
    ========

    >>> from fca import ConceptLattice, Context
    >>> ct = [[True, False, False, True],
    ...       [True, False, True, False],
    ...       [False, True, True, False],
    ...       [False, True, True, True]]
    >>> objs = [1, 2, 3, 4]
    >>> attrs = ['a', 'b', 'c', 'd']
    >>> c = Context(ct, objs, attrs)
    >>> cl = ConceptLattice(c)
    >>> st = compute_estability(cl)

    """
    # HACK: same Mongo DB engine workaround as in compute_istability.
    top_idx = lattice.index(lattice._top_concept)
    bottom_idx = lattice.index(lattice._bottom_concept)
    for cpt in lattice._concepts:
        cpt.fix_set_field_bug()
    lattice._top_concept = lattice._concepts[top_idx]
    lattice._bottom_concept = lattice._concepts[bottom_idx]
    # end hack
    concepts = ConceptSystem(lattice)
    count = {}
    subsets = {}
    stability = {}
    for concept in concepts:
        # count: concepts with a strictly smaller intent;
        # subsets: total subsets of this concept's intent (2^|intent|).
        count[concept.concept_id] = len([c for c in concepts if c.intent < concept.intent])
        subsets[concept.concept_id] = 2 ** len(concept.intent)
    # Processing starts from the top concept (smallest intent).
    bottom_concepts = set([concepts.top_concept])
    while not len(concepts) == 0:
        bottom_concept = bottom_concepts.pop()
        stability[bottom_concept.concept_id] = subsets[bottom_concept.concept_id] / \
                                               (2 ** len(bottom_concept.intent))
        concepts.remove(bottom_concept)
        for c in concepts:
            if bottom_concept.intent < c.intent:
                # Discount subsets already attributed to the finished concept.
                subsets[c.concept_id] -= subsets[bottom_concept.concept_id]
                count[c.concept_id] -= 1
                if count[c.concept_id] == 0:
                    bottom_concepts.add(c)
    return stability
# Run the module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| 30.54918 | 91 | 0.602093 |
from __future__ import division
from copy import deepcopy
from fca import ConceptSystem
def compute_istability(lattice):
top_idx = lattice.index(lattice._top_concept)
bottom_idx = lattice.index(lattice._bottom_concept)
for cpt in lattice._concepts:
cpt.fix_set_field_bug()
lattice._top_concept = lattice._concepts[top_idx]
lattice._bottom_concept = lattice._concepts[bottom_idx]
concepts = ConceptSystem(lattice)
count = {}
subsets = {}
stability = {}
for concept in concepts:
count[concept.concept_id] = len([c for c in concepts if c.extent < concept.extent])
subsets[concept.concept_id] = 2 ** len(concept.extent)
bottom_concepts = set([concepts.bottom_concept])
while not len(concepts) == 0:
bottom_concept = bottom_concepts.pop()
stability[bottom_concept.concept_id] = subsets[bottom_concept.concept_id] / \
(2 ** len(bottom_concept.extent))
concepts.remove(bottom_concept)
for c in concepts:
if bottom_concept.intent > c.intent:
subsets[c.concept_id] -= subsets[bottom_concept.concept_id]
count[c.concept_id] -= 1
if count[c.concept_id] == 0:
bottom_concepts.add(c)
return stability
def compute_estability(lattice):
top_idx = lattice.index(lattice._top_concept)
bottom_idx = lattice.index(lattice._bottom_concept)
for cpt in lattice._concepts:
cpt.fix_set_field_bug()
lattice._top_concept = lattice._concepts[top_idx]
lattice._bottom_concept = lattice._concepts[bottom_idx]
concepts = ConceptSystem(lattice)
count = {}
subsets = {}
stability = {}
for concept in concepts:
count[concept.concept_id] = len([c for c in concepts if c.intent < concept.intent])
subsets[concept.concept_id] = 2 ** len(concept.intent)
bottom_concepts = set([concepts.top_concept])
while not len(concepts) == 0:
bottom_concept = bottom_concepts.pop()
stability[bottom_concept.concept_id] = subsets[bottom_concept.concept_id] / \
(2 ** len(bottom_concept.intent))
concepts.remove(bottom_concept)
for c in concepts:
if bottom_concept.intent < c.intent:
subsets[c.concept_id] -= subsets[bottom_concept.concept_id]
count[c.concept_id] -= 1
if count[c.concept_id] == 0:
bottom_concepts.add(c)
return stability
if __name__ == '__main__':
import doctest
doctest.testmod()
| true | true |
f7ff7de8b6ebba334ad7cd7d703ce4677d6418ad | 5,235 | py | Python | app.py | bhernan2/sqlalchemy-challenge | c8f7c9e94efa11b55fe349f1e3cfe9ae8f2478f6 | [
"ADSL"
] | null | null | null | app.py | bhernan2/sqlalchemy-challenge | c8f7c9e94efa11b55fe349f1e3cfe9ae8f2478f6 | [
"ADSL"
] | null | null | null | app.py | bhernan2/sqlalchemy-challenge | c8f7c9e94efa11b55fe349f1e3cfe9ae8f2478f6 | [
"ADSL"
] | null | null | null | from flask import Flask, jsonify
import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
#################################################
#Database setup
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# Reflect the existing hawaii.sqlite schema into a new automap model.
Base = automap_base()
# Reflect the tables from the database.
Base.prepare(engine, reflect=True)
# Save references to the reflected table classes.
measurement = Base.classes.measurement
station=Base.classes.station
#################################################
# Flask setup
#################################################
app = Flask(__name__)
#################################################
#Flask routes
#################################################
@app.route("/")
def home():
"""available api routes"""
return(
f"Climate API<br/>"
f"Available Routes: <br/>"
f"Precipitation: /api/v1.0/precipitation<br/>"
f"Stations: /api/v1.0/stations<br/>"
f"Temperature for one year from last data point: /api/v1.0/tobs<br/>"
f"Temperature stat from the start date(yyyy-mm-dd): /api/v1.0/<start><br/>"
f"Temperature stat from start to end dates(yyyy-mm-dd): /api/v1.0/<start>/<end>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
#create session from Python to the DB
session = Session(engine)
#query for dates and precipitation values
results = session.query(measurement.date, measurement.prcp).\
order_by(measurement.date).all()
#convert to list of dictionaries to jsonify
precip_list = []
for date, prcp in results:
new_dict = {}
new_dict[date] = prcp
precip_list.append(new_dict)
session.close()
return jsonify(precip_list)
@app.route("/api/v1.0/stations")
def stations():
#create session from Python to the DB
session = Session(engine)
stations = {}
#query all stations
results = session.query(station.station, station.name).all()
for s, name in results:
stations[s] = name
session.close()
return jsonify(stations)
@app.route("/api/v1.0/tobs")
def tobs():
#create session from Python to the DB
session = Session(engine)
#get the last date contained in the dataset and date from one year ago
last_date = session.query(measurement.date).order_by(measurement.date.desc()).first()
one_yr_ago = (dt.datetime.strptime(last_date[0],'%Y-%m-%d') \
- dt.timedelta(days=365)).strftime('%Y-%m-%d')
#query for dates and temperature values
results = session.query(measurement.date, measurement.tobs).\
filter(measurement.date >= one_yr_ago).\
order_by(measurement.date).all()
#convert dictionaries to jsonify
date_tobs = []
for date, tobs in results:
new_dict = {}
new_dict[date] = tobs
date_tobs.append(new_dict)
session.close()
return jsonify(date_tobs)
@app.route("/api/v1.0/<start>")
def temp_range_start(start):
"""TMIN, TAVG, and TMAX per date starting from a starting date.
Args:
start (string): A date string in the format %Y-%m-%d
Returns:
TMIN, TAVE, and TMAX
"""
#create session from Python to the DB
session = Session(engine)
return_list = []
results = session.query(measurement.date,\
func.min(measurement.tobs), \
func.avg(measurement.tobs), \
func.max(measurement.tobs)).\
filter(measurement.date >= start).\
group_by(measurement.date).all()
for date, min, avg, max in results:
new_dict = {}
new_dict["Date"] = date
new_dict["TMIN"] = min
new_dict["TAVG"] = avg
new_dict["TMAX"] = max
return_list.append(new_dict)
session.close()
return jsonify(return_list)
@app.route("/api/v1.0/<start>/<end>")
def temp_range_start_end(start,end):
"""TMIN, TAVG, and TMAX per date for a date range.
Args:
start (string): A date string in the format %Y-%m-%d
end (string): A date string in the format %Y-%m-%d
Returns:
TMIN, TAVE, and TMAX
"""
#create session from Python to the DB
session = Session(engine)
return_list = []
results = session.query( Measurement.date,\
func.min(measurement.tobs), \
func.avg(measurement.tobs), \
func.max(measurement.tobs)).\
filter(and_(measurement.date >= start, measurement.date <= end)).\
group_by(measurement.date).all()
for date, min, avg, max in results:
new_dict = {}
new_dict["Date"] = date
new_dict["TMIN"] = min
new_dict["TAVG"] = avg
new_dict["TMAX"] = max
return_list.append(new_dict)
session.close()
return jsonify(return_list)
# Run the Flask development server only when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
| 29.410112 | 100 | 0.574021 | from flask import Flask, jsonify
import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
| true | true |
f7ff7e96f09f2605ad966a228deacee132e2b18d | 2,478 | py | Python | SS_Conv_lib/ss_conv/sp_ops/tensor.py | Gorilla-Lab-SCUT/SS-Conv | 47d21fdb8f8e02f677201d86295f6ef1c4d1f059 | [
"MIT"
] | 16 | 2021-11-16T08:49:59.000Z | 2022-01-08T07:57:12.000Z | SS_Conv_lib/ss_conv/sp_ops/tensor.py | Gorilla-Lab-SCUT/SS-Conv | 47d21fdb8f8e02f677201d86295f6ef1c4d1f059 | [
"MIT"
] | 2 | 2021-11-18T09:30:31.000Z | 2022-02-23T07:19:13.000Z | SS_Conv_lib/ss_conv/sp_ops/tensor.py | Gorilla-Lab-SCUT/SS-Conv | 47d21fdb8f8e02f677201d86295f6ef1c4d1f059 | [
"MIT"
] | null | null | null | # Modified from https://github.com/traveller59/spconv/tree/v1.1
import numpy as np
import torch
def scatter_nd(indices, updates, shape):
    """PyTorch edition of TensorFlow's scatter_nd.

    Writes `updates` into a zero tensor of size `shape` at the positions given
    by `indices` (last dimension of `indices` addresses the leading dims of the
    output).  Contains no duplicate handling: repeated indices overwrite rather
    than accumulate (unlike TensorFlow's additive behavior), so use carefully.
    """
    ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device)
    ndim = indices.shape[-1]
    # Trailing output dims not addressed by indices are broadcast from updates.
    output_shape = list(indices.shape[:-1]) + shape[indices.shape[-1]:]
    flatted_indices = indices.view(-1, ndim)
    slices = [flatted_indices[:, i] for i in range(ndim)]
    slices += [Ellipsis]
    # Index with a tuple: indexing a tensor with a plain Python list of
    # tensors is deprecated (and an error in modern PyTorch).
    ret[tuple(slices)] = updates.view(*output_shape)
    return ret
class SparseTensor(object):
    """A batch of sparse per-voxel features addressed by integer indices.

    features: (N, C) tensor of feature vectors.
    indices: (N, 1 + ndim) int32 tensor; column 0 is the batch index, the
        remaining columns are spatial coordinates.
    """

    def __init__(self, features, indices, spatial_shape, batch_size, grid=None):
        """
        Args:
            grid: pre-allocated grid tensor. should be used when the volume of
                spatial shape is very large.
        """
        self.features = features
        # The kernels need int32 indices.  Tensor.int() returns a converted
        # copy, so it must be assigned back — the original code called
        # `self.indices.int()` and discarded the result (a no-op bug).
        if indices.dtype != torch.int32:
            indices = indices.int()
        self.indices = indices
        self.spatial_shape = spatial_shape
        self.batch_size = batch_size
        # Cache of indice pairs keyed by layer name, filled by conv layers.
        self.indice_dict = {}
        self.grid = grid

    @property
    def spatial_size(self):
        # Total number of voxels in the dense spatial volume.
        return np.prod(self.spatial_shape)

    def find_indice_pair(self, key):
        """Return the cached indice pair for `key`, or None if absent."""
        if key is None:
            return None
        if key in self.indice_dict:
            return self.indice_dict[key]
        return None

    def dense(self, channels_first=True):
        """Scatter the sparse features into a dense (B, C, ...) or (B, ..., C) tensor."""
        output_shape = [self.batch_size] + list(self.spatial_shape) + [self.features.shape[1]]
        res = scatter_nd(self.indices.long(), self.features, output_shape)
        if not channels_first:
            return res
        # Move the channel axis from last position to position 1.
        ndim = len(self.spatial_shape)
        trans_params = list(range(0, ndim + 1))
        trans_params.insert(1, ndim + 1)
        return res.permute(*trans_params).contiguous()

    def get_offsets(self):
        """Return cumulative per-batch point counts: [0, n0, n0+n1, ...]."""
        offsets = [0]
        for i in range(self.batch_size):
            is_i = (self.indices[:, 0] == i).sum()
            offsets.append(is_i.item() + offsets[-1])
        offsets = torch.tensor(offsets).int().to(self.features.device).detach()
        return offsets

    @property
    def sparity(self):
        # (sic — "sparsity") fraction of occupied voxels across the batch;
        # property name kept for backward compatibility.
        return self.indices.shape[0] / np.prod(self.spatial_shape) / self.batch_size
| 35.4 | 95 | 0.616626 |
import numpy as np
import torch
def scatter_nd(indices, updates, shape):
ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device)
ndim = indices.shape[-1]
output_shape = list(indices.shape[:-1]) + shape[indices.shape[-1]:]
flatted_indices = indices.view(-1, ndim)
slices = [flatted_indices[:, i] for i in range(ndim)]
slices += [Ellipsis]
ret[slices] = updates.view(*output_shape)
return ret
class SparseTensor(object):
def __init__(self, features, indices, spatial_shape, batch_size, grid=None):
self.features = features
self.indices = indices
if self.indices.dtype != torch.int32:
self.indices.int()
self.spatial_shape = spatial_shape
self.batch_size = batch_size
self.indice_dict = {}
self.grid = grid
@property
def spatial_size(self):
return np.prod(self.spatial_shape)
def find_indice_pair(self, key):
if key is None:
return None
if key in self.indice_dict:
return self.indice_dict[key]
return None
def dense(self, channels_first=True):
output_shape = [self.batch_size] + list(self.spatial_shape) + [self.features.shape[1]]
res = scatter_nd(self.indices.long(), self.features, output_shape)
if not channels_first:
return res
ndim = len(self.spatial_shape)
trans_params = list(range(0, ndim + 1))
trans_params.insert(1, ndim + 1)
return res.permute(*trans_params).contiguous()
def get_offsets(self):
offsets = [0]
for i in range(self.batch_size):
is_i = (self.indices[:,0]==i).sum()
offsets.append(is_i.item()+offsets[-1])
offsets = torch.tensor(offsets).int().to(self.features.device).detach()
return offsets
@property
def sparity(self):
return self.indices.shape[0] / np.prod(self.spatial_shape) / self.batch_size
| true | true |
f7ff7f04c3516f64ec459968e23c349f5062921f | 2,265 | py | Python | face_alignment.py | PVSemk/ABAW2020TNT | 3cf667e0958f411b510c734755da5e30a091df11 | [
"MIT"
] | 28 | 2020-03-11T09:27:22.000Z | 2022-03-05T15:42:38.000Z | face_alignment.py | PVSemk/ABAW2020TNT | 3cf667e0958f411b510c734755da5e30a091df11 | [
"MIT"
] | 9 | 2020-05-18T20:37:44.000Z | 2022-02-02T10:01:01.000Z | face_alignment.py | PVSemk/ABAW2020TNT | 3cf667e0958f411b510c734755da5e30a091df11 | [
"MIT"
] | 9 | 2020-02-28T07:53:26.000Z | 2022-01-24T09:26:38.000Z | """
Code from
"Two-Stream Aural-Visual Affect Analysis in the Wild"
Felix Kuhnke and Lars Rumberg and Joern Ostermann
Please see https://github.com/kuhnkeF/ABAW2020TNT
"""
import cv2 as cv
import numpy as np
import os
def align_rescale_face(image, M):
    """Warp `image` with affine matrix `M` into the canonical 112x112 aligned face crop."""
    return cv.warpAffine(image, M, (112, 112), flags=cv.INTER_CUBIC, borderValue=0.0)
def render_img_and_mask(img, mask, frame_nr, render_path, mask_path):
    """Write the frame (channel-swapped) and its landmark mask (grayscale) as JPEGs.

    Both files are named by the zero-padded frame number; the frame is saved
    at quality 95 and the mask at quality 100.
    """
    filename = str(frame_nr).zfill(5) + '.jpg'
    frame = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    cv.imwrite(os.path.join(render_path, filename), frame,
               [int(cv.IMWRITE_JPEG_QUALITY), 95])
    frame_mask = cv.cvtColor(mask, cv.COLOR_BGR2GRAY)
    cv.imwrite(os.path.join(mask_path, filename), frame_mask,
               [int(cv.IMWRITE_JPEG_QUALITY), 100])
def draw_mask(points, image):
    """Draw 68-point facial-landmark polylines onto `image` in place.

    Each feature group gets a distinct gray level so groups can be told
    apart in the rendered mask.
    """
    line_type = cv.LINE_8
    # (landmark slice, closed polygon?, gray level) — drawn in this order.
    groups = (
        (points[48:60, :], True, 255),   # mouth, outer contour
        (points[17:22, :], False, 223),  # left eyebrow
        (points[22:27, :], False, 191),  # right eyebrow
        (points[36:42, :], True, 159),   # left eye
        (points[42:48, :], True, 127),   # right eye
        (points[28:31, :], False, 63),   # nose bridge
        (points[6:11, :], False, 31),    # chin segment
    )
    for landmark_pts, closed, gray in groups:
        pts = [np.rint(landmark_pts).reshape(-1, 1, 2).astype(np.int32)]
        cv.polylines(image, pts, closed, color=(gray, gray, gray),
                     thickness=1, lineType=line_type)
| 48.191489 | 91 | 0.690508 | import cv2 as cv
import numpy as np
import os
def align_rescale_face(image, M):
aligned = cv.warpAffine(image, M, (112, 112), flags=cv.INTER_CUBIC, borderValue=0.0)
return aligned
def render_img_and_mask(img, mask, frame_nr, render_path, mask_path):
frame_nr_str = str(frame_nr).zfill(5)
frame = cv.cvtColor(img, cv.COLOR_BGR2RGB)
output_filepath = os.path.join(render_path, frame_nr_str + '.jpg')
cv.imwrite(output_filepath, frame, [int(cv.IMWRITE_JPEG_QUALITY), 95])
frame_mask = cv.cvtColor(mask, cv.COLOR_BGR2GRAY)
output_filepath = os.path.join(mask_path, frame_nr_str + '.jpg')
cv.imwrite(output_filepath, frame_mask, [int(cv.IMWRITE_JPEG_QUALITY), 100])
def draw_mask(points, image):
line_type = cv.LINE_8
left_eyebrow = points[17:22, :]
right_eyebrow = points[22:27, :]
nose_bridge = points[28:31, :]
chin = points[6:11, :]
mouth_outer = points[48:60, :]
left_eye = points[36:42, :]
right_eye = points[42:48, :]
pts = [np.rint(mouth_outer).reshape(-1, 1, 2).astype(np.int32)]
cv.polylines(image, pts, True, color=(255, 255, 255), thickness=1, lineType=line_type)
pts = [np.rint(left_eyebrow).reshape(-1, 1, 2).astype(np.int32)]
cv.polylines(image, pts, False, color=(223, 223, 223), thickness=1, lineType=line_type)
pts = [np.rint(right_eyebrow).reshape(-1, 1, 2).astype(np.int32)]
cv.polylines(image, pts, False, color=(191, 191, 191), thickness=1, lineType=line_type)
pts = [np.rint(left_eye).reshape(-1, 1, 2).astype(np.int32)]
cv.polylines(image, pts, True, color=(159, 159, 159), thickness=1, lineType=line_type)
pts = [np.rint(right_eye).reshape(-1, 1, 2).astype(np.int32)]
cv.polylines(image, pts, True, color=(127, 127, 127), thickness=1, lineType=line_type)
pts = [np.rint(nose_bridge).reshape(-1, 1, 2).astype(np.int32)]
cv.polylines(image, pts, False, color=(63, 63, 63), thickness=1, lineType=line_type)
pts = [np.rint(chin).reshape(-1, 1, 2).astype(np.int32)]
cv.polylines(image, pts, False, color=(31, 31, 31), thickness=1, lineType=line_type)
| true | true |
f7ff7f27cabc44f8774bf119552d01cf2efb441a | 8,645 | py | Python | home/pi/TP-IoT/send_receive_simple_sensor_data.py | lupyuen/RaspberryPiImage | 664e8a74b4628d710feab5582ef59b344b9ffddd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | home/pi/TP-IoT/send_receive_simple_sensor_data.py | lupyuen/RaspberryPiImage | 664e8a74b4628d710feab5582ef59b344b9ffddd | [
"Apache-2.0"
] | null | null | null | home/pi/TP-IoT/send_receive_simple_sensor_data.py | lupyuen/RaspberryPiImage | 664e8a74b4628d710feab5582ef59b344b9ffddd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | #!/usr/bin/env python3
# Send DHT22 sensor data periodically to AWS IoT and process actuation commands received.
import time
import datetime
import ssl
import json
import paho.mqtt.client as mqtt
import dht22
import pigpio
import RPi.GPIO as GPIO
# TODO: Change this to the name of our Raspberry Pi, also known as our "Thing Name"
deviceName = "g88pi"
# Public certificate of our Raspberry Pi, as provided by AWS IoT.
deviceCertificate = "tp-iot-certificate.pem.crt"
# Private key of our Raspberry Pi, as provided by AWS IoT.
devicePrivateKey = "tp-iot-private.pem.key"
# Root certificate to authenticate AWS IoT when we connect to their server.
awsCert = "aws-iot-rootCA.crt"
# Set to True by the on_connect callback; main() waits on it before publishing.
isConnected = False
# Assume we connected the DHT22 Sensor, YwRobot Light Sensor, L-934ID-5V LED as follows:
# DHT22/AM2302 --> Raspberry Pi:
#    + --> GPIO 8
#    Out --> GPIO 22
#    - --> Ground (Pin 14)
# GPIO pin supplying power to the DHT22.
power = 8
# GPIO pin reading the DHT22 data line.
temp_sensor = 22
# YwRobot Light Sensor --> Raspberry Pi:
#    Ground --> Ground (Pin 9)
#    VCC --> 3.3V Power (Pin 1)
#    DOUT --> GPIO 4
light_sensor = 4
# L-934ID-5V LED --> Raspberry Pi
#    + --> GPIO 25
#    Ground --> Ground (Pin 20)
led = 25
# This is the main logic of the program. We connect to AWS IoT via MQTT, send sensor data periodically to AWS IoT,
# and handle any actuation commands received from AWS IoT.
def main():
    """Connect to AWS IoT over MQTT, then publish DHT22 readings every 30 s forever.

    Actuation commands received from AWS IoT are handled by the on_message
    callback registered here.
    """
    global isConnected
    # Create an MQTT client for connecting to AWS IoT via MQTT.
    client = mqtt.Client(deviceName + "_sr")  # Client ID must be unique because AWS will disconnect any duplicates.
    client.on_connect = on_connect  # When connected, call on_connect.
    client.on_message = on_message  # When message received, call on_message.
    client.on_log = on_log  # When logging debug messages, call on_log.
    # Set the certificates and private key for connecting to AWS IoT. TLS 1.2 is mandatory for AWS IoT and is supported
    # only in Python 3.4 and later, compiled with OpenSSL 1.0.1 and later.
    client.tls_set(awsCert, deviceCertificate, devicePrivateKey, ssl.CERT_REQUIRED, ssl.PROTOCOL_TLSv1_2)
    # Connect to AWS IoT server. Use AWS command line "aws iot describe-endpoint" to get the address.
    print("Connecting to AWS IoT...")
    client.connect("A1P01IYM2DOZA0.iot.us-west-2.amazonaws.com", 8883, 60)
    # Start a background thread to process the MQTT network commands concurrently, including auto-reconnection.
    client.loop_start()
    # Prepare the DHT22 sensor. Ensure we don't read from the DHT22 within 2 seconds, else it will eventually hang.
    dht22_sensor = dht22.Sensor(pigpio.pi(), temp_sensor, power=power)
    # Set the pin numbering to the BCM (same as GPIO) numbering format.
    GPIO.setmode(GPIO.BCM)
    # We tell the system that the LED port should be an output port, not input.
    GPIO.setup(led, GPIO.OUT)
    time.sleep(1)
    # Loop forever.
    while True:
        try:
            # If we are not connected yet to AWS IoT, wait 1 second and try again.
            if not isConnected:
                time.sleep(1)
                continue
            # Read DHT22 sensor values. Skip if we detect an error.
            dht22_sensor.trigger()
            if dht22_sensor.bad_checksum() + dht22_sensor.short_message() + dht22_sensor.missing_message() + \
                    dht22_sensor.sensor_resets() != 0 or dht22_sensor.temperature() < 0 or dht22_sensor.humidity() < 0:
                print(("DHT22 may be connected incorrectly: temperature={:3.1f}, humidity={:3.1f}, bad_checksum={}, " +
                       "short_message={}, missing_message={}, sensor_resets={}")
                      .format(dht22_sensor.temperature(), dht22_sensor.humidity(), dht22_sensor.bad_checksum(),
                              dht22_sensor.short_message(), dht22_sensor.missing_message(),
                              dht22_sensor.sensor_resets()))
                continue
            # Prepare our sensor data in JSON format.
            payload = {
                "state": {
                    "reported": {
                        "temperature": round(dht22_sensor.temperature(), 1),
                        "humidity": round(dht22_sensor.humidity(), 1),
                        "timestamp": datetime.datetime.now().isoformat()
                    }
                }
            }
            print("Sending sensor data to AWS IoT...\n" +
                  json.dumps(payload, indent=4, separators=(',', ': ')))
            # Publish our sensor data to AWS IoT via the MQTT topic, also known as updating our "Thing Shadow".
            client.publish("$aws/things/" + deviceName + "/shadow/update", json.dumps(payload))
            print("Sent to AWS IoT")
            # Wait 30 seconds before sending the next set of sensor data.
            time.sleep(30)
        except KeyboardInterrupt:
            # Stop the program when we press Ctrl-C.
            break
        except Exception as e:
            # For all other errors, we wait a while and resume.
            print("Exception: " + str(e))
            time.sleep(10)
            continue
# This is called when we are connected to AWS IoT via MQTT.
# We subscribe for notifications of desired state updates.
def on_connect(client, userdata, flags, rc):
    """paho-mqtt callback fired when the MQTT connection to AWS IoT is established.

    Sets the global connection flag (so main() starts publishing) and
    subscribes to our Thing Shadow's accepted-update topic for desired-state
    notifications.
    """
    global isConnected
    isConnected = True
    print("Connected to AWS IoT")
    # Subscribe to our MQTT topic so that we will receive notifications of updates.
    topic = "$aws/things/" + deviceName + "/shadow/update/accepted"
    print("Subscribing to MQTT topic " + topic)
    client.subscribe(topic)
# This is called when we receive a subscription notification from AWS IoT.
# If this is an actuation command, we execute it.
def on_message(client, userdata, msg):
    """paho-mqtt message callback: log the shadow update and actuate desired state.

    The payload is UTF-8 encoded JSON; only a "state"."desired" section
    carries actuation commands — reported-state echoes are ignored.
    """
    body = json.loads(msg.payload.decode("utf-8"))
    print("Received message, topic: " + msg.topic + ", payload:\n" +
          json.dumps(body, indent=4, separators=(',', ': ')))
    desired = (body.get("state") or {}).get("desired")
    if desired is None:
        return
    # Execute each desired attribute/value pair, e.g. "led"="on".
    for attribute, value in desired.items():
        actuate(client, attribute, value)
# Drive the actuators for a desired attribute/value pair, e.g. "led"/"on".
def actuate(client, attribute, value):
    """Apply a desired-state attribute to the hardware and report it back."""
    if attribute == "timestamp":
        # Timestamps are informational only; nothing to actuate.
        return
    print("Setting " + attribute + " to " + value + "...")
    if attribute == "led":
        if value == "on":
            GPIO.output(led, True)
            send_reported_state(client, "led", "on")
            return
        if value == "off":
            GPIO.output(led, False)
            send_reported_state(client, "led", "off")
            return
        if value == "flash1":
            # Pulse the LED once: on for one second, then off.
            GPIO.output(led, True)
            send_reported_state(client, "led", "on")
            time.sleep(1)
            GPIO.output(led, False)
            send_reported_state(client, "led", "off")
            time.sleep(1)
            return
    # Unknown attribute or value: report instead of failing.
    print("Error: Don't know how to set " + attribute + " to " + value)
# Report an actuator's new state back to the Thing Shadow, e.g. "led": "on".
def send_reported_state(client, attribute, value):
    """Publish the reported state of a single attribute to the device shadow."""
    # Shadow documents carry the reported value plus a timestamp.
    payload = {
        "state": {
            "reported": {
                attribute: value,
                "timestamp": datetime.datetime.now().isoformat()
            }
        }
    }
    print("Sending sensor data to AWS IoT...\n" +
          json.dumps(payload, indent=4, separators=(',', ': ')))
    # Publishing to the update topic is how a "Thing Shadow" gets updated.
    shadow_topic = "$aws/things/" + deviceName + "/shadow/update"
    client.publish(shadow_topic, json.dumps(payload))
    print("Sent to AWS IoT")
# Trace MQTT client activity for debugging.
def on_log(client, userdata, level, buf):
    """Print a log line emitted by the MQTT client."""
    print("Log: " + buf)
# Start the main program.
# NOTE(review): consider guarding with `if __name__ == "__main__":` so importing
# this module does not immediately connect to AWS IoT — confirm nothing imports it.
main()
| 40.209302 | 120 | 0.628918 |
import time
import datetime
import ssl
import json
import paho.mqtt.client as mqtt
import dht22
import pigpio
import RPi.GPIO as GPIO
# Device identity and TLS credential file names for AWS IoT.
deviceName = "g88pi"
deviceCertificate = "tp-iot-certificate.pem.crt"
devicePrivateKey = "tp-iot-private.pem.key"
awsCert = "aws-iot-rootCA.crt"
# Set to True by the on_connect callback once the MQTT link is up.
isConnected = False
# Pin assignments — BCM numbering (main() calls GPIO.setmode(GPIO.BCM)).
power = 8
temp_sensor = 22
light_sensor = 4
led = 25
def main():
    """Connect to AWS IoT over MQTT and publish DHT22 readings every 30 seconds."""
    global isConnected
    client = mqtt.Client(deviceName + "_sr")
    # Wire up the MQTT lifecycle callbacks defined below.
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_log = on_log
    # Mutual-TLS authentication with the device certificate and private key.
    client.tls_set(awsCert, deviceCertificate, devicePrivateKey, ssl.CERT_REQUIRED, ssl.PROTOCOL_TLSv1_2)
    print("Connecting to AWS IoT...")
    client.connect("A1P01IYM2DOZA0.iot.us-west-2.amazonaws.com", 8883, 60)
    # Run the MQTT network loop on a background thread.
    client.loop_start()
    dht22_sensor = dht22.Sensor(pigpio.pi(), temp_sensor, power=power)
    # Set the pin numbering to the BCM (same as GPIO) numbering format.
    GPIO.setmode(GPIO.BCM)
    # We tell the system that the LED port should be an output port, not input.
    GPIO.setup(led, GPIO.OUT)
    time.sleep(1)
    # Loop forever.
    while True:
        try:
            # If we are not connected yet to AWS IoT, wait 1 second and try again.
            if not isConnected:
                time.sleep(1)
                continue
            # Read DHT22 sensor values. Skip if we detect an error.
            dht22_sensor.trigger()
            if dht22_sensor.bad_checksum() + dht22_sensor.short_message() + dht22_sensor.missing_message() + \
                    dht22_sensor.sensor_resets() != 0 or dht22_sensor.temperature() < 0 or dht22_sensor.humidity() < 0:
                print(("DHT22 may be connected incorrectly: temperature={:3.1f}, humidity={:3.1f}, bad_checksum={}, " +
                       "short_message={}, missing_message={}, sensor_resets={}")
                      .format(dht22_sensor.temperature(), dht22_sensor.humidity(), dht22_sensor.bad_checksum(),
                              dht22_sensor.short_message(), dht22_sensor.missing_message(),
                              dht22_sensor.sensor_resets()))
                continue
            # Prepare our sensor data in JSON format.
            payload = {
                "state": {
                    "reported": {
                        "temperature": round(dht22_sensor.temperature(), 1),
                        "humidity": round(dht22_sensor.humidity(), 1),
                        "timestamp": datetime.datetime.now().isoformat()
                    }
                }
            }
            print("Sending sensor data to AWS IoT...\n" +
                  json.dumps(payload, indent=4, separators=(',', ': ')))
            # Publish our sensor data to AWS IoT via the MQTT topic, also known as updating our "Thing Shadow".
            client.publish("$aws/things/" + deviceName + "/shadow/update", json.dumps(payload))
            print("Sent to AWS IoT")
            # Wait 30 seconds before sending the next set of sensor data.
            time.sleep(30)
        except KeyboardInterrupt:
            # Stop the program when we press Ctrl-C.
            break
        except Exception as e:
            # For all other errors, we wait a while and resume.
            print("Exception: " + str(e))
            time.sleep(10)
            continue
# This is called when we are connected to AWS IoT via MQTT.
# We subscribe for notifications of desired state updates.
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: mark the link as up and subscribe for updates."""
    global isConnected
    isConnected = True
    print("Connected to AWS IoT")
    # Subscribe to our MQTT topic so that we will receive notifications of updates.
    topic = "$aws/things/" + deviceName + "/shadow/update/accepted"
    print("Subscribing to MQTT topic " + topic)
    client.subscribe(topic)
# This is called when we receive a subscription notification from AWS IoT.
# If this is an actuation command, we execute it.
def on_message(client, userdata, msg):
    """MQTT message callback: decode a shadow update and actuate if desired state is present."""
    # Convert the JSON payload to a Python dictionary.
    # The payload is in binary format so we need to decode as UTF-8.
    payload2 = json.loads(msg.payload.decode("utf-8"))
    print("Received message, topic: " + msg.topic + ", payload:\n" +
          json.dumps(payload2, indent=4, separators=(',', ': ')))
    # If there is a desired state in this message, then we actuate,
    # e.g. if we see "led=on", we switch on the LED.
    if payload2.get("state") is not None and payload2["state"].get("desired") is not None:
        # Get the desired state and loop through all attributes inside.
        desired_state = payload2["state"]["desired"]
        for attribute in desired_state:
            # We handle the attribute and desired value by actuating.
            value = desired_state.get(attribute)
            actuate(client, attribute, value)
# Control my actuators based on the specified attribute and value,
# e.g. "led=on" will switch on my LED.
def actuate(client, attribute, value):
    """Drive an actuator for the given attribute/value pair, e.g. "led"="on"."""
    if attribute == "timestamp":
        # Ignore the timestamp attribute, it's only for info.
        return
    print("Setting " + attribute + " to " + value + "...")
    if attribute == "led":
        if value == "on":
            # Switch on LED and report the new state to the shadow.
            GPIO.output(led, True)
            send_reported_state(client, "led", "on")
            return
        elif value == "off":
            # Switch off LED.
            GPIO.output(led, False)
            send_reported_state(client, "led", "off")
            return
        elif value == "flash1":
            # Switch on LED, wait 1 second, then switch it off again.
            GPIO.output(led, True)
            send_reported_state(client, "led", "on")
            time.sleep(1)
            GPIO.output(led, False)
            send_reported_state(client, "led", "off")
            time.sleep(1)
            return
    # Unknown attribute or value.
    print("Error: Don't know how to set " + attribute + " to " + value)
# Send the reported state of our actuator tp AWS IoT after it has been triggered, e.g. "led": "on".
def send_reported_state(client, attribute, value):
    """Report an actuator's new state to the device shadow, e.g. "led": "on"."""
    # Prepare our sensor data in JSON format.
    payload = {
        "state": {
            "reported": {
                attribute: value,
                "timestamp": datetime.datetime.now().isoformat()
            }
        }
    }
    print("Sending sensor data to AWS IoT...\n" +
          json.dumps(payload, indent=4, separators=(',', ': ')))
    # Publish our sensor data to AWS IoT via the MQTT topic, also known as updating our "Thing Shadow".
    client.publish("$aws/things/" + deviceName + "/shadow/update", json.dumps(payload))
    print("Sent to AWS IoT")
# Print out log messages for tracing.
def on_log(client, userdata, level, buf):
    """Print a log line emitted by the MQTT client (for tracing)."""
    print("Log: " + buf)
# Start the main program.
# NOTE(review): consider guarding with `if __name__ == "__main__":` so importing
# this module does not immediately connect to AWS IoT — confirm nothing imports it.
main()
| true | true |
f7ff7fe2e92fb27fbdd160ff97682e7e6331c076 | 1,649 | py | Python | tests/local/warehouse/metrics/test_schema.py | udemy/soda-sql | 53dd2218770c74367787f1d85ee0d48d498b51eb | [
"Apache-2.0"
] | null | null | null | tests/local/warehouse/metrics/test_schema.py | udemy/soda-sql | 53dd2218770c74367787f1d85ee0d48d498b51eb | [
"Apache-2.0"
] | 1 | 2021-02-23T20:47:40.000Z | 2021-03-06T09:03:48.000Z | tests/local/warehouse/metrics/test_schema.py | udemy/soda-sql | 53dd2218770c74367787f1d85ee0d48d498b51eb | [
"Apache-2.0"
] | 1 | 2021-02-23T20:41:24.000Z | 2021-02-23T20:41:24.000Z | # Copyright 2020 Soda
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sodasql.scan.metric import Metric
from tests.common.sql_test_case import SqlTestCase
class TestSchema(SqlTestCase):
    """Checks that a scan yields a SCHEMA measurement with correct column types."""

    def test_schema_measurement(self):
        """Scan a three-column table and verify each column's reported type."""
        dialect = self.warehouse.dialect
        self.sql_recreate_table(
            [f"id {self.dialect.data_type_varchar_255}",
             f"name {self.dialect.data_type_varchar_255}",
             f"size {self.dialect.data_type_integer}"],
            ["('1', 'one', 1)"])
        scan_result = self.scan()
        schema_measurement = scan_result.find_measurement(Metric.SCHEMA)
        self.assertIsNotNone(schema_measurement)
        # Index the measured columns by lower-cased name for case-insensitive lookup.
        columns = {col['name'].lower(): col for col in schema_measurement.value}
        self.assertTrue(dialect.is_text(columns['id']['type']))
        self.assertTrue(dialect.is_text(columns['name']['type']))
        self.assertTrue(dialect.is_number(columns['size']['type']))
        # The scan should not have produced a row-count measurement.
        self.assertIsNone(scan_result.find_measurement(Metric.ROW_COUNT))
| 38.348837 | 96 | 0.70285 |
from sodasql.scan.metric import Metric
from tests.common.sql_test_case import SqlTestCase
class TestSchema(SqlTestCase):
def test_schema_measurement(self):
dialect = self.warehouse.dialect
self.sql_recreate_table(
[f"id {self.dialect.data_type_varchar_255}",
f"name {self.dialect.data_type_varchar_255}",
f"size {self.dialect.data_type_integer}"],
["('1', 'one', 1)"])
scan_result = self.scan()
measurement = scan_result.find_measurement(Metric.SCHEMA)
self.assertIsNotNone(measurement)
columns_by_name_lower = {column['name'].lower(): column for column in measurement.value}
column = columns_by_name_lower['id']
self.assertTrue(dialect.is_text(column['type']))
column = columns_by_name_lower['name']
self.assertTrue(dialect.is_text(column['type']))
column = columns_by_name_lower['size']
self.assertTrue(dialect.is_number(column['type']))
self.assertIsNone(scan_result.find_measurement(Metric.ROW_COUNT))
| true | true |
f7ff81cb5d1842bd07cc97ea8cf9a175840000c9 | 48,186 | py | Python | src/routes/routes_training.py | Supreeth-Shetty/Projectathon---Simplified-AI | 3fc26a58a9370d119811ac4e864af977c21f6c40 | [
"MIT"
] | 8 | 2021-12-23T06:05:00.000Z | 2021-12-26T05:39:00.000Z | src/routes/routes_training.py | Supreeth-Shetty/Projectathon---Simplified-AI | 3fc26a58a9370d119811ac4e864af977c21f6c40 | [
"MIT"
] | null | null | null | src/routes/routes_training.py | Supreeth-Shetty/Projectathon---Simplified-AI | 3fc26a58a9370d119811ac4e864af977c21f6c40 | [
"MIT"
] | 2 | 2021-12-23T06:10:11.000Z | 2021-12-23T07:24:28.000Z | from flask import Blueprint, redirect, url_for, render_template, request, session
from src.constants.model_params import Ridge_Params, Lasso_Params, ElasticNet_Params, RandomForestRegressor_Params, \
SVR_params, AdabootRegressor_Params, \
GradientBoostRegressor_Params
from src.constants.model_params import KmeansClustering_Params, DbscanClustering_Params, AgglomerativeClustering_Params
from src.constants.model_params import LogisticRegression_Params, SVC_Params, KNeighborsClassifier_Params, \
DecisionTreeClassifier_Params, RandomForestClassifier_Params, GradientBoostingClassifier_Params, \
AdaBoostClassifier_Params
from src.constants.constants import ACTIVATION_FUNCTIONS, CLASSIFICATION_MODELS, CLUSTERING_MODELS, OPTIMIZERS, \
REGRESSION_LOSS, POOLING
from flask.json import jsonify
from src.constants.model_params import DecisionTreeRegressor_Params, LinearRegression_Params
from src.model.custom.classification_models import ClassificationModels
from src.model.custom.regression_models import RegressionModels
from src.model.custom.clustering_models import ClusteringModels
from src.preprocessing.preprocessing_helper import Preprocessing
from src.constants.constants import REGRESSION_MODELS
from src.utils.common.prediction_helper import make_prediction
from src.utils.databases.mysql_helper import MySqlHelper
from werkzeug.utils import secure_filename
import os
from src.utils.common.common_helper import get_param_value, load_prediction_result, load_project_model, \
read_config, save_prediction_result, save_project_model
import pandas as pd
from src.utils.common.data_helper import load_data
from src.model.auto.Auto_classification import ModelTrain_Classification
from src.model.auto.Auto_regression import ModelTrain_Regression
from src.feature_engineering.feature_engineering_helper import FeatureEngineering
from loguru import logger
from from_root import from_root
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, accuracy_score, precision_score, \
f1_score, recall_score
from src.utils.common.project_report_helper import ProjectReports
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import numpy as np
from sklearn.model_selection import train_test_split
from prettytable import PrettyTable
from src.utils.common.plotly_helper import PlotlyHelper
# Blueprint holding all model-training / prediction routes.
app_training = Blueprint('training', __name__)
config_args = read_config("./config.yaml")
mysql = MySqlHelper.get_connection_obj()
# File sink for application logs, path taken from config.yaml.
log_path = os.path.join(from_root(), config_args['logs']['logger'], config_args['logs']['generallogs_file'])
logger.add(sink=log_path, format="[{time:YYYY-MM-DD HH:mm:ss.SSS} - {level} - {module} ] - {message}", level="INFO")
UPLOAD_FOLDER = config_args['dir_structure']['upload_folder']
# NOTE(review): this module-level ALLOWED_EXTENSIONS ({'zip'}) is shadowed by a
# local list of csv/tsv/json inside prediction() — confirm which one is intended.
ALLOWED_EXTENSIONS = set(['zip'])
@app_training.route('/model_training/<action>', methods=['GET'])
def model_training(action):
    """Render the model-training pages (help / auto_training / custom_training).

    Redirects to the home page when no project or data is active, and to the
    target-column page when a supervised project has no target selected.

    BUG FIX: the original fell back to `redirect(url_for('/'))` — `url_for`
    expects an endpoint name, so passing the path '/' raised a BuildError at
    runtime. It also assigned `target_column` three times, with the last
    assignment silently overwriting the "" default with a possible None.
    """
    try:
        # Guard clauses: no project in session or no data loaded -> home page.
        if 'pid' not in session:
            return redirect('/')
        df = load_data()
        if df is None:
            return redirect('/')
        target_column = session['target_column'] if session['target_column'] is not None else ""
        cols_ = [col for col in df.columns if col != target_column]
        # Block training while categorical independent features remain un-encoded.
        Categorical_columns = Preprocessing.col_seperator(df.loc[:, cols_], "Categorical_columns")
        if len(Categorical_columns.columns) > 0:
            return render_template('model_training/auto_training.html', project_type=session['project_type'],
                                   target_column=session['target_column'], status="error",
                                   msg="Data contain some categorical indepedent features, please perform encoding first")
        # Regression (1) / classification (2) projects need a target column first.
        if session['project_type'] != 3 and session['target_column'] is None:
            return redirect('/target-column')
        if action == 'help':
            return render_template('model_training/help.html')
        elif action == 'auto_training':
            logger.info('Redirect To Auto Training Page')
            ProjectReports.insert_record_ml('Redirect To Auto Training Page')
            if session['project_type'] == 3:
                # Auto training is not implemented for clustering projects.
                return render_template('model_training/auto_training.html',
                                       project_type=session['project_type'],
                                       target_column=session['target_column'], status="error",
                                       msg="Auto Training is not available for Clustering!!!")
            return render_template('model_training/auto_training.html', project_type=session['project_type'],
                                   target_column=session['target_column'])
        elif action == 'custom_training' or action == 'final_train_model':
            # Block manual training while a scheduled training job is pending.
            # NOTE(review): project_name is interpolated into SQL — confirm it is
            # always an internally generated id (not user-controlled).
            query = f""" select a.pid ProjectId , a.TargetColumn TargetName,
            a.Model_Name ModelName,
            b.Schedule_date,
            b.schedule_time ,
            a.Model_Trained,
            b.train_status ,
            b.email,
            b.deleted
            from tblProjects as a
            join tblProject_scheduler as b on a.Pid = b.ProjectId where b.ProjectId = '{session.get('project_name')}'
            and b.deleted=0
            """
            result = mysql.fetch_one(query)
            if result is not None:
                return render_template('scheduler/training_blocker.html')
            logger.info('Redirect To Custom Training Page')
            ProjectReports.insert_record_ml('Redirect To Custom Training Page')
            try:
                if session['project_type'] == 2:
                    return render_template('model_training/classification.html', action=action,
                                           models=CLASSIFICATION_MODELS)
                elif session['project_type'] == 1:
                    return render_template('model_training/regression.html', action=action,
                                           models=REGRESSION_MODELS)
                elif session['project_type'] == 3:
                    return render_template('model_training/clustering.html', action=action,
                                           models=CLUSTERING_MODELS)
                else:
                    return render_template('model_training/custom_training.html')
            except Exception as e:
                logger.error(e)
                return render_template('model_training/custom_training.html')
        else:
            return 'Non-Implemented Action'
    except Exception as e:
        logger.error('Error in Model Training')
        ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
        return render_template('500.html', exception=e)
@app_training.route('/model_training/<action>', methods=['POST'])
def model_training_post(action):
    """Handle training form submissions, dispatched on <action>:

    - 'custom_training': train the user-selected model with user-supplied
      hyper-parameters, save it as a temp model and render its scores.
    - 'auto_training': run the automatic model comparison for the project.
    - 'final_train_model': re-train the previously chosen model on the full
      dataset and persist it as the project's final model ('model.pkl').
    """
    try:
        if 'pid' in session:
            df = load_data()
            # NOTE(review): `range` shadows the builtin within this function.
            model = None
            range = None
            random_state = None
            if df is not None:
                if action == 'help':
                    return render_template('model_training/help.html')
                elif action == 'custom_training':
                    try:
                        model = request.form['model']
                        range = int(request.form['range'])
                        # These two models take no random_state form field.
                        if model != "KNeighborsClassifier" and model != "SVR":
                            random_state = int(request.form['random_state'])
                        logger.info('Submitted Custom Training Page')
                        ProjectReports.insert_record_ml('Submitted Custom Training Page',
                                                        f"Model:{model}; Range:{range}; Random_State: {random_state}")
                        target = session['target_column']
                        # Supervised projects: regression (1) / classification (2).
                        if session['project_type'] != 3:
                            X = df.drop(target, axis=1)
                            y = df[target]
                            train_model_fun = None
                            X_train, X_test, y_train, y_test = FeatureEngineering.train_test_Split(cleanedData=X,
                                                                                                   label=y,
                                                                                                   train_size=range / 100,
                                                                                                   random_state=random_state)
                            model_params = {}
                            # Map the submitted model name to its parameter schema
                            # and training function.
                            if model == "LinearRegression":
                                Model_Params = LinearRegression_Params
                                train_model_fun = RegressionModels.linear_regression_regressor
                            elif model == "Ridge":
                                Model_Params = Ridge_Params
                                train_model_fun = RegressionModels.ridge_regressor
                            elif model == "Lasso":
                                Model_Params = Lasso_Params
                                train_model_fun = RegressionModels.lasso_regressor
                            elif model == "ElasticNet":
                                Model_Params = ElasticNet_Params
                                train_model_fun = RegressionModels.elastic_net_regressor
                            elif model == "DecisionTreeRegressor":
                                Model_Params = DecisionTreeRegressor_Params
                                train_model_fun = RegressionModels.decision_tree_regressor
                            elif model == "RandomForestRegressor":
                                Model_Params = RandomForestRegressor_Params
                                train_model_fun = RegressionModels.random_forest_regressor
                            elif model == "SVR":
                                Model_Params = SVR_params
                                train_model_fun = RegressionModels.support_vector_regressor
                            elif model == "AdaBoostRegressor":
                                Model_Params = AdabootRegressor_Params
                                train_model_fun = RegressionModels.ada_boost_regressor
                            elif model == "GradientBoostingRegressor":
                                Model_Params = GradientBoostRegressor_Params
                                train_model_fun = RegressionModels.gradient_boosting_regressor
                            elif model == "LogisticRegression":
                                Model_Params = LogisticRegression_Params
                                train_model_fun = ClassificationModels.logistic_regression_classifier
                            elif model == "SVC":
                                Model_Params = SVC_Params
                                train_model_fun = ClassificationModels.support_vector_classifier
                            elif model == "KNeighborsClassifier":
                                print('here')
                                Model_Params = KNeighborsClassifier_Params
                                train_model_fun = ClassificationModels.k_neighbors_classifier
                            elif model == "DecisionTreeClassifier":
                                Model_Params = DecisionTreeClassifier_Params
                                train_model_fun = ClassificationModels.decision_tree_classifier
                            elif model == "RandomForestClassifier":
                                Model_Params = RandomForestClassifier_Params
                                train_model_fun = ClassificationModels.random_forest_classifier
                            elif model == "AdaBoostClassifier":
                                Model_Params = AdaBoostClassifier_Params
                                train_model_fun = ClassificationModels.ada_boost_classifier
                            elif model == "GradientBoostClassifier":
                                Model_Params = GradientBoostingClassifier_Params
                                train_model_fun = ClassificationModels.gradient_boosting_classifier
                            else:
                                return 'Non-Implemented Action'
                            # Coerce each submitted form value to its declared type.
                            for param in Model_Params:
                                model_params[param['name']] = get_param_value(param, request.form[param['name']])
                            trained_model = train_model_fun(X_train, y_train, True, **model_params)
                            """Save Trained Model"""
                            save_project_model(trained_model)
                            reports = [{"key": "Model Name", "value": model},
                                       {"key": "Data Size", "value": len(df)},
                                       {"key": "Trained Data Size", "value": len(X_train)},
                                       {"key": "Test Data Size", "value": len(X_test)}]
                            scores = []
                            # Regression
                            if trained_model is not None and session['project_type'] == 1:
                                y_pred = trained_model.predict(X_test)
                                scores.append({"key": "r2_score", "value": r2_score(y_test, y_pred)})
                                scores.append(
                                    {"key": "mean_absolute_error", "value": mean_absolute_error(y_test, y_pred)})
                                scores.append(
                                    {"key": "mean_squared_error", "value": mean_squared_error(y_test, y_pred)})
                                # Model Name Set in table while training
                                query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
                                mysql.update_record(query)
                                return render_template('model_training/model_result.html', action=action,
                                                       status="success",
                                                       reports=reports, scores=scores, model_params=model_params)
                            # Classification
                            if trained_model is not None and session['project_type'] == 2:
                                y_pred = trained_model.predict(X_test)
                                scores.append({"key": "Accuracy", "value": accuracy_score(y_test, y_pred)})
                                scores.append({"key": "Classes", "value": df[target].unique()})
                                scores.append(
                                    {"key": "Precision", "value": precision_score(y_test, y_pred, average=None)})
                                scores.append({"key": "Recall", "value": recall_score(y_test, y_pred, average=None)})
                                scores.append({"key": "F1_score", "value": f1_score(y_test, y_pred, average=None)})
                                # Model Name Set in table while training
                                query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
                                result = mysql.update_record(query)
                                return render_template('model_training/model_result.html', action=action,
                                                       status="success",
                                                       reports=reports, scores=scores, model_params=model_params)
                        # Clustering projects (project_type == 3) train on all data.
                        elif session['project_type'] == 3:
                            X = df
                            train_model_fun = None
                            model_params = {}
                            if model == "KMeans":
                                Model_Params = KmeansClustering_Params
                                train_model_fun = ClusteringModels.kmeans_clustering
                            elif model == "DBSCAN":
                                Model_Params = DbscanClustering_Params
                                train_model_fun = ClusteringModels.dbscan_clustering
                            elif model == "AgglomerativeClustering":
                                Model_Params = AgglomerativeClustering_Params
                                train_model_fun = ClusteringModels.agglomerative_clustering
                            else:
                                return 'Non-Implemented Action'
                            for param in Model_Params:
                                model_params[param['name']] = get_param_value(param, request.form[param['name']])
                            trained_model, y_pred = train_model_fun(X, True, **model_params)
                            """Save Trained Model"""
                            save_project_model(trained_model)
                            reports = [{"key": "Model Name", "value": model},
                                       {"key": "Data Size", "value": len(df)},
                                       {"key": "Train Data Size", "value": len(X)},
                                       {"key": "Test Data Size", "value": 0}]
                            scores = []
                            # Clustering
                            if trained_model is not None and session['project_type'] == 3:
                                scores.append({"key": "Predicted Classes",
                                               "value": pd.DataFrame(data=y_pred, columns=['y_pred'])[
                                                   'y_pred'].unique()})
                                # Model Name Set in table while training
                                query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
                                result = mysql.update_record(query)
                                return render_template('model_training/model_result.html', action=action,
                                                       status="success",
                                                       reports=reports, scores=scores, model_params=model_params)
                            else:
                                raise Exception("Model Couldn't train, please check parametes")
                    except Exception as e:
                        logger.error('Error Submitted Custom Training Page')
                        ProjectReports.insert_record_ml('Error Submitted Custom Training Page',
                                                        f"Model:{model}; Range:{range}; Random_State: {random_state}",
                                                        '', 0, str(e))
                        # Re-render the appropriate model page with the error message.
                        if session['project_type'] == 2:
                            return render_template('model_training/classification.html', action=action,
                                                   models=CLASSIFICATION_MODELS, status="error", msg=str(e))
                        elif session['project_type'] == 1:
                            return render_template('model_training/regression.html', action=action,
                                                   models=REGRESSION_MODELS, status="error", msg=str(e))
                        else:
                            return render_template('model_training/clustering.html', action=action,
                                                   models=CLUSTERING_MODELS, status="error", msg=str(e))
                elif action == "auto_training":
                    try:
                        target = session['target_column']
                        if target is None:
                            return redirect(url_for('/target-column'))
                        # data_len = len(df)
                        # data_len = 10000 if data_len > 10000 else int(len(df) * 0.9)
                        # df = df.sample(frac=1).loc[:data_len, :]
                        trainer = None
                        X = df.drop(target, axis=1)
                        y = df[target]
                        # Fixed 75/25 split for the automatic model comparison.
                        X_train, X_test, y_train, y_test = FeatureEngineering.train_test_Split(cleanedData=X,
                                                                                               label=y,
                                                                                               train_size=0.75,
                                                                                               random_state=101)
                        if session['project_type'] == 1:
                            trainer = ModelTrain_Regression(X_train, X_test, y_train, y_test, True)
                            result = trainer.results()
                            result = result.to_html()
                            return render_template('model_training/auto_training.html', status="success",
                                                   project_type=session['project_type'],
                                                   target_column=session['target_column'], train_done=True,
                                                   result=result)
                        elif session['project_type'] == 2:
                            trainer = ModelTrain_Classification(X_train, X_test, y_train, y_test, True)
                            result = trainer.results()
                            result = result.to_html()
                            return render_template('model_training/auto_training.html', status="success",
                                                   project_type=session['project_type'],
                                                   target_column=session['target_column'], train_done=True,
                                                   result=result)
                    except Exception as ex:
                        return render_template('model_training/auto_training.html', status="error",
                                               project_type=session['project_type'],
                                               target_column=session['target_column'], msg=str(ex))
                elif action == 'final_train_model':
                    try:
                        logger.info('Final Train Model')
                        ProjectReports.insert_record_ml('Final Train Model')
                        # Re-train with the model name recorded during custom training.
                        query = f'''select Model_Name from tblProjects Where Id="{session.get('pid')}"'''
                        model_name = mysql.fetch_one(query)[0]
                        if session['project_type'] != 3:
                            target = session['target_column']
                            X = df.drop(target, axis=1)
                            y = df[target]
                            model = load_project_model()
                            if model is None:
                                return render_template('model_training/model_result.html', action=action,
                                                       status="error",
                                                       msg="Model is not found, please train model again")
                            else:
                                # Reuse the hyper-parameters of the temp model.
                                model_params = {}
                                for key, value in model.get_params().items():
                                    model_params[key] = value
                                if model_name == "LinearRegression":
                                    train_model_fun = RegressionModels.linear_regression_regressor
                                elif model_name == "Ridge":
                                    train_model_fun = RegressionModels.ridge_regressor
                                elif model_name == "Lasso":
                                    train_model_fun = RegressionModels.lasso_regressor
                                elif model_name == "ElasticNet":
                                    train_model_fun = RegressionModels.elastic_net_regressor
                                elif model_name == "DecisionTreeRegressor":
                                    train_model_fun = RegressionModels.decision_tree_regressor
                                elif model_name == "RandomForestRegressor":
                                    train_model_fun = RegressionModels.random_forest_regressor
                                elif model_name == "SVR":
                                    train_model_fun = RegressionModels.support_vector_regressor
                                elif model_name == "AdaBoostRegressor":
                                    train_model_fun = RegressionModels.ada_boost_regressor
                                elif model_name == "GradientBoostingRegressor":
                                    train_model_fun = RegressionModels.gradient_boosting_regressor
                                elif model_name == "LogisticRegression":
                                    train_model_fun = ClassificationModels.logistic_regression_classifier
                                elif model_name == "SVC":
                                    train_model_fun = ClassificationModels.support_vector_classifier
                                elif model_name == "KNeighborsClassifier":
                                    train_model_fun = ClassificationModels.k_neighbors_classifier
                                elif model_name == "DecisionTreeClassifier":
                                    train_model_fun = ClassificationModels.decision_tree_classifier
                                elif model_name == "RandomForestClassifier":
                                    train_model_fun = ClassificationModels.random_forest_classifier
                                elif model_name == "AdaBoostClassifier":
                                    train_model_fun = ClassificationModels.ada_boost_classifier
                                elif model_name == "GradientBoostClassifier":
                                    train_model_fun = ClassificationModels.gradient_boosting_classifier
                                else:
                                    return 'Non-Implemented Action'
                                trained_model = train_model_fun(X, y, True, **model_params)
                                """Save Final Model"""
                                save_project_model(trained_model, 'model.pkl')
                                query = f'''Update tblProjects Set Model_Trained=1 Where Id="{session.get('pid')}"'''
                                mysql.update_record(query)
                                logger.info('Final Training Done')
                                ProjectReports.insert_record_ml('Final Training Done')
                                return render_template('model_training/congrats.html')
                        elif session['project_type'] == 3:
                            X = df
                            model = load_project_model()
                            if model is None:
                                return render_template('model_training/model_result.html', action=action,
                                                       status="error",
                                                       msg="Model is not found, please train model again")
                            else:
                                model_params = {}
                                for key, value in model.get_params().items():
                                    model_params[key] = value
                                if model_name == "KMeans":
                                    train_model_fun = ClusteringModels.kmeans_clustering
                                elif model_name == "DBSCAN":
                                    train_model_fun = ClusteringModels.dbscan_clustering
                                elif model_name == "AgglomerativeClustering":
                                    train_model_fun = ClusteringModels.agglomerative_clustering
                                else:
                                    return 'Non Implemented mtd'
                                trained_model, y_pred = train_model_fun(X, True, **model_params)
                                """Save Trained Model"""
                                save_project_model(trained_model, 'model.pkl')
                                query = f'''Update tblProjects Set Model_Trained=1 Where Id="{session.get('pid')}"'''
                                mysql.update_record(query)
                                logger.info('Final Training Done')
                                ProjectReports.insert_record_ml('Final Training Done')
                                return render_template('model_training/congrats.html')
                    except Exception as e:
                        logger.error('Error in Model Training Submit')
                        ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
                        # NOTE(review): this render_template result is discarded —
                        # a `return` is likely missing here; confirm intended flow.
                        render_template('model_training/model_result.html', action=action, status="error",
                                        msg="Model is not found, please train model again")
                    if action == "Scheduled_model":
                        path = os.path.join(from_root(), 'artifacts', 'model_temp.pkl')
                        pass
                    else:
                        return "Non Implemented Method"
            else:
                logger.critical('DataFrame has no data')
                return redirect('/')
    except Exception as e:
        logger.error('Error in Model Training Submit')
        ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
        return render_template('500.html', exception=e)
@app_training.route('/congrats', methods=['GET', 'POST'])
def congrats():
    """Show the congratulations page after the final model has been trained.

    Verifies that a trained model exists for the active project before
    rendering; on failure the 500 page is shown.

    BUG FIX: the original iterated `model.get_params()` (a dict) with
    `for key, value in ...`, which unpacks the key strings and raises a
    ValueError, and then exec'd the pairs into locals that were never used.
    Reading the params dict directly is sufficient (and drops the exec).
    """
    try:
        if 'pid' in session:
            df = load_data()
            if df is not None:
                model = load_project_model()
                if model is None:
                    return render_template('model_training/model_result.html', status="error",
                                           msg="Model is not found, please train model again")
                # Log the final model's hyper-parameters for traceability.
                model_params = model.get_params()
                logger.info(f'Final model params: {model_params}')
            logger.info('Loaded Congrats Page')
            ProjectReports.insert_record_ml('Loaded Congrats Page')
            # GET and POST both render the same page.
            return render_template('model_training/congrats.html')
    except Exception as e:
        logger.error('Error in Model Training Submit')
        ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
        return render_template('500.html', exception=e)
@app_training.route('/prediction', methods=['GET', 'POST'])
def prediction():
    """Prediction page.

    GET: check that a trained model exists for the current project.
    POST: accept an uploaded csv/tsv/json file, run batch prediction on it,
    save and render the result. The uploaded file is always removed afterwards.

    BUG FIX: the extension check indexed `f.filename.rsplit('.', 1)[1]`,
    which raised an IndexError for filenames without any extension.
    """
    try:
        if 'pid' not in session:
            logger.error('Project id not found, redirect to home page')
            ProjectReports.insert_record_ml('Project id not found, redirect to home page', '', '', 0, 'Error')
            return redirect('/')
        file_path = ""
        logger.info('Loaded Prediction Page')
        ProjectReports.insert_record_ml('Loaded Prediction Page')
        if request.method == "GET":
            is_trained = mysql.fetch_all(
                f"SELECT * FROM tblProjects WHERE Id ={session.get('pid')} AND Model_Trained=1")
            if is_trained is None:
                return render_template('model_training/prediction_page.html', status="error",
                                       msg="your model is not trained, please train model first")
            return render_template('model_training/prediction_page.html', status="success")
        try:
            f = request.files['file']
            # Local allow-list (csv/tsv/json); intentionally narrower than the
            # module-level ALLOWED_EXTENSIONS used for other uploads.
            allowed_extensions = ['csv', 'tsv', 'json']
            msg = ""
            if len(request.files) == 0 or f.filename.strip() == '':
                msg = 'Please select a file to upload'
            elif '.' not in f.filename or f.filename.rsplit('.', 1)[1].lower() not in allowed_extensions:
                msg = 'This file format is not allowed, please select mentioned one'
            if msg:
                logger.error(msg)
                return render_template('model_training/prediction_page.html', status="error", msg=msg)
            filename = secure_filename(f.filename)
            file_path = os.path.join(config_args['dir_structure']['upload_folder'], filename)
            f.save(file_path)
            # Load the uploaded data with the parser matching its extension.
            if file_path.endswith('.csv'):
                df = pd.read_csv(file_path)
            elif file_path.endswith('.tsv'):
                df = pd.read_csv(file_path, sep='\t')
            elif file_path.endswith('.json'):
                df = pd.read_json(file_path)
            else:
                msg = 'This file format is currently not supported'
                logger.info(msg)
                return render_template('model_training/prediction_page.html', status="error", msg=msg)
            prediction = make_prediction(df)
            data = prediction.to_html()
            if len(data) > 0:
                save_prediction_result(prediction)
                return render_template('model_training/prediction_result.html', status="success", data=data)
            else:
                return render_template('model_training/prediction_result.html', status="error",
                                       msg="There is some issue, coudn't perform prediction. Please check your data")
        except Exception as e:
            logger.error('Error in Model Training Submit')
            ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
            return render_template('model_training/prediction_page.html', status="error", msg=str(e))
        finally:
            # Clean up the uploaded file once prediction has been attempted.
            if file_path:
                os.remove(file_path)
    except Exception as e:
        logger.error(e)
        return redirect('/')
@app_training.route('/download_prediction', methods=['POST'])
def download_prediction():
    """Serve the most recently saved prediction result as a download.

    On any failure the exception is logged and a JSON failure flag is
    returned instead of a 500 page.
    """
    try:
        result = load_prediction_result()
    except Exception as e:
        logger.error(e)
        return jsonify({'success': False})
    return result
@app_training.route('/model_training/ann', methods=['GET'])
def ann_training():
    """Render the ANN configuration page with selectable training options."""
    try:
        # Options shown in the page's drop-downs come from shared constants.
        context = {
            'optimizers': OPTIMIZERS,
            'activation_functions': ACTIVATION_FUNCTIONS,
            'loss': REGRESSION_LOSS,
        }
        return render_template('model_training/ann.html', **context)
    except Exception as e:
        logger.error(e)
        return jsonify({'success': False})
def save_neural_network(checkpoint, name='model_temp.pth.tar'):
    """Persist a torch checkpoint under the current project's artifact dir.

    Args:
        checkpoint: Any torch-serializable object (typically a dict with the
            model/optimizer state).
        name: File name to save under; defaults to 'model_temp.pth.tar'.
    """
    path = os.path.join(from_root(), 'artifacts', session.get('project_name'))
    # makedirs(..., exist_ok=True) creates missing parent directories too and
    # avoids the check-then-create race of os.path.exists + os.mkdir.
    os.makedirs(path, exist_ok=True)
    file_name = os.path.join(path, name)
    torch.save(checkpoint, file_name)
def load_neural_network(checkpoint, name='model_temp.pth.tar'):
    """Load a torch checkpoint from the current project's artifact dir.

    Bug fix: this function previously called ``torch.save``, overwriting the
    stored checkpoint with the *checkpoint* argument instead of loading it.

    Args:
        checkpoint: Unused; kept only for backward compatibility with existing
            callers of this signature.
        name: File name to load; defaults to 'model_temp.pth.tar'.

    Returns:
        The deserialized checkpoint object.
    """
    path = os.path.join(from_root(), 'artifacts', session.get('project_name'))
    # Keep the directory-creation side effect of the original implementation.
    os.makedirs(path, exist_ok=True)
    file_name = os.path.join(path, name)
    return torch.load(file_name)
def create_layers(data=None, df=None, feature_map=None, typ=None):
    """Translate a list of front-end layer descriptions into torch.nn layers.

    Args:
        data: List of layer dicts; each has a 'type' key ('input', 'linear',
            'batch_normalization', 'dropout', 'output') plus 'units',
            'activation' or 'percentage' fields as applicable.
        df: Training feature frame; only ``df.shape[1]`` (input width) is used.
        feature_map: Class-label -> index mapping; its size fixes the output
            width for classification. Defaults to an empty dict.
        typ: 'Regression', 'Classification', or 'cluestring' (rejected).

    Returns:
        A list of nn.Module layers, or an error string for clustering.
    """
    # Bug fix: the previous signature used a mutable default ({}), which is
    # shared across calls; default to None and build a fresh dict instead.
    if feature_map is None:
        feature_map = {}
    layers = []
    activation = {'ReLU': nn.ReLU(),
                  'ELU': nn.ELU(),
                  'LeakyReLU': nn.LeakyReLU(),
                  'Softmax': nn.Softmax(),
                  'PReLU': nn.PReLU(),
                  'SELU': nn.SELU(),
                  'Tanh': nn.Tanh(),
                  'Softplus': nn.Softplus(),
                  'Softmin': nn.Softmin(),
                  'Sigmoid': nn.Sigmoid(),
                  'RReLU': nn.RReLU(),
                  }
    # Width of the previous layer's output; seeded from the first entry so the
    # first 'linear' layer after 'input' gets the right in_features.
    infer_in = data[0]['units']
    for i in data:
        if i['type'] == 'input':
            in_feature = df.shape[1]
            out_feature = i['units']
            layers.append(nn.Linear(in_features=in_feature, out_features=out_feature))
            layers.append(activation[i['activation']])
        if i['type'] == 'linear':
            in_feature = infer_in
            out_feature = i['units']
            layers.append(nn.Linear(in_feature, out_feature))
            layers.append(activation[i['activation']])
            infer_in = out_feature
        if i['type'] == 'batch_normalization':
            layers.append(nn.BatchNorm1d(num_features=infer_in))
        if i['type'] == 'dropout':
            layers.append(nn.Dropout(p=i['percentage']))
        if i['type'] == 'output':
            if typ == 'Regression':
                in_feature = infer_in
                out_feature = 1
                layers.append(nn.Linear(in_features=in_feature, out_features=out_feature))
            if typ == 'Classification':
                in_feature = infer_in
                # One output unit per known class label.
                out_feature = len(feature_map.keys())
                layers.append(nn.Linear(in_features=in_feature, out_features=out_feature))
            if typ == 'cluestring':
                return 'CLuestring cant be performed using Ann'
    return layers
class CustomTrainData(Dataset):
    """Torch Dataset wrapping a training feature frame and its target series."""

    def __init__(self, train_df, target):
        self.train_df = train_df
        self.target = target
        # Convert to tensors once up front so __getitem__ is a cheap index.
        self.x = torch.from_numpy(train_df.to_numpy())
        self.y = torch.from_numpy(target.to_numpy())
        self.n_sample = train_df.shape[0]

    def __getitem__(self, index):
        # One (features, label) pair per sample.
        return self.x[index], self.y[index]

    def __len__(self):
        return self.n_sample
class CustomTestData(Dataset):
    """Torch Dataset wrapping a held-out feature frame and its target series."""

    def __init__(self, test_df, target):
        self.test_df = test_df
        self.target = target
        # Eagerly materialize tensors; indexing then avoids pandas overhead.
        self.x = torch.from_numpy(test_df.to_numpy())
        self.y = torch.from_numpy(target.to_numpy())
        self.n_sample = test_df.shape[0]

    def __getitem__(self, index):
        # One (features, label) pair per sample.
        return self.x[index], self.y[index]

    def __len__(self):
        return self.n_sample
def count_parameters(model):
    """Tabulate trainable parameter counts per named module of *model*.

    Returns:
        (PrettyTable, int): a table of (module name, parameter count) rows
        and the total number of trainable parameters.
    """
    table = PrettyTable(["Modules", "Parameters"])
    total_params = 0
    for name, parameter in model.named_parameters():
        # Frozen weights (requires_grad=False) are excluded from the count.
        if not parameter.requires_grad:
            continue
        n = parameter.numel()
        table.add_row([name, n])
        total_params += n
    return table, total_params
def trainTestSplit(df, target, size=0.25):
    """Split *df* into train/test feature and label sets.

    Args:
        df: Full dataset including the label column.
        target: Name of the label column.
        size: Fraction of rows used for TRAINING (the test set gets 1 - size).

    Returns:
        X_train, X_test, y_train, y_test
    """
    features = df.drop(target, axis=1)
    labels = df[target]
    # Fixed random_state keeps splits reproducible across runs.
    return train_test_split(features, labels, test_size=1 - size, random_state=101)
def main(Data=None, df=None, target=None, size=None, num_epoch=None, typ=None):
    """Train and evaluate a feed-forward network described by *Data* on *df*.

    Args:
        Data: Front-end payload; assumes keys 'layerUnits', 'optimizers',
            'loss' and 'learningRate' -- TODO confirm against the ann.html form.
        df: Full pandas DataFrame including the target column. NOTE(review):
            for classification the target column is label-encoded IN PLACE.
        target: Name of the label column.
        size: Train split fraction, forwarded to trainTestSplit.
        num_epoch: Number of training epochs.
        typ: 'Regression' or 'Classification' (clustering is unsupported).

    Returns:
        (model_info, model_metrice, model_metrice_plot): model metadata,
        final train/test metrics, and per-step series for plotting.
    """
    model_info = {}
    model_metrice = {}
    model_metrice_plot = {}
    feature_map = {}
    if typ == 'Classification':
        # Map each class label to an integer index and encode the target.
        for i in enumerate(df[target].unique()):
            feature_map[i[1]] = i[0]
        df[target] = df[target].replace(feature_map)
        model_info['feature_map'] = feature_map
    model_info['split_size'] = size
    model_info['batch_size'] = 32
    X_train, X_test, y_train, y_test = trainTestSplit(df, target, size=size)
    # Data class creation
    trainData = CustomTrainData(X_train, y_train)
    testData = CustomTestData(X_test, y_test)
    # Data loader creation
    train_data_loader = DataLoader(trainData, batch_size=32, shuffle=True)
    test_data_loader = DataLoader(testData, batch_size=32)
    # Model Creation
    model = nn.Sequential(*create_layers(Data['layerUnits'], X_train, feature_map, typ))
    print(model)
    # Optimizer and Loss ---- > front end
    table, total_params = count_parameters(model)
    model_info['table'] = table.get_html_string()
    model_info['total_params'] = total_params
    model_info['optimizer'] = Data['optimizers']
    model_info['loss'] = Data['loss']
    model_info['model'] = list(model)
    # NOTE(review): all four optimizers are instantiated before one is
    # selected; harmless but wasteful.
    optimizer_selection = {'Adam': torch.optim.Adam(model.parameters(), lr=float(Data['learningRate'])),
                           'AdaGrad': torch.optim.Adagrad(model.parameters(), lr=float(Data['learningRate'])),
                           'AdaMax': torch.optim.Adamax(model.parameters(), lr=float(Data['learningRate'])),
                           'RMSProps': torch.optim.RMSprop(model.parameters(), lr=float(Data['learningRate']))}
    optimizer = optimizer_selection[Data['optimizers']]
    if typ == "Classification":
        loss_selection_classification = {'BCEWithLogitsLoss': nn.BCEWithLogitsLoss(), 'CrossEntropyLoss': nn.CrossEntropyLoss()}
        loss_func = loss_selection_classification[Data['loss']]
    if typ == "Regression":
        loss_selection_regression = {'MAE': nn.L1Loss(), 'MSE': nn.MSELoss(), 'Huber Loss': nn.HuberLoss(),
                                     'Smoth L1': nn.SmoothL1Loss()}
        loss_func = loss_selection_regression[Data['loss']]
    print(loss_func)
    # Regression
    # Train
    if typ == "Regression":
        loss_perEpoch = []
        model.train()
        num_epochs = num_epoch
        for epooch in range(num_epochs):
            for batch_idx, data in enumerate(train_data_loader):
                features = data[0].float()
                # Reshape targets to (batch, 1) to match the model's output.
                labels = data[1].float().reshape(features.shape[0],1)
                # print(features.shape,labels.shape)
                optimizer.zero_grad()
                output = model(features)
                loss = loss_func(output, labels)
                loss.backward()
                optimizer.step()
                if batch_idx % 2 == 0:
                    # Sample the loss every other batch for the plot series.
                    loss_perEpoch.append(loss.item())
            print(f'Epoch {epooch}/{num_epochs} Loss: {loss.item()}')
        model_metrice['train_loss'] = loss_perEpoch[-1]
        model_metrice_plot['train_loss'] = loss_perEpoch
        # For regression the "accuracy" series is just the step index (x-axis).
        model_metrice_plot['train_accuracy'] = [x for x in range(len(loss_perEpoch))]
        # Test
        model.eval()
        test_loss = []
        with torch.no_grad():
            for idx, data in enumerate(test_data_loader):
                features = data[0].float()
                labels = data[1].float().reshape(features.shape[0],1)
                output = model(features)
                test_loss.append(loss_func(output, labels).item())
        model_metrice['test_loss'] = np.mean(test_loss)
        # Accuracy is undefined for regression.
        model_metrice['test_accuracy'] = None
        model_metrice_plot['test_loss'] = test_loss
        model_metrice_plot['test_accuracy'] = [x for x in range(len(test_loss))]
        print("Test Loss :", np.mean(test_loss))
    # Classification
    if typ == 'Classification':
        # Train
        loss_perEpoch = []
        train_acc = []
        model.train()
        num_epochs = num_epoch
        for epooch in range(num_epochs):
            for batch_idx, data in enumerate(train_data_loader):
                features = data[0].float()
                labels = data[1]
                # print(features,labels)
                optimizer.zero_grad()
                output = model(features)
                loss = loss_func(output, labels)
                loss.backward()
                optimizer.step()
                if batch_idx % 8 == 0:
                    # Batch accuracy: fraction of argmax predictions matching labels.
                    train_acc.append((torch.argmax(output, axis=1) == labels.squeeze().long()).float().mean())
                    loss_perEpoch.append(loss.item())
            print(f'Epoch {epooch}/{num_epochs} Loss: {loss.item()}')
        model_metrice['train_loss'] = loss_perEpoch[-1]
        model_metrice_plot['train_loss'] = loss_perEpoch
        model_metrice_plot['train_accuracy'] = train_acc
        # Test
        model.eval()
        test_loss = []
        test_acc = []
        with torch.no_grad():
            for idx, data in enumerate(test_data_loader):
                features = data[0].float()
                labels = data[1]
                output = model(features)
                test_acc.append((torch.argmax(output, axis=1) == labels.squeeze().long()).float().mean())
                test_loss.append(loss_func(output, labels).item())
        print("Test Loss :", np.mean(test_loss), " ", "Test Accuracy :", np.mean(test_acc))
        model_metrice['test_accuracy'] = np.mean(test_acc)
        model_metrice['test_loss'] = np.mean(test_loss)
        model_metrice_plot['test_loss'] = test_loss
        model_metrice_plot['test_accuracy'] = [x for x in range(len(test_loss))]
    return model_info, model_metrice, model_metrice_plot
@app_training.route('/model_training/ann', methods=['POST'])
def ann_model_training():
    """Train an ANN from the posted layer config and render a summary page."""
    try:
        payload = request.get_json(force=True)
        print(payload)
        df = load_data()
        target = session['target_column']
        typ = 'Regression' if session['project_type'] == 1 else 'Classification'
        model_info, model_metrice, model_metrice_plot = main(
            payload, df, target=target,
            size=float(payload['trainSplitPercent']),
            num_epoch=int(payload['epoch']), typ=typ)
        # Build the train/test line plots for the summary template.
        graphJSON = {
            'train': PlotlyHelper.line(df, x=model_metrice_plot['train_accuracy'],
                                       y=model_metrice_plot['train_loss']),
            'test': PlotlyHelper.line(df, x=model_metrice_plot['test_accuracy'],
                                      y=model_metrice_plot['test_loss']),
        }
        return render_template('model_training/ann_summary.html', model_info=model_info,
                               model_metrice=model_metrice, status="success", graphJSON=graphJSON)
    except Exception as e:
        logger.error(e)
        return jsonify({'success': False})
@app_training.route('/model_training/cnn', methods=['GET'])
def cnn_training():
    """Render the CNN configuration page with selectable training options."""
    try:
        # Drop-down choices come from the shared constants module.
        context = {
            'optimizers': OPTIMIZERS,
            'poolings': POOLING,
            'activation_functions': ACTIVATION_FUNCTIONS,
            'loss': REGRESSION_LOSS,
        }
        return render_template('model_training/cnn.html', **context)
    except Exception as e:
        logger.error(e)
        return jsonify({'success': False})
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app_training.route('/model_training/upload_zip', methods=['POST'])
def cnn_model_training():
    """Accept a zip upload for CNN training and store it in UPLOAD_FOLDER."""
    try:
        # NOTE(review): missing/empty files are only printed, not rejected;
        # preserved as-is to keep behavior identical.
        if 'zip_file' not in request.files:
            print('No file part')
        upload = request.files['zip_file']
        if upload.filename == '':
            print('No selected file')
        if upload and allowed_file(upload.filename):
            safe_name = secure_filename(upload.filename)
            upload.save(os.path.join(UPLOAD_FOLDER, safe_name))
        return jsonify({'success': True})
    except Exception as e:
        logger.error(e)
        return jsonify({'success': False})
| 50.882788 | 163 | 0.522102 | from flask import Blueprint, redirect, url_for, render_template, request, session
from src.constants.model_params import Ridge_Params, Lasso_Params, ElasticNet_Params, RandomForestRegressor_Params, \
SVR_params, AdabootRegressor_Params, \
GradientBoostRegressor_Params
from src.constants.model_params import KmeansClustering_Params, DbscanClustering_Params, AgglomerativeClustering_Params
from src.constants.model_params import LogisticRegression_Params, SVC_Params, KNeighborsClassifier_Params, \
DecisionTreeClassifier_Params, RandomForestClassifier_Params, GradientBoostingClassifier_Params, \
AdaBoostClassifier_Params
from src.constants.constants import ACTIVATION_FUNCTIONS, CLASSIFICATION_MODELS, CLUSTERING_MODELS, OPTIMIZERS, \
REGRESSION_LOSS, POOLING
from flask.json import jsonify
from src.constants.model_params import DecisionTreeRegressor_Params, LinearRegression_Params
from src.model.custom.classification_models import ClassificationModels
from src.model.custom.regression_models import RegressionModels
from src.model.custom.clustering_models import ClusteringModels
from src.preprocessing.preprocessing_helper import Preprocessing
from src.constants.constants import REGRESSION_MODELS
from src.utils.common.prediction_helper import make_prediction
from src.utils.databases.mysql_helper import MySqlHelper
from werkzeug.utils import secure_filename
import os
from src.utils.common.common_helper import get_param_value, load_prediction_result, load_project_model, \
read_config, save_prediction_result, save_project_model
import pandas as pd
from src.utils.common.data_helper import load_data
from src.model.auto.Auto_classification import ModelTrain_Classification
from src.model.auto.Auto_regression import ModelTrain_Regression
from src.feature_engineering.feature_engineering_helper import FeatureEngineering
from loguru import logger
from from_root import from_root
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, accuracy_score, precision_score, \
f1_score, recall_score
from src.utils.common.project_report_helper import ProjectReports
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import numpy as np
from sklearn.model_selection import train_test_split
from prettytable import PrettyTable
from src.utils.common.plotly_helper import PlotlyHelper
app_training = Blueprint('training', __name__)
config_args = read_config("./config.yaml")
mysql = MySqlHelper.get_connection_obj()
log_path = os.path.join(from_root(), config_args['logs']['logger'], config_args['logs']['generallogs_file'])
logger.add(sink=log_path, format="[{time:YYYY-MM-DD HH:mm:ss.SSS} - {level} - {module} ] - {message}", level="INFO")
UPLOAD_FOLDER = config_args['dir_structure']['upload_folder']
ALLOWED_EXTENSIONS = set(['zip'])
@app_training.route('/model_training/<action>', methods=['GET'])
def model_training(action):
try:
if 'pid' in session:
df = load_data()
if df is not None:
target_column = ""
if session['target_column'] is not None:
target_column = session['target_column']
target_column = session['target_column']
cols_ = [col for col in df.columns if col != target_column]
Categorical_columns = Preprocessing.col_seperator(df.loc[:, cols_], "Categorical_columns")
if len(Categorical_columns.columns) > 0:
return render_template('model_training/auto_training.html', project_type=session['project_type'],
target_column=session['target_column'], status="error",
msg="Data contain some categorical indepedent features, please perform encoding first")
if session['project_type'] != 3 and session['target_column'] is None:
return redirect('/target-column')
if action == 'help':
return render_template('model_training/help.html')
elif action == 'auto_training':
logger.info('Redirect To Auto Training Page')
ProjectReports.insert_record_ml('Redirect To Auto Training Page')
if session['project_type'] == 3:
return render_template('model_training/auto_training.html',
project_type=session['project_type'],
target_column=session['target_column'], status="error",
msg="Auto Training is not available for Clustering!!!")
return render_template('model_training/auto_training.html', project_type=session['project_type'],
target_column=session['target_column'])
elif action == 'custom_training' or action == 'final_train_model':
query = f""" select a.pid ProjectId , a.TargetColumn TargetName,
a.Model_Name ModelName,
b.Schedule_date,
b.schedule_time ,
a.Model_Trained,
b.train_status ,
b.email,
b.deleted
from tblProjects as a
join tblProject_scheduler as b on a.Pid = b.ProjectId where b.ProjectId = '{session.get('project_name')}'
and b.deleted=0
"""
result = mysql.fetch_one(query)
if result is not None:
return render_template('scheduler/training_blocker.html')
logger.info('Redirect To Custom Training Page')
ProjectReports.insert_record_ml('Redirect To Custom Training Page')
try:
if session['project_type'] == 2:
return render_template('model_training/classification.html', action=action,
models=CLASSIFICATION_MODELS)
elif session['project_type'] == 1:
return render_template('model_training/regression.html', action=action,
models=REGRESSION_MODELS)
elif session['project_type'] == 3:
return render_template('model_training/clustering.html', action=action,
models=CLUSTERING_MODELS)
else:
return render_template('model_training/custom_training.html')
except Exception as e:
logger.error(e)
return render_template('model_training/custom_training.html')
else:
return 'Non-Implemented Action'
else:
return redirect('/')
else:
return redirect(url_for('/'))
except Exception as e:
logger.error('Error in Model Training')
ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
return render_template('500.html', exception=e)
@app_training.route('/model_training/<action>', methods=['POST'])
def model_training_post(action):
try:
if 'pid' in session:
df = load_data()
model = None
range = None
random_state = None
if df is not None:
if action == 'help':
return render_template('model_training/help.html')
elif action == 'custom_training':
try:
model = request.form['model']
range = int(request.form['range'])
if model != "KNeighborsClassifier" and model != "SVR":
random_state = int(request.form['random_state'])
logger.info('Submitted Custom Training Page')
ProjectReports.insert_record_ml('Submitted Custom Training Page',
f"Model:{model}; Range:{range}; Random_State: {random_state}")
target = session['target_column']
if session['project_type'] != 3:
X = df.drop(target, axis=1)
y = df[target]
train_model_fun = None
X_train, X_test, y_train, y_test = FeatureEngineering.train_test_Split(cleanedData=X,
label=y,
train_size=range / 100,
random_state=random_state)
model_params = {}
if model == "LinearRegression":
Model_Params = LinearRegression_Params
train_model_fun = RegressionModels.linear_regression_regressor
elif model == "Ridge":
Model_Params = Ridge_Params
train_model_fun = RegressionModels.ridge_regressor
elif model == "Lasso":
Model_Params = Lasso_Params
train_model_fun = RegressionModels.lasso_regressor
elif model == "ElasticNet":
Model_Params = ElasticNet_Params
train_model_fun = RegressionModels.elastic_net_regressor
elif model == "DecisionTreeRegressor":
Model_Params = DecisionTreeRegressor_Params
train_model_fun = RegressionModels.decision_tree_regressor
elif model == "RandomForestRegressor":
Model_Params = RandomForestRegressor_Params
train_model_fun = RegressionModels.random_forest_regressor
elif model == "SVR":
Model_Params = SVR_params
train_model_fun = RegressionModels.support_vector_regressor
elif model == "AdaBoostRegressor":
Model_Params = AdabootRegressor_Params
train_model_fun = RegressionModels.ada_boost_regressor
elif model == "GradientBoostingRegressor":
Model_Params = GradientBoostRegressor_Params
train_model_fun = RegressionModels.gradient_boosting_regressor
elif model == "LogisticRegression":
Model_Params = LogisticRegression_Params
train_model_fun = ClassificationModels.logistic_regression_classifier
elif model == "SVC":
Model_Params = SVC_Params
train_model_fun = ClassificationModels.support_vector_classifier
elif model == "KNeighborsClassifier":
print('here')
Model_Params = KNeighborsClassifier_Params
train_model_fun = ClassificationModels.k_neighbors_classifier
elif model == "DecisionTreeClassifier":
Model_Params = DecisionTreeClassifier_Params
train_model_fun = ClassificationModels.decision_tree_classifier
elif model == "RandomForestClassifier":
Model_Params = RandomForestClassifier_Params
train_model_fun = ClassificationModels.random_forest_classifier
elif model == "AdaBoostClassifier":
Model_Params = AdaBoostClassifier_Params
train_model_fun = ClassificationModels.ada_boost_classifier
elif model == "GradientBoostClassifier":
Model_Params = GradientBoostingClassifier_Params
train_model_fun = ClassificationModels.gradient_boosting_classifier
else:
return 'Non-Implemented Action'
for param in Model_Params:
model_params[param['name']] = get_param_value(param, request.form[param['name']])
trained_model = train_model_fun(X_train, y_train, True, **model_params)
"""Save Trained Model"""
save_project_model(trained_model)
reports = [{"key": "Model Name", "value": model},
{"key": "Data Size", "value": len(df)},
{"key": "Trained Data Size", "value": len(X_train)},
{"key": "Test Data Size", "value": len(X_test)}]
scores = []
if trained_model is not None and session['project_type'] == 1:
y_pred = trained_model.predict(X_test)
scores.append({"key": "r2_score", "value": r2_score(y_test, y_pred)})
scores.append(
{"key": "mean_absolute_error", "value": mean_absolute_error(y_test, y_pred)})
scores.append(
{"key": "mean_squared_error", "value": mean_squared_error(y_test, y_pred)})
query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
mysql.update_record(query)
return render_template('model_training/model_result.html', action=action,
status="success",
reports=reports, scores=scores, model_params=model_params)
if trained_model is not None and session['project_type'] == 2:
y_pred = trained_model.predict(X_test)
scores.append({"key": "Accuracy", "value": accuracy_score(y_test, y_pred)})
scores.append({"key": "Classes", "value": df[target].unique()})
scores.append(
{"key": "Precision", "value": precision_score(y_test, y_pred, average=None)})
scores.append({"key": "Recall", "value": recall_score(y_test, y_pred, average=None)})
scores.append({"key": "F1_score", "value": f1_score(y_test, y_pred, average=None)})
query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
result = mysql.update_record(query)
return render_template('model_training/model_result.html', action=action,
status="success",
reports=reports, scores=scores, model_params=model_params)
elif session['project_type'] == 3:
X = df
train_model_fun = None
model_params = {}
if model == "KMeans":
Model_Params = KmeansClustering_Params
train_model_fun = ClusteringModels.kmeans_clustering
elif model == "DBSCAN":
Model_Params = DbscanClustering_Params
train_model_fun = ClusteringModels.dbscan_clustering
elif model == "AgglomerativeClustering":
Model_Params = AgglomerativeClustering_Params
train_model_fun = ClusteringModels.agglomerative_clustering
else:
return 'Non-Implemented Action'
for param in Model_Params:
model_params[param['name']] = get_param_value(param, request.form[param['name']])
trained_model, y_pred = train_model_fun(X, True, **model_params)
"""Save Trained Model"""
save_project_model(trained_model)
reports = [{"key": "Model Name", "value": model},
{"key": "Data Size", "value": len(df)},
{"key": "Train Data Size", "value": len(X)},
{"key": "Test Data Size", "value": 0}]
scores = []
if trained_model is not None and session['project_type'] == 3:
scores.append({"key": "Predicted Classes",
"value": pd.DataFrame(data=y_pred, columns=['y_pred'])[
'y_pred'].unique()})
query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
result = mysql.update_record(query)
return render_template('model_training/model_result.html', action=action,
status="success",
reports=reports, scores=scores, model_params=model_params)
else:
raise Exception("Model Couldn't train, please check parametes")
except Exception as e:
logger.error('Error Submitted Custom Training Page')
ProjectReports.insert_record_ml('Error Submitted Custom Training Page',
f"Model:{model}; Range:{range}; Random_State: {random_state}",
'', 0, str(e))
if session['project_type'] == 2:
return render_template('model_training/classification.html', action=action,
models=CLASSIFICATION_MODELS, status="error", msg=str(e))
elif session['project_type'] == 1:
return render_template('model_training/regression.html', action=action,
models=REGRESSION_MODELS, status="error", msg=str(e))
else:
return render_template('model_training/clustering.html', action=action,
models=CLUSTERING_MODELS, status="error", msg=str(e))
elif action == "auto_training":
try:
target = session['target_column']
if target is None:
return redirect(url_for('/target-column'))
# data_len = len(df)
# data_len = 10000 if data_len > 10000 else int(len(df) * 0.9)
# df = df.sample(frac=1).loc[:data_len, :]
trainer = None
X = df.drop(target, axis=1)
y = df[target]
X_train, X_test, y_train, y_test = FeatureEngineering.train_test_Split(cleanedData=X,
label=y,
train_size=0.75,
random_state=101)
if session['project_type'] == 1:
trainer = ModelTrain_Regression(X_train, X_test, y_train, y_test, True)
result = trainer.results()
result = result.to_html()
return render_template('model_training/auto_training.html', status="success",
project_type=session['project_type'],
target_column=session['target_column'], train_done=True,
result=result)
elif session['project_type'] == 2:
trainer = ModelTrain_Classification(X_train, X_test, y_train, y_test, True)
result = trainer.results()
result = result.to_html()
return render_template('model_training/auto_training.html', status="success",
project_type=session['project_type'],
target_column=session['target_column'], train_done=True,
result=result)
except Exception as ex:
return render_template('model_training/auto_training.html', status="error",
project_type=session['project_type'],
target_column=session['target_column'], msg=str(ex))
elif action == 'final_train_model':
try:
logger.info('Final Train Model')
ProjectReports.insert_record_ml('Final Train Model')
query = f'''select Model_Name from tblProjects Where Id="{session.get('pid')}"'''
model_name = mysql.fetch_one(query)[0]
if session['project_type'] != 3:
target = session['target_column']
X = df.drop(target, axis=1)
y = df[target]
model = load_project_model()
if model is None:
return render_template('model_training/model_result.html', action=action,
status="error",
msg="Model is not found, please train model again")
else:
model_params = {}
for key, value in model.get_params().items():
model_params[key] = value
if model_name == "LinearRegression":
train_model_fun = RegressionModels.linear_regression_regressor
elif model_name == "Ridge":
train_model_fun = RegressionModels.ridge_regressor
elif model_name == "Lasso":
train_model_fun = RegressionModels.lasso_regressor
elif model_name == "ElasticNet":
train_model_fun = RegressionModels.elastic_net_regressor
elif model_name == "DecisionTreeRegressor":
train_model_fun = RegressionModels.decision_tree_regressor
elif model_name == "RandomForestRegressor":
train_model_fun = RegressionModels.random_forest_regressor
elif model_name == "SVR":
train_model_fun = RegressionModels.support_vector_regressor
elif model_name == "AdaBoostRegressor":
train_model_fun = RegressionModels.ada_boost_regressor
elif model_name == "GradientBoostingRegressor":
train_model_fun = RegressionModels.gradient_boosting_regressor
elif model_name == "LogisticRegression":
train_model_fun = ClassificationModels.logistic_regression_classifier
elif model_name == "SVC":
train_model_fun = ClassificationModels.support_vector_classifier
elif model_name == "KNeighborsClassifier":
train_model_fun = ClassificationModels.k_neighbors_classifier
elif model_name == "DecisionTreeClassifier":
train_model_fun = ClassificationModels.decision_tree_classifier
elif model_name == "RandomForestClassifier":
train_model_fun = ClassificationModels.random_forest_classifier
elif model_name == "AdaBoostClassifier":
train_model_fun = ClassificationModels.ada_boost_classifier
elif model_name == "GradientBoostClassifier":
train_model_fun = ClassificationModels.gradient_boosting_classifier
else:
return 'Non-Implemented Action'
trained_model = train_model_fun(X, y, True, **model_params)
"""Save Final Model"""
save_project_model(trained_model, 'model.pkl')
query = f'''Update tblProjects Set Model_Trained=1 Where Id="{session.get('pid')}"'''
mysql.update_record(query)
logger.info('Final Training Done')
ProjectReports.insert_record_ml('Final Training Done')
return render_template('model_training/congrats.html')
elif session['project_type'] == 3:
X = df
model = load_project_model()
if model is None:
return render_template('model_training/model_result.html', action=action,
status="error",
msg="Model is not found, please train model again")
else:
model_params = {}
for key, value in model.get_params().items():
model_params[key] = value
if model_name == "KMeans":
train_model_fun = ClusteringModels.kmeans_clustering
elif model_name == "DBSCAN":
train_model_fun = ClusteringModels.dbscan_clustering
elif model_name == "AgglomerativeClustering":
train_model_fun = ClusteringModels.agglomerative_clustering
else:
return 'Non Implemented mtd'
trained_model, y_pred = train_model_fun(X, True, **model_params)
"""Save Trained Model"""
save_project_model(trained_model, 'model.pkl')
query = f'''Update tblProjects Set Model_Trained=1 Where Id="{session.get('pid')}"'''
mysql.update_record(query)
logger.info('Final Training Done')
ProjectReports.insert_record_ml('Final Training Done')
return render_template('model_training/congrats.html')
except Exception as e:
logger.error('Error in Model Training Submit')
ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
render_template('model_training/model_result.html', action=action, status="error",
msg="Model is not found, please train model again")
if action == "Scheduled_model":
path = os.path.join(from_root(), 'artifacts', 'model_temp.pkl')
pass
else:
return "Non Implemented Method"
else:
logger.critical('DataFrame has no data')
return redirect('/')
except Exception as e:
logger.error('Error in Model Training Submit')
ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
return render_template('500.html', exception=e)
@app_training.route('/congrats', methods=['GET', 'POST'])
def congrats():
try:
if 'pid' in session:
df = load_data()
if df is not None:
target = session['target_column']
X = df.drop(target, axis=1)
y = df[target]
model = load_project_model()
if model is None:
return render_template('model_training/model_result.html', status="error",
msg="Model is not found, please train model again")
else:
for key, value in model.get_params():
exec(key + "=value")
logger.info('Loaded Congrats Page')
ProjectReports.insert_record_ml('Loaded Congrats Page')
if request.method == "GET":
return render_template('model_training/congrats.html')
else:
return render_template('model_training/congrats.html')
except Exception as e:
logger.error('Error in Model Training Submit')
ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
return render_template('500.html', exception=e)
@app_training.route('/prediction', methods=['GET', 'POST'])
def prediction():
try:
if 'pid' in session:
file_path = ""
logger.info('Loaded Prediction Page')
ProjectReports.insert_record_ml('Loaded Prediction Page')
if request.method == "GET":
is_trained = mysql.fetch_all(
f"SELECT * FROM tblProjects WHERE Id ={session.get('pid')} AND Model_Trained=1")
if is_trained is None:
return render_template('model_training/prediction_page.html', status="error",
msg="your model is not trained, please train model first")
else:
return render_template('model_training/prediction_page.html', status="success")
else:
try:
f = request.files['file']
ALLOWED_EXTENSIONS = ['csv', 'tsv', 'json']
msg = ""
if len(request.files) == 0:
msg = 'Please select a file to upload'
elif f.filename.strip() == '':
msg = 'Please select a file to upload'
elif f.filename.rsplit('.', 1)[1].lower() not in ALLOWED_EXTENSIONS:
msg = 'This file format is not allowed, please select mentioned one'
if msg:
logger.error(msg)
return render_template('model_training/prediction_page.html', status="error", msg=msg)
filename = secure_filename(f.filename)
file_path = os.path.join(config_args['dir_structure']['upload_folder'], filename)
f.save(file_path)
if file_path.endswith('.csv'):
df = pd.read_csv(file_path)
elif file_path.endswith('.tsv'):
df = pd.read_csv(file_path, sep='\t')
elif file_path.endswith('.json'):
df = pd.read_json(file_path)
else:
msg = 'This file format is currently not supported'
logger.info(msg)
return render_template('model_training/prediction_page.html', status="error", msg=msg)
prediction = make_prediction(df)
data = prediction.to_html()
if len(data) > 0:
save_prediction_result(prediction)
return render_template('model_training/prediction_result.html', status="success", data=data)
else:
return render_template('model_training/prediction_result.html', status="error",
msg="There is some issue, coudn't perform prediction. Please check your data")
except Exception as e:
logger.error('Error in Model Training Submit')
ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
return render_template('model_training/prediction_page.html', status="error", msg=str(e))
finally:
if file_path:
os.remove(file_path)
else:
logger.error('Project id not found, redirect to home page')
ProjectReports.insert_record_ml('Project id not found, redirect to home page', '', '', 0, 'Error')
return redirect('/')
except Exception as e:
logger.error(e)
return redirect('/')
@app_training.route('/download_prediction', methods=['POST'])
def download_prediction():
    """Serve the most recently saved prediction result as a download.

    Returns the response built by load_prediction_result(), or a JSON
    failure payload if anything goes wrong.
    """
    try:
        return load_prediction_result()
    except Exception as exc:
        logger.error(exc)
        return jsonify({'success': False})
@app_training.route('/model_training/ann', methods=['GET'])
def ann_training():
    """Render the ANN configuration page with selectable hyper-parameters."""
    try:
        context = {
            'optimizers': OPTIMIZERS,
            'activation_functions': ACTIVATION_FUNCTIONS,
            'loss': REGRESSION_LOSS,
        }
        return render_template('model_training/ann.html', **context)
    except Exception as exc:
        logger.error(exc)
        return jsonify({'success': False})
def save_neural_network(checkpoint, name='model_temp.pth.tar'):
    """Persist a model checkpoint under the current project's artifact dir.

    Args:
        checkpoint: Serializable checkpoint object (e.g. state dicts).
        name: File name to save the checkpoint under.
    """
    path = os.path.join(from_root(), 'artifacts', session.get('project_name'))
    # makedirs is race-safe and creates any missing intermediate
    # directories, unlike the previous os.mkdir call which raised if
    # 'artifacts' itself did not exist or if two requests raced.
    os.makedirs(path, exist_ok=True)
    file_name = os.path.join(path, name)
    torch.save(checkpoint, file_name)
def load_neural_network(checkpoint, name='model_temp.pth.tar'):
    """Load a model checkpoint from the current project's artifact dir.

    Bug fix: this function previously called torch.save(), silently
    OVERWRITING the checkpoint file on disk instead of loading it, and
    returned nothing.

    Args:
        checkpoint: Unused; kept for backward compatibility with callers.
        name: File name of the checkpoint to load.

    Returns:
        The deserialized checkpoint object.
    """
    path = os.path.join(from_root(), 'artifacts', session.get('project_name'))
    file_name = os.path.join(path, name)
    return torch.load(file_name)
def create_layers(data=None, df=None, feature_map=None, typ=None):
    """Translate a list of UI layer specs into ``torch.nn`` modules.

    Args:
        data: Sequence of layer-spec dicts. Each has a 'type' key
            ('input', 'linear', 'batch_normalization', 'dropout',
            'output') plus type-specific keys: 'units' and 'activation'
            for input/linear layers, 'percentage' for dropout.
        df: Training dataframe; its column count sets the input width.
        feature_map: Class-name -> index mapping; its size sets the
            output width for classification. Defaults to a fresh empty
            dict. (Fix: this was previously a mutable default argument
            ``feature_map={}`` shared across calls.)
        typ: 'Regression', 'Classification' or 'cluestring'.

    Returns:
        A list of nn modules, or an error string when typ == 'cluestring'.
    """
    if feature_map is None:
        feature_map = {}
    layers = []
    # Supported activation names mapped to module instances.
    activation = {'ReLU': nn.ReLU(),
                  'ELU': nn.ELU(),
                  'LeakyReLU': nn.LeakyReLU(),
                  'Softmax': nn.Softmax(),
                  'PReLU': nn.PReLU(),
                  'SELU': nn.SELU(),
                  'Tanh': nn.Tanh(),
                  'Softplus': nn.Softplus(),
                  'Softmin': nn.Softmin(),
                  'Sigmoid': nn.Sigmoid(),
                  'RReLU': nn.RReLU(),
                  }
    # Width of the most recent layer; seeded from the first (input) spec.
    infer_in = data[0]['units']
    for i in data:
        if i['type'] == 'input':
            in_feature = df.shape[1]
            out_feature = i['units']
            layers.append(nn.Linear(in_features=in_feature, out_features=out_feature))
            layers.append(activation[i['activation']])
        if i['type'] == 'linear':
            in_feature = infer_in
            out_feature = i['units']
            layers.append(nn.Linear(in_feature, out_feature))
            layers.append(activation[i['activation']])
            infer_in = out_feature
        if i['type'] == 'batch_normalization':
            layers.append(nn.BatchNorm1d(num_features=infer_in))
        if i['type'] == 'dropout':
            layers.append(nn.Dropout(p=i['percentage']))
        if i['type'] == 'output':
            if typ == 'Regression':
                # Single continuous output unit.
                layers.append(nn.Linear(in_features=infer_in, out_features=1))
            if typ == 'Classification':
                # One logit per class in the feature map.
                layers.append(nn.Linear(in_features=infer_in, out_features=len(feature_map.keys())))
            if typ == 'cluestring':
                return 'CLuestring cant be performed using Ann'
    return layers
class CustomTrainData(Dataset):
    """Torch dataset wrapping a training dataframe and its target series."""

    def __init__(self, train_df, target):
        self.train_df = train_df
        self.target = target
        # Materialize tensors once up front; __getitem__ then only indexes.
        self.x = torch.from_numpy(train_df.to_numpy())
        self.y = torch.from_numpy(target.to_numpy())
        self.n_sample = len(train_df)

    def __getitem__(self, index):
        """Return the (features, label) pair at *index*."""
        return self.x[index], self.y[index]

    def __len__(self):
        return self.n_sample
class CustomTestData(Dataset):
    """Torch dataset wrapping a held-out test dataframe and its targets."""

    def __init__(self, test_df, target):
        self.test_df = test_df
        self.target = target
        # Convert once; indexing afterwards is cheap tensor slicing.
        self.x = torch.from_numpy(test_df.to_numpy())
        self.y = torch.from_numpy(target.to_numpy())
        self.n_sample = len(test_df)

    def __getitem__(self, index):
        """Return the (features, label) pair at *index*."""
        return self.x[index], self.y[index]

    def __len__(self):
        return self.n_sample
def count_parameters(model):
    """Tabulate trainable parameter counts per named parameter of *model*.

    Returns:
        (table, total): a PrettyTable with Modules/Parameters rows and
        the summed trainable parameter count.
    """
    table = PrettyTable(["Modules", "Parameters"])
    total = 0
    for name, tensor in model.named_parameters():
        # Frozen parameters are excluded from the report.
        if not tensor.requires_grad:
            continue
        count = tensor.numel()
        table.add_row([name, count])
        total += count
    return table, total
def trainTestSplit(df, target, size=0.25):
    """Split *df* into train/test features and labels.

    Note: *size* is the TRAIN fraction, so test_size is derived as
    1 - size. random_state is pinned for reproducible splits.
    """
    features = df.drop(target, axis=1)
    labels = df[target]
    return train_test_split(features, labels, test_size=1 - size, random_state=101)
def main(Data=None, df=None, target=None, size=None, num_epoch=None, typ=None):
    """Build, train and evaluate a feed-forward network on *df*.

    Args:
        Data: UI payload dict with keys 'layerUnits' (layer specs),
            'optimizers', 'loss' and 'learningRate'.
        df: Dataframe holding features plus the target column.
        target: Name of the target column in *df*.
        size: Train-split fraction forwarded to trainTestSplit.
        num_epoch: Number of training epochs.
        typ: 'Regression' or 'Classification'.

    Returns:
        Tuple (model_info, model_metrice, model_metrice_plot): model
        metadata, final train/test metrics, and per-batch series for
        plotting.
    """
    model_info = {}
    model_metrice = {}
    model_metrice_plot = {}
    feature_map = {}
    if typ == 'Classification':
        # Map each class value to an integer index and encode the target
        # column in place.
        for i in enumerate(df[target].unique()):
            feature_map[i[1]] = i[0]
        df[target] = df[target].replace(feature_map)
    model_info['feature_map'] = feature_map
    model_info['split_size'] = size
    model_info['batch_size'] = 32
    X_train, X_test, y_train, y_test = trainTestSplit(df, target, size=size)
    trainData = CustomTrainData(X_train, y_train)
    testData = CustomTestData(X_test, y_test)
    train_data_loader = DataLoader(trainData, batch_size=32, shuffle=True)
    test_data_loader = DataLoader(testData, batch_size=32)
    # Assemble the network from the UI layer specification.
    model = nn.Sequential(*create_layers(Data['layerUnits'], X_train, feature_map, typ))
    print(model)
    table, total_params = count_parameters(model)
    model_info['table'] = table.get_html_string()
    model_info['total_params'] = total_params
    model_info['optimizer'] = Data['optimizers']
    model_info['loss'] = Data['loss']
    model_info['model'] = list(model)
    # NOTE(review): this dict eagerly instantiates every optimizer even
    # though only the selected one is used.
    optimizer_selection = {'Adam': torch.optim.Adam(model.parameters(), lr=float(Data['learningRate'])),
                           'AdaGrad': torch.optim.Adagrad(model.parameters(), lr=float(Data['learningRate'])),
                           'AdaMax': torch.optim.Adamax(model.parameters(), lr=float(Data['learningRate'])),
                           'RMSProps': torch.optim.RMSprop(model.parameters(), lr=float(Data['learningRate']))}
    optimizer = optimizer_selection[Data['optimizers']]
    if typ == "Classification":
        loss_selection_classification = {'BCEWithLogitsLoss': nn.BCEWithLogitsLoss(), 'CrossEntropyLoss': nn.CrossEntropyLoss()}
        loss_func = loss_selection_classification[Data['loss']]
    if typ == "Regression":
        loss_selection_regression = {'MAE': nn.L1Loss(), 'MSE': nn.MSELoss(), 'Huber Loss': nn.HuberLoss(),
                                     'Smoth L1': nn.SmoothL1Loss()}
        loss_func = loss_selection_regression[Data['loss']]
    print(loss_func)
    if typ == "Regression":
        # ---- Regression training loop ----
        loss_perEpoch = []
        model.train()
        num_epochs = num_epoch
        for epooch in range(num_epochs):
            for batch_idx, data in enumerate(train_data_loader):
                features = data[0].float()
                # Targets reshaped to (batch, 1) to match the model output.
                labels = data[1].float().reshape(features.shape[0],1)
                optimizer.zero_grad()
                output = model(features)
                loss = loss_func(output, labels)
                loss.backward()
                optimizer.step()
                # Sample the loss every other batch for plotting.
                if batch_idx % 2 == 0:
                    loss_perEpoch.append(loss.item())
            print(f'Epoch {epooch}/{num_epochs} Loss: {loss.item()}')
        model_metrice['train_loss'] = loss_perEpoch[-1]
        model_metrice_plot['train_loss'] = loss_perEpoch
        # For regression the "accuracy" series is just the sample index
        # (used as the x-axis of the loss plot).
        model_metrice_plot['train_accuracy'] = [x for x in range(len(loss_perEpoch))]
        model.eval()
        test_loss = []
        with torch.no_grad():
            for idx, data in enumerate(test_data_loader):
                features = data[0].float()
                labels = data[1].float().reshape(features.shape[0],1)
                output = model(features)
                test_loss.append(loss_func(output, labels).item())
            model_metrice['test_loss'] = np.mean(test_loss)
            model_metrice['test_accuracy'] = None
            model_metrice_plot['test_loss'] = test_loss
            model_metrice_plot['test_accuracy'] = [x for x in range(len(test_loss))]
            print("Test Loss :", np.mean(test_loss))
    if typ == 'Classification':
        # ---- Classification training loop ----
        loss_perEpoch = []
        train_acc = []
        model.train()
        num_epochs = num_epoch
        for epooch in range(num_epochs):
            for batch_idx, data in enumerate(train_data_loader):
                features = data[0].float()
                labels = data[1]
                optimizer.zero_grad()
                output = model(features)
                loss = loss_func(output, labels)
                loss.backward()
                optimizer.step()
                # Sample accuracy/loss every 8th batch for plotting.
                if batch_idx % 8 == 0:
                    train_acc.append((torch.argmax(output, axis=1) == labels.squeeze().long()).float().mean())
                    loss_perEpoch.append(loss.item())
            print(f'Epoch {epooch}/{num_epochs} Loss: {loss.item()}')
        model_metrice['train_loss'] = loss_perEpoch[-1]
        model_metrice_plot['train_loss'] = loss_perEpoch
        model_metrice_plot['train_accuracy'] = train_acc
        model.eval()
        test_loss = []
        test_acc = []
        with torch.no_grad():
            for idx, data in enumerate(test_data_loader):
                features = data[0].float()
                labels = data[1]
                output = model(features)
                test_acc.append((torch.argmax(output, axis=1) == labels.squeeze().long()).float().mean())
                test_loss.append(loss_func(output, labels).item())
            print("Test Loss :", np.mean(test_loss), " ", "Test Accuracy :", np.mean(test_acc))
            model_metrice['test_accuracy'] = np.mean(test_acc)
            model_metrice['test_loss'] = np.mean(test_loss)
            model_metrice_plot['test_loss'] = test_loss
            model_metrice_plot['test_accuracy'] = [x for x in range(len(test_loss))]
    return model_info, model_metrice, model_metrice_plot
@app_training.route('/model_training/ann', methods=['POST'])
def ann_model_training():
    """Train an ANN from the posted JSON config and render its summary."""
    try:
        payload = request.get_json(force=True)
        print(payload)
        df = load_data()
        target = session['target_column']
        typ = 'Regression' if session['project_type'] == 1 else 'Classification'
        model_info, model_metrice, model_metrice_plot = main(
            payload, df, target=target,
            size=float(payload['trainSplitPercent']),
            num_epoch=int(payload['epoch']), typ=typ)
        graphJSON = {
            'train': PlotlyHelper.line(df, x=model_metrice_plot['train_accuracy'],
                                       y=model_metrice_plot['train_loss']),
            'test': PlotlyHelper.line(df, x=model_metrice_plot['test_accuracy'],
                                      y=model_metrice_plot['test_loss']),
        }
        return render_template('model_training/ann_summary.html', model_info=model_info,
                               model_metrice=model_metrice, status="success", graphJSON=graphJSON)
    except Exception as exc:
        logger.error(exc)
        return jsonify({'success': False})
@app_training.route('/model_training/cnn', methods=['GET'])
def cnn_training():
    """Render the CNN configuration page with selectable hyper-parameters."""
    try:
        context = {
            'optimizers': OPTIMIZERS,
            'poolings': POOLING,
            'activation_functions': ACTIVATION_FUNCTIONS,
            'loss': REGRESSION_LOSS,
        }
        return render_template('model_training/cnn.html', **context)
    except Exception as exc:
        logger.error(exc)
        return jsonify({'success': False})
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app_training.route('/model_training/upload_zip', methods=['POST'])
def cnn_model_training():
    """Accept a zip upload for CNN training and store it in UPLOAD_FOLDER."""
    try:
        # Missing part/filename cases are only reported; the subsequent
        # access raising is caught by the except below (as before).
        if 'zip_file' not in request.files:
            print('No file part')
        upload = request.files['zip_file']
        if upload.filename == '':
            print('No selected file')
        if upload and allowed_file(upload.filename):
            safe_name = secure_filename(upload.filename)
            upload.save(os.path.join(UPLOAD_FOLDER, safe_name))
        return jsonify({'success': True})
    except Exception as exc:
        logger.error(exc)
        return jsonify({'success': False})
| true | true |
f7ff82117f18da189f3ac21b9b4c617298189b55 | 18,781 | py | Python | tools/common-models/src/metrics/results.py | emotive-computing/mosaic_stress_2021 | be4e0f2e0f0455d97cf6c9b5fd6dac60872d94c7 | [
"MIT"
] | null | null | null | tools/common-models/src/metrics/results.py | emotive-computing/mosaic_stress_2021 | be4e0f2e0f0455d97cf6c9b5fd6dac60872d94c7 | [
"MIT"
] | null | null | null | tools/common-models/src/metrics/results.py | emotive-computing/mosaic_stress_2021 | be4e0f2e0f0455d97cf6c9b5fd6dac60872d94c7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import abc
from collections import defaultdict
from math import sqrt
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from sklearn.metrics import accuracy_score, r2_score, mean_squared_error, f1_score, recall_score, precision_score, \
average_precision_score, cohen_kappa_score
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import label_binarize
from src.common import utils
from src.configuration.settings_template import Settings
from src.io.read_data_input import Dataset
from src.metrics.metrics import Metrics
from src.metrics.output_columns import RunInfoOutputColumnNames, MetricsOutputColumnNames, \
NumExamplesOutputColumnNames, CorrelationOutputColumnNames, TrueVsPredictedNumExamplesOutputColumnNames, \
AdditionalMetricsOutputColumnNames, PROBABILITY_COLUMN_NAME_SUFFIX, RegressionMetricsOutputColumnNames, FoldGroupByOutputColumnNames
from src.run.model_run_instance import ModelInfoOnlyInstance
class ResultMetrics(Metrics):
    """Dispatcher that routes metric computation to the task-specific
    subclass (regression / multiclass / classification)."""

    @classmethod
    def get_child_type(cls):
        """Pick the concrete metrics class for the configured task type."""
        if Settings.PREDICTION.is_regression():
            return RegressionResultMetrics
        if Settings.PREDICTION.is_multiclass():
            return MulticlassResultMetrics
        return ClassificationResultMetrics

    def get_child_type_from_instance(self):
        """Instance-level access to the class-level dispatch."""
        return type(self).get_child_type()

    @classmethod
    def get(cls, model_run_instance):
        """Instantiate the task-specific metrics class for a run instance."""
        return cls.get_child_type()(model_run_instance)

    @classmethod
    def get_output_column_names(cls, df, include_groupby=False):
        """Delegate output-column selection; the group-by flag is only
        forwarded when group-by display is enabled in settings."""
        child = cls.get_child_type()
        if Settings.SHOW_GROUP_BY_COLUMN_VALUE:
            return child.get_output_column_names(df, include_groupby)
        return child.get_output_column_names(df)

    # Base metric computation; implemented by subclasses.
    @abc.abstractmethod
    def get_metrics(self, y_true, probabilities):
        pass

    # Metrics aggregated across all folds; implemented by subclasses.
    @abc.abstractmethod
    def get_metrics_per_run_instance(self, y_true, probabilities):
        pass

    def get_metrics_per_fold(self, y_true, probabilities, num_train=None, num_test=None):
        """Metrics for one fold, annotated with train/test example counts."""
        metrics = self.get_metrics(y_true, probabilities)
        if num_train is not None:
            metrics[NumExamplesOutputColumnNames.Num_train_examples.name] = num_train
        if num_test is not None:
            metrics[NumExamplesOutputColumnNames.Num_test_examples.name] = num_test
        if num_train is not None and num_test is not None:
            metrics[NumExamplesOutputColumnNames.Total_num_examples.name] = num_train + num_test
        return metrics

    @classmethod
    def get_metrics_from_all_predictions(cls, all_predictions):
        """Delegate whole-run metric recomputation to the task subclass."""
        return cls.get_child_type().get_metrics_from_all_predictions(all_predictions)

    @classmethod
    def get_positive_probabilities(cls, probabilities):
        """Delegate positive-class probability extraction to the subclass."""
        return cls.get_child_type().get_positive_probabilities(probabilities)
########################################################################################################################
class RegressionResultMetrics(ResultMetrics):
    """Metrics (R2, RMSE, Pearson/Spearman correlations) for regression runs."""

    def get_metrics_per_run_instance(self, y_true, probabilities):
        """Compute regression metrics for a completed run instance."""
        metrics = self.get_metrics(y_true, probabilities)
        print("Finished running: ", self.model_run_instance)
        return metrics

    def get_metrics(self, y_true, probabilities):
        """Return a dict of run info plus R2, RMSE and correlation stats."""
        metrics = defaultdict()
        metrics[RunInfoOutputColumnNames.Model.name] = self.model_run_instance.model_name
        metrics[RunInfoOutputColumnNames.Label.name] = self.model_run_instance.label
        metrics[RunInfoOutputColumnNames.Feature_source.name] = self.model_run_instance.feature_source_name
        metrics[RegressionMetricsOutputColumnNames.R2_score.name] = r2_score(y_true, probabilities)
        metrics[RegressionMetricsOutputColumnNames.RMSE_score.name] = sqrt(mean_squared_error(y_true, probabilities))
        # pearsonr/spearmanr each return (statistic, p-value).
        metrics[CorrelationOutputColumnNames.Pearson_correlation.name], metrics[
            CorrelationOutputColumnNames.Pearson_corr_p_value.name] = pearsonr(y_true, probabilities)
        metrics[CorrelationOutputColumnNames.Spearman_correlation.name], metrics[
            CorrelationOutputColumnNames.Spearman_corr_p_value.name] = spearmanr(y_true, probabilities)
        metrics[NumExamplesOutputColumnNames.Total_num_examples.name] = len(y_true)
        print("Pearson Correlation: {}".format(metrics[CorrelationOutputColumnNames.Pearson_correlation.name]))
        print("Spearman Correlation: {}".format(metrics[CorrelationOutputColumnNames.Spearman_correlation.name]))
        return metrics

    @classmethod
    def get_metrics_from_all_predictions(cls, all_predictions):
        """Recompute metrics per (model, feature source, label) group of the
        combined predictions frame."""
        all_results = pd.DataFrame()
        groups = all_predictions.groupby(
            [RunInfoOutputColumnNames.Model.name, RunInfoOutputColumnNames.Feature_source.name,
             RunInfoOutputColumnNames.Label.name])
        for name, group in groups:
            model_run_instance = ModelInfoOnlyInstance(model_name=name[0], feature_source_name=name[1], label=name[2])
            all_results = all_results.append(
                RegressionResultMetrics(model_run_instance).get_metrics(group.True_value.values,
                                                                        group.Predicted_value.values),
                ignore_index=True)
        return all_results

    @classmethod
    def get_output_column_names(cls, df, include_groupby=False):
        """Columns to print for regression output.

        Fix: this is a classmethod, so its first parameter is now named
        ``cls`` (it was previously misnamed ``self``), and it accepts the
        ``include_groupby`` flag that the ResultMetrics base dispatcher
        forwards when SHOW_GROUP_BY_COLUMN_VALUE is enabled - previously
        that call raised a TypeError for regression runs.
        """
        columns = RunInfoOutputColumnNames.list_member_names() + \
                  RegressionMetricsOutputColumnNames.list_member_names() + \
                  CorrelationOutputColumnNames.list_member_names() + \
                  NumExamplesOutputColumnNames.get_columns_to_show_in_output(df)
        if include_groupby:
            # Mirror ClassificationResultMetrics: append fold group-by columns.
            columns += [FoldGroupByOutputColumnNames.Train_Group_By_Value.name,
                        FoldGroupByOutputColumnNames.Test_Group_By_Value.name]
        return columns

    @classmethod
    def get_positive_probabilities(cls, probabilities):
        # Regression outputs are already continuous scores; pass through.
        return probabilities
########################################################################################################################
# Find metrics for model / label such as AUC, Accuracy, etc...
class ClassificationResultMetrics(ResultMetrics):
    """Metrics (AUC, AUPRC, accuracy, F1/precision/recall, base rates) for
    binary and multi-class classification runs."""

    def get_predictions_from_probabilities(self, probabilities):
        """Hard predictions = argmax over the class-probability axis."""
        if not isinstance(self.model_run_instance.label, list):
            if probabilities.ndim == 1:
                return np.argmax(probabilities)
            return np.argmax(probabilities, axis=1)
        # Multi-label runs carry one probability matrix per label.
        return np.argmax(probabilities, axis=2)

    def get_metrics_per_run_instance(self, y_true, probabilities):
        """Compute metrics for a run; for a multi-label run, one metrics
        dict per label.

        Fix: the per-label branch previously passed an undefined name
        ``le`` as a third argument to get_metrics (which only accepts
        two) - get_metrics looks up the label encoder itself.
        """
        if isinstance(self.model_run_instance.label, list):
            metrics = []
            for idx, lbl in enumerate(self.model_run_instance.label):
                per_label = type(self)(self.model_run_instance.get_new_instance_with_label(lbl))
                metrics.append(per_label.get_metrics(y_true[:, idx], probabilities[:, idx]))
        else:
            metrics = self.get_metrics(y_true, probabilities)
        print("Finished running: ", self.model_run_instance)
        return metrics

    def get_metrics(self, y_true, probabilities):
        """Return a dict of run info and classification metrics.

        For binary labels this includes AUC, AUPRC, base rates and
        per-class F1/precision/recall; for multi-class only accuracy is
        currently reported (the per-class AUC computation is disabled).
        """
        predictions = self.get_predictions_from_probabilities(probabilities)
        metrics = defaultdict()
        metrics[RunInfoOutputColumnNames.Model.name] = self.model_run_instance.model_name
        metrics[RunInfoOutputColumnNames.Label.name] = self.model_run_instance.label
        metrics[RunInfoOutputColumnNames.Feature_source.name] = self.model_run_instance.feature_source_name
        metrics[MetricsOutputColumnNames.Accuracy.name] = accuracy_score(y_true, predictions)
        metrics[NumExamplesOutputColumnNames.Total_num_examples.name] = len(y_true)
        le = Dataset().get_saved_label_encoder(self.model_run_instance.label)
        if le.is_binary_prediction:
            # Column-unpack the (n, 2) probability matrix into per-class tuples.
            negative_class_probabilities, positive_class_probabilities = list(zip(*probabilities))
            metrics[MetricsOutputColumnNames.AUC.name] = roc_auc_score(y_true, list(positive_class_probabilities))
            total = metrics[NumExamplesOutputColumnNames.Total_num_examples.name]
            metrics[TrueVsPredictedNumExamplesOutputColumnNames.True_num_pos_examples.name] = len(
                [i for i in y_true if i == 1])
            metrics[TrueVsPredictedNumExamplesOutputColumnNames.True_base_rate.name] = \
                metrics[TrueVsPredictedNumExamplesOutputColumnNames.True_num_pos_examples.name] / total
            metrics[TrueVsPredictedNumExamplesOutputColumnNames.Predicted_num_pos_examples.name] = len(
                [i for i in predictions if i == 1])
            metrics[TrueVsPredictedNumExamplesOutputColumnNames.Predicted_base_rate.name] = \
                metrics[TrueVsPredictedNumExamplesOutputColumnNames.Predicted_num_pos_examples.name] / total
            metrics[AdditionalMetricsOutputColumnNames.F1_score_pos.name] = f1_score(y_true, predictions)
            metrics[AdditionalMetricsOutputColumnNames.Precision_pos.name] = precision_score(y_true, predictions)
            metrics[AdditionalMetricsOutputColumnNames.Recall_pos.name] = recall_score(y_true, predictions)
            # Negative-class metrics via 0/1 label inversion.
            y_true_neg = utils.get_inverse_binary_values(y_true)
            predictions_neg = utils.get_inverse_binary_values(predictions)
            metrics[AdditionalMetricsOutputColumnNames.F1_score_neg.name] = f1_score(y_true_neg, predictions_neg)
            metrics[AdditionalMetricsOutputColumnNames.Precision_neg.name] = precision_score(y_true_neg,
                                                                                             predictions_neg)
            metrics[AdditionalMetricsOutputColumnNames.Recall_neg.name] = recall_score(y_true_neg, predictions_neg)
            metrics[AdditionalMetricsOutputColumnNames.AUPRC_pos.name] = average_precision_score(
                y_true, positive_class_probabilities)
            print("AUC: {}".format(metrics[MetricsOutputColumnNames.AUC.name]))
            print("AUPRC: {}".format(metrics[AdditionalMetricsOutputColumnNames.AUPRC_pos.name]))
        else:
            # NOTE(review): per-class / weighted AUC for the multi-class case
            # is currently disabled; only the binarized labels are prepared.
            y_binarized = label_binarize(le.inverse_transform(y_true), le.classes_)
        return metrics

    @classmethod
    def get_metrics_from_all_predictions(cls, all_predictions):
        """Recompute metrics per (label, feature source, model) group of the
        combined predictions frame."""
        all_results = pd.DataFrame()
        groups = all_predictions.groupby(
            [RunInfoOutputColumnNames.Label.name, RunInfoOutputColumnNames.Feature_source.name,
             RunInfoOutputColumnNames.Model.name])
        for name, group in groups:
            label = name[0]
            print(label, " ", name[1])
            _, _, le = Dataset().get(label)  # TODO fix
            # Rebuild the (n, n_classes) probability matrix from per-class columns.
            probabilities = np.asarray([group[c + PROBABILITY_COLUMN_NAME_SUFFIX] for c in le.classes_]).T
            model_run_instance = ModelInfoOnlyInstance(model_name=name[2], feature_source_name=name[1], label=name[0])
            metrics = ClassificationResultMetrics(model_run_instance).get_metrics(
                le.transform(group.True_value.values.astype('str')),
                probabilities)
            all_results = all_results.append(metrics, ignore_index=True)
            print()
        return all_results

    @classmethod
    def get_output_column_names(cls, df, include_groupby=False):
        """Columns to print; AUC columns and the binary-only extras are
        included only when present/applicable."""
        auc_columns = [col for col in df.columns if MetricsOutputColumnNames.AUC.name == col]
        columns_to_print = RunInfoOutputColumnNames.list_member_names() + \
                           auc_columns + \
                           [MetricsOutputColumnNames.Accuracy.name,
                            NumExamplesOutputColumnNames.Total_num_examples.name,
                            NumExamplesOutputColumnNames.Num_train_examples.name,
                            NumExamplesOutputColumnNames.Num_test_examples.name]
        # Binary prediction: add base-rate and per-class metric columns.
        if len(auc_columns) == 1:
            columns_to_print += TrueVsPredictedNumExamplesOutputColumnNames.list_member_names()
            columns_to_print += AdditionalMetricsOutputColumnNames.list_member_names()
        if include_groupby:
            group_by_columns = [FoldGroupByOutputColumnNames.Train_Group_By_Value.name, FoldGroupByOutputColumnNames.Test_Group_By_Value.name]
            columns_to_print += group_by_columns
        return columns_to_print

    @classmethod
    def get_positive_probabilities(cls, probabilities):
        # Second column of the (n, 2) matrix is the positive class.
        return np.array(probabilities)[:, 1]
########################################################################################################################
# Find metrics for model / label such as AUC, Accuracy, etc...
class MulticlassResultMetrics(ResultMetrics):
    """Metrics (accuracy, weighted F1, linear-weighted kappa, weighted
    AUROC) for multi-class classification runs."""

    def get_predictions_from_probabilities(self, probabilities):
        """Hard predictions = argmax over the class axis (axis 2 when the
        run carries a list of labels)."""
        return np.argmax(probabilities, axis=1) if not isinstance(self.model_run_instance.label, list) else np.argmax(
            probabilities, axis=2)

    def get_metrics_per_run_instance(self, y_true, probabilities):
        """Compute metrics for a run; for a multi-label run, one metrics
        dict per label.

        Fix: the per-label branch previously passed an undefined name
        ``le`` as a third argument to get_metrics (which only accepts
        two) - get_metrics looks up the label encoder itself.
        """
        if isinstance(self.model_run_instance.label, list):
            metrics = []
            for idx, lbl in enumerate(self.model_run_instance.label):
                per_label = type(self)(self.model_run_instance.get_new_instance_with_label(lbl))
                metrics.append(per_label.get_metrics(y_true[:, idx], probabilities[:, idx]))
        else:
            metrics = self.get_metrics(y_true, probabilities)
        print("Finished running: ", self.model_run_instance)
        return metrics

    def get_metrics(self, y_true, probabilities):
        """Return run info plus accuracy, weighted F1, kappa and AUROC.

        NOTE(review): *probabilities* is used directly as hard class
        predictions here (no argmax) - callers appear to pass encoded
        predictions; confirm against call sites.
        """
        predictions = probabilities
        metrics = defaultdict()
        metrics[RunInfoOutputColumnNames.Model.name] = self.model_run_instance.model_name
        metrics[RunInfoOutputColumnNames.Label.name] = self.model_run_instance.label
        metrics[RunInfoOutputColumnNames.Feature_source.name] = self.model_run_instance.feature_source_name
        metrics[MetricsOutputColumnNames.Accuracy.name] = accuracy_score(y_true, predictions)
        metrics[NumExamplesOutputColumnNames.Total_num_examples.name] = len(y_true)
        le = Dataset().get_saved_label_encoder(self.model_run_instance.label)
        # One-hot binarize both sides so AUROC can be averaged over classes.
        y_true_binarized = label_binarize(le.inverse_transform(y_true), le.classes_)
        y_pred_binarized = label_binarize(le.inverse_transform(predictions), le.classes_)
        metrics[AdditionalMetricsOutputColumnNames.F1_score.name] = f1_score(y_true, predictions, average='weighted')
        metrics[AdditionalMetricsOutputColumnNames.Kappa.name] = cohen_kappa_score(y_true, predictions, weights='linear')
        metrics[AdditionalMetricsOutputColumnNames.AUROC.name] = roc_auc_score(y_true_binarized, y_pred_binarized, average='weighted')
        return metrics

    @classmethod
    def get_metrics_from_all_predictions(cls, all_predictions):
        """Recompute metrics per (label, feature source, model) group.

        NOTE(review): this delegates to ClassificationResultMetrics rather
        than MulticlassResultMetrics - confirm this is intentional.
        """
        all_results = pd.DataFrame()
        groups = all_predictions.groupby(
            [RunInfoOutputColumnNames.Label.name, RunInfoOutputColumnNames.Feature_source.name,
             RunInfoOutputColumnNames.Model.name])
        for name, group in groups:
            label = name[0]
            print(label, " ", name[1])
            _, _, le = Dataset().get(label)  # TODO fix
            probabilities = np.asarray([group[c + PROBABILITY_COLUMN_NAME_SUFFIX] for c in le.classes_]).T
            model_run_instance = ModelInfoOnlyInstance(model_name=name[2], feature_source_name=name[1], label=name[0])
            metrics = ClassificationResultMetrics(model_run_instance).get_metrics(
                le.transform(group.True_value.values.astype('str')),
                probabilities)
            all_results = all_results.append(metrics, ignore_index=True)
            print()
        return all_results

    @classmethod
    def get_output_column_names(cls, df, include_groupby=False):
        """Columns to print for multiclass output.

        Fix: accepts the ``include_groupby`` flag that the ResultMetrics
        base dispatcher forwards when SHOW_GROUP_BY_COLUMN_VALUE is
        enabled - previously that call raised a TypeError.
        """
        auc_columns = [col for col in df.columns if MetricsOutputColumnNames.AUC.name == col]
        columns_to_print = RunInfoOutputColumnNames.list_member_names() + \
                           auc_columns + \
                           [MetricsOutputColumnNames.Accuracy.name,
                            AdditionalMetricsOutputColumnNames.F1_score.name,
                            AdditionalMetricsOutputColumnNames.Kappa.name,
                            AdditionalMetricsOutputColumnNames.AUROC.name,
                            NumExamplesOutputColumnNames.Total_num_examples.name]
        if include_groupby:
            # Mirror ClassificationResultMetrics: append fold group-by columns.
            columns_to_print += [FoldGroupByOutputColumnNames.Train_Group_By_Value.name,
                                 FoldGroupByOutputColumnNames.Test_Group_By_Value.name]
        return columns_to_print

    @classmethod
    def get_positive_probabilities(cls, probabilities):
        # Multiclass probabilities are returned unchanged.
        return probabilities
| 49.036554 | 142 | 0.689953 |
import abc
from collections import defaultdict
from math import sqrt
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from sklearn.metrics import accuracy_score, r2_score, mean_squared_error, f1_score, recall_score, precision_score, \
average_precision_score, cohen_kappa_score
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import label_binarize
from src.common import utils
from src.configuration.settings_template import Settings
from src.io.read_data_input import Dataset
from src.metrics.metrics import Metrics
from src.metrics.output_columns import RunInfoOutputColumnNames, MetricsOutputColumnNames, \
NumExamplesOutputColumnNames, CorrelationOutputColumnNames, TrueVsPredictedNumExamplesOutputColumnNames, \
AdditionalMetricsOutputColumnNames, PROBABILITY_COLUMN_NAME_SUFFIX, RegressionMetricsOutputColumnNames, FoldGroupByOutputColumnNames
from src.run.model_run_instance import ModelInfoOnlyInstance
class ResultMetrics(Metrics):
    """Dispatcher base class: routes metric computation to the concrete
    subclass matching the configured prediction task (regression,
    multiclass, or standard classification)."""
    @classmethod
    def get_child_type(cls):
        # Choose the concrete metrics class from global prediction settings.
        if Settings.PREDICTION.is_regression():
            return RegressionResultMetrics
        elif Settings.PREDICTION.is_multiclass():
            return MulticlassResultMetrics
        else:
            return ClassificationResultMetrics
    def get_child_type_from_instance(self):
        """Instance-level access to the class-level dispatch."""
        return type(self).get_child_type()
    @classmethod
    def get(cls, model_run_instance):
        """Instantiate the task-specific metrics class for a run instance."""
        child_type = cls.get_child_type()
        return child_type(model_run_instance)
    @classmethod
    def get_output_column_names(cls, df, include_groupby=False):
        # The group-by flag is only forwarded when group-by display is
        # enabled in settings.
        if Settings.SHOW_GROUP_BY_COLUMN_VALUE:
            return cls.get_child_type().get_output_column_names(df, include_groupby)
        return cls.get_child_type().get_output_column_names(df)
    @abc.abstractmethod
    def get_metrics(self, y_true, probabilities):
        """Base metric computation; implemented by subclasses."""
        pass
    @abc.abstractmethod
    def get_metrics_per_run_instance(self, y_true, probabilities):
        """Metrics aggregated across folds; implemented by subclasses."""
        pass
    def get_metrics_per_fold(self, y_true, probabilities, num_train=None, num_test=None):
        """Metrics for one fold, annotated with train/test example counts."""
        metrics = self.get_metrics(y_true, probabilities)
        if num_train is not None:
            metrics[NumExamplesOutputColumnNames.Num_train_examples.name] = num_train
        if num_test is not None:
            metrics[NumExamplesOutputColumnNames.Num_test_examples.name] = num_test
        if num_train is not None and num_test is not None:
            metrics[NumExamplesOutputColumnNames.Total_num_examples.name] = num_train + num_test
        return metrics
    @classmethod
    def get_metrics_from_all_predictions(cls, all_predictions):
        """Delegate whole-run metric recomputation to the task subclass."""
        return cls.get_child_type().get_metrics_from_all_predictions(all_predictions)
    @classmethod
    def get_positive_probabilities(cls, probabilities):
        """Delegate positive-class probability extraction to the subclass."""
        return cls.get_child_type().get_positive_probabilities(probabilities)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.